hip_filename   string (length 5 – 84)
hip_content    string (length 79 – 9.69M)
cuda_filename  string (length 4 – 83)
cuda_content   string (length 19 – 9.69M)
3ab9e06a7a047775267a82b7ece8625a2ac8fa23.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void findAtomGridIndex(const real4* __restrict__ posq, int2* __restrict__ pmeAtomGridIndex, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // Compute the index of the grid point each atom is associated with. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { real4 pos = posq[i]; real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); pmeAtomGridIndex[i] = make_int2(i, gridIndex.x*GRID_SIZE_Y*GRID_SIZE_Z+gridIndex.y*GRID_SIZE_Z+gridIndex.z); } } extern "C" __global__ void gridSpreadCharge(const real4* __restrict__ posq, real* __restrict__ originalPmeGrid, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, const int2* __restrict__ pmeAtomGridIndex) { real3 data[PME_ORDER]; const real scale = RECIP(PME_ORDER-1); // Process the atoms in spatially sorted order. This improves efficiency when writing // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int atom = pmeAtomGridIndex[i].x; real4 pos = posq[atom]; pos.x -= floor(pos.x*recipBoxVecX.x)*periodicBoxSize.x; pos.y -= floor(pos.y*recipBoxVecY.y)*periodicBoxSize.y; pos.z -= floor(pos.z*recipBoxVecZ.z)*periodicBoxSize.z; real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. real3 dr = make_real3(t.x-(int) t.x, t.y-(int) t.y, t.z-(int) t.z); data[PME_ORDER-1] = make_real3(0); data[1] = dr; data[0] = make_real3(1)-dr; for (int j = 3; j < PME_ORDER; j++) { real div = RECIP(j-1); data[j-1] = div*dr*data[j-2]; for (int k = 1; k < (j-1); k++) data[j-k-1] = div*((dr+make_real3(k))*data[j-k-2] + (make_real3(j-k)-dr)*data[j-k-1]); data[0] = div*(make_real3(1)-dr)*data[0]; } data[PME_ORDER-1] = scale*dr*data[PME_ORDER-2]; for (int j = 1; j < (PME_ORDER-1); j++) data[PME_ORDER-j-1] = scale*((dr+make_real3(j))*data[PME_ORDER-j-2] + (make_real3(PME_ORDER-j)-dr)*data[PME_ORDER-j-1]); data[0] = scale*(make_real3(1)-dr)*data[0]; // Spread the charge from this atom onto each grid point. for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = gridIndex.x+ix; xbase -= (xbase >= GRID_SIZE_X ? GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y*GRID_SIZE_Z; real dx = data[ix].x; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = gridIndex.y+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = xbase + ybase*GRID_SIZE_Z; real dy = data[iy].y; for (int iz = 0; iz < PME_ORDER; iz++) { int zindex = gridIndex.z+iz; zindex -= (zindex >= GRID_SIZE_Z ? 
GRID_SIZE_Z : 0); int index = ybase + zindex; real add = pos.w*dx*dy*data[iz].z; #ifdef USE_DOUBLE_PRECISION unsigned long long * ulonglong_p = (unsigned long long *) originalPmeGrid; atomicAdd(&ulonglong_p[index], static_cast<unsigned long long>((long long) (add*0x100000000))); #elif __CUDA_ARCH__ < 200 unsigned long long * ulonglong_p = (unsigned long long *) originalPmeGrid; int gridIndex = index; gridIndex = (gridIndex%2 == 0 ? gridIndex/2 : (gridIndex+GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z)/2); atomicAdd(&ulonglong_p[gridIndex], static_cast<unsigned long long>((long long) (add*0x100000000))); #else atomicAdd(&originalPmeGrid[index], add*EPSILON_FACTOR); #endif } } } } } extern "C" __global__ void finishSpreadCharge(long long* __restrict__ originalPmeGrid) { real* floatGrid = (real*) originalPmeGrid; const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; real scale = EPSILON_FACTOR/(real) 0x100000000; #ifdef USE_DOUBLE_PRECISION for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) floatGrid[index] = scale*originalPmeGrid[index]; #else for (int index = 2*(blockIdx.x*blockDim.x+threadIdx.x); index < gridSize; index += 2*blockDim.x*gridDim.x) { floatGrid[index] = scale*originalPmeGrid[index/2]; if (index+1 < gridSize) floatGrid[index+1] = scale*originalPmeGrid[(index+gridSize+1)/2]; } #endif } // convolutes on the halfcomplex_pmeGrid, which is of size NX*NY*(NZ/2+1) as F(Q) is conjugate symmetric extern "C" __global__ void reciprocalConvolution(real2* __restrict__ halfcomplex_pmeGrid, real* __restrict__ energyBuffer, const real* __restrict__ pmeBsplineModuliX, const real* __restrict__ pmeBsplineModuliY, const real* __restrict__ pmeBsplineModuliZ, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // R2C stores into a half complex matrix where the last dimension is cut by half const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*(GRID_SIZE_Z/2+1); const real recipScaleFactor = RECIP(M_PI*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { // real indices int kx = index/(GRID_SIZE_Y*(GRID_SIZE_Z/2+1)); int remainder = index-kx*GRID_SIZE_Y*(GRID_SIZE_Z/2+1); int ky = remainder/(GRID_SIZE_Z/2+1); int kz = remainder-ky*(GRID_SIZE_Z/2+1); int mx = (kx < (GRID_SIZE_X+1)/2) ? kx : (kx-GRID_SIZE_X); int my = (ky < (GRID_SIZE_Y+1)/2) ? ky : (ky-GRID_SIZE_Y); int mz = (kz < (GRID_SIZE_Z+1)/2) ? 
kz : (kz-GRID_SIZE_Z); real mhx = mx*recipBoxVecX.x; real mhy = mx*recipBoxVecY.x+my*recipBoxVecY.y; real mhz = mx*recipBoxVecZ.x+my*recipBoxVecZ.y+mz*recipBoxVecZ.z; real bx = pmeBsplineModuliX[kx]; real by = pmeBsplineModuliY[ky]; real bz = pmeBsplineModuliZ[kz]; real2 grid = halfcomplex_pmeGrid[index]; real m2 = mhx*mhx+mhy*mhy+mhz*mhz; real denom = m2*bx*by*bz; real eterm = recipScaleFactor*EXP(-RECIP_EXP_FACTOR*m2)/denom; if (kx != 0 || ky != 0 || kz != 0) { halfcomplex_pmeGrid[index] = make_real2(grid.x*eterm, grid.y*eterm); } } } extern "C" __global__ void gridEvaluateEnergy(real2* __restrict__ halfcomplex_pmeGrid, real* __restrict__ energyBuffer, const real* __restrict__ pmeBsplineModuliX, const real* __restrict__ pmeBsplineModuliY, const real* __restrict__ pmeBsplineModuliZ, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // R2C stores into a half complex matrix where the last dimension is cut by half const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; const real recipScaleFactor = RECIP(M_PI*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); real energy = 0; for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { // real indices int kx = index/(GRID_SIZE_Y*(GRID_SIZE_Z)); int remainder = index-kx*GRID_SIZE_Y*(GRID_SIZE_Z); int ky = remainder/(GRID_SIZE_Z); int kz = remainder-ky*(GRID_SIZE_Z); int mx = (kx < (GRID_SIZE_X+1)/2) ? kx : (kx-GRID_SIZE_X); int my = (ky < (GRID_SIZE_Y+1)/2) ? ky : (ky-GRID_SIZE_Y); int mz = (kz < (GRID_SIZE_Z+1)/2) ? kz : (kz-GRID_SIZE_Z); real mhx = mx*recipBoxVecX.x; real mhy = mx*recipBoxVecY.x+my*recipBoxVecY.y; real mhz = mx*recipBoxVecZ.x+my*recipBoxVecZ.y+mz*recipBoxVecZ.z; real m2 = mhx*mhx+mhy*mhy+mhz*mhz; real bx = pmeBsplineModuliX[kx]; real by = pmeBsplineModuliY[ky]; real bz = pmeBsplineModuliZ[kz]; real denom = m2*bx*by*bz; real eterm = recipScaleFactor*EXP(-RECIP_EXP_FACTOR*m2)/denom; if (kz >= (GRID_SIZE_Z/2+1)) { kx = ((kx == 0) ? kx : GRID_SIZE_X-kx); ky = ((ky == 0) ? ky : GRID_SIZE_Y-ky); kz = GRID_SIZE_Z-kz; } int indexInHalfComplexGrid = kz + ky*(GRID_SIZE_Z/2+1)+kx*(GRID_SIZE_Y*(GRID_SIZE_Z/2+1)); real2 grid = halfcomplex_pmeGrid[indexInHalfComplexGrid]; if (kx != 0 || ky != 0 || kz != 0) { energy += eterm*(grid.x*grid.x + grid.y*grid.y); } } energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += 0.5f*energy; } extern "C" __global__ void gridInterpolateForce(const real4* __restrict__ posq, unsigned long long* __restrict__ forceBuffers, const real* __restrict__ originalPmeGrid, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, const int2* __restrict__ pmeAtomGridIndex) { real3 data[PME_ORDER]; real3 ddata[PME_ORDER]; const real scale = RECIP(PME_ORDER-1); // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. 
for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int atom = pmeAtomGridIndex[i].x; real3 force = make_real3(0); real4 pos = posq[atom]; pos.x -= floor(pos.x*recipBoxVecX.x)*periodicBoxSize.x; pos.y -= floor(pos.y*recipBoxVecY.y)*periodicBoxSize.y; pos.z -= floor(pos.z*recipBoxVecZ.z)*periodicBoxSize.z; real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. real3 dr = make_real3(t.x-(int) t.x, t.y-(int) t.y, t.z-(int) t.z); data[PME_ORDER-1] = make_real3(0); data[1] = dr; data[0] = make_real3(1)-dr; for (int j = 3; j < PME_ORDER; j++) { real div = RECIP(j-1); data[j-1] = div*dr*data[j-2]; for (int k = 1; k < (j-1); k++) data[j-k-1] = div*((dr+make_real3(k))*data[j-k-2] + (make_real3(j-k)-dr)*data[j-k-1]); data[0] = div*(make_real3(1)-dr)*data[0]; } ddata[0] = -data[0]; for (int j = 1; j < PME_ORDER; j++) ddata[j] = data[j-1]-data[j]; data[PME_ORDER-1] = scale*dr*data[PME_ORDER-2]; for (int j = 1; j < (PME_ORDER-1); j++) data[PME_ORDER-j-1] = scale*((dr+make_real3(j))*data[PME_ORDER-j-2] + (make_real3(PME_ORDER-j)-dr)*data[PME_ORDER-j-1]); data[0] = scale*(make_real3(1)-dr)*data[0]; // Compute the force on this atom. for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = gridIndex.x+ix; xbase -= (xbase >= GRID_SIZE_X ? GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y*GRID_SIZE_Z; real dx = data[ix].x; real ddx = ddata[ix].x; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = gridIndex.y+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = xbase + ybase*GRID_SIZE_Z; real dy = data[iy].y; real ddy = ddata[iy].y; for (int iz = 0; iz < PME_ORDER; iz++) { int zindex = gridIndex.z+iz; zindex -= (zindex >= GRID_SIZE_Z ? GRID_SIZE_Z : 0); int index = ybase + zindex; real gridvalue = originalPmeGrid[index]; force.x += ddx*dy*data[iz].z*gridvalue; force.y += dx*ddy*data[iz].z*gridvalue; force.z += dx*dy*ddata[iz].z*gridvalue; } } } real q = pos.w*EPSILON_FACTOR; real forceX = -q*(force.x*GRID_SIZE_X*recipBoxVecX.x); real forceY = -q*(force.x*GRID_SIZE_X*recipBoxVecY.x+force.y*GRID_SIZE_Y*recipBoxVecY.y); real forceZ = -q*(force.x*GRID_SIZE_X*recipBoxVecZ.x+force.y*GRID_SIZE_Y*recipBoxVecZ.y+force.z*GRID_SIZE_Z*recipBoxVecZ.z); forceBuffers[atom] += static_cast<unsigned long long>((long long) (forceX*0x100000000)); forceBuffers[atom+PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (forceY*0x100000000)); forceBuffers[atom+2*PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (forceZ*0x100000000)); } } extern "C" __global__ void addForces(const real4* __restrict__ forces, unsigned long long* __restrict__ forceBuffers) { for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += blockDim.x*gridDim.x) { real4 f = forces[atom]; forceBuffers[atom] += static_cast<unsigned long long>((long long) (f.x*0x100000000)); forceBuffers[atom+PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (f.y*0x100000000)); forceBuffers[atom+2*PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (f.z*0x100000000)); } }
3ab9e06a7a047775267a82b7ece8625a2ac8fa23.cu
extern "C" __global__ void findAtomGridIndex(const real4* __restrict__ posq, int2* __restrict__ pmeAtomGridIndex, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // Compute the index of the grid point each atom is associated with. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { real4 pos = posq[i]; real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); pmeAtomGridIndex[i] = make_int2(i, gridIndex.x*GRID_SIZE_Y*GRID_SIZE_Z+gridIndex.y*GRID_SIZE_Z+gridIndex.z); } } extern "C" __global__ void gridSpreadCharge(const real4* __restrict__ posq, real* __restrict__ originalPmeGrid, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, const int2* __restrict__ pmeAtomGridIndex) { real3 data[PME_ORDER]; const real scale = RECIP(PME_ORDER-1); // Process the atoms in spatially sorted order. This improves efficiency when writing // the grid values. for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int atom = pmeAtomGridIndex[i].x; real4 pos = posq[atom]; pos.x -= floor(pos.x*recipBoxVecX.x)*periodicBoxSize.x; pos.y -= floor(pos.y*recipBoxVecY.y)*periodicBoxSize.y; pos.z -= floor(pos.z*recipBoxVecZ.z)*periodicBoxSize.z; real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. real3 dr = make_real3(t.x-(int) t.x, t.y-(int) t.y, t.z-(int) t.z); data[PME_ORDER-1] = make_real3(0); data[1] = dr; data[0] = make_real3(1)-dr; for (int j = 3; j < PME_ORDER; j++) { real div = RECIP(j-1); data[j-1] = div*dr*data[j-2]; for (int k = 1; k < (j-1); k++) data[j-k-1] = div*((dr+make_real3(k))*data[j-k-2] + (make_real3(j-k)-dr)*data[j-k-1]); data[0] = div*(make_real3(1)-dr)*data[0]; } data[PME_ORDER-1] = scale*dr*data[PME_ORDER-2]; for (int j = 1; j < (PME_ORDER-1); j++) data[PME_ORDER-j-1] = scale*((dr+make_real3(j))*data[PME_ORDER-j-2] + (make_real3(PME_ORDER-j)-dr)*data[PME_ORDER-j-1]); data[0] = scale*(make_real3(1)-dr)*data[0]; // Spread the charge from this atom onto each grid point. for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = gridIndex.x+ix; xbase -= (xbase >= GRID_SIZE_X ? GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y*GRID_SIZE_Z; real dx = data[ix].x; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = gridIndex.y+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = xbase + ybase*GRID_SIZE_Z; real dy = data[iy].y; for (int iz = 0; iz < PME_ORDER; iz++) { int zindex = gridIndex.z+iz; zindex -= (zindex >= GRID_SIZE_Z ? 
GRID_SIZE_Z : 0); int index = ybase + zindex; real add = pos.w*dx*dy*data[iz].z; #ifdef USE_DOUBLE_PRECISION unsigned long long * ulonglong_p = (unsigned long long *) originalPmeGrid; atomicAdd(&ulonglong_p[index], static_cast<unsigned long long>((long long) (add*0x100000000))); #elif __CUDA_ARCH__ < 200 unsigned long long * ulonglong_p = (unsigned long long *) originalPmeGrid; int gridIndex = index; gridIndex = (gridIndex%2 == 0 ? gridIndex/2 : (gridIndex+GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z)/2); atomicAdd(&ulonglong_p[gridIndex], static_cast<unsigned long long>((long long) (add*0x100000000))); #else atomicAdd(&originalPmeGrid[index], add*EPSILON_FACTOR); #endif } } } } } extern "C" __global__ void finishSpreadCharge(long long* __restrict__ originalPmeGrid) { real* floatGrid = (real*) originalPmeGrid; const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; real scale = EPSILON_FACTOR/(real) 0x100000000; #ifdef USE_DOUBLE_PRECISION for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) floatGrid[index] = scale*originalPmeGrid[index]; #else for (int index = 2*(blockIdx.x*blockDim.x+threadIdx.x); index < gridSize; index += 2*blockDim.x*gridDim.x) { floatGrid[index] = scale*originalPmeGrid[index/2]; if (index+1 < gridSize) floatGrid[index+1] = scale*originalPmeGrid[(index+gridSize+1)/2]; } #endif } // convolutes on the halfcomplex_pmeGrid, which is of size NX*NY*(NZ/2+1) as F(Q) is conjugate symmetric extern "C" __global__ void reciprocalConvolution(real2* __restrict__ halfcomplex_pmeGrid, real* __restrict__ energyBuffer, const real* __restrict__ pmeBsplineModuliX, const real* __restrict__ pmeBsplineModuliY, const real* __restrict__ pmeBsplineModuliZ, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // R2C stores into a half complex matrix where the last dimension is cut by half const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*(GRID_SIZE_Z/2+1); const real recipScaleFactor = RECIP(M_PI*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { // real indices int kx = index/(GRID_SIZE_Y*(GRID_SIZE_Z/2+1)); int remainder = index-kx*GRID_SIZE_Y*(GRID_SIZE_Z/2+1); int ky = remainder/(GRID_SIZE_Z/2+1); int kz = remainder-ky*(GRID_SIZE_Z/2+1); int mx = (kx < (GRID_SIZE_X+1)/2) ? kx : (kx-GRID_SIZE_X); int my = (ky < (GRID_SIZE_Y+1)/2) ? ky : (ky-GRID_SIZE_Y); int mz = (kz < (GRID_SIZE_Z+1)/2) ? 
kz : (kz-GRID_SIZE_Z); real mhx = mx*recipBoxVecX.x; real mhy = mx*recipBoxVecY.x+my*recipBoxVecY.y; real mhz = mx*recipBoxVecZ.x+my*recipBoxVecZ.y+mz*recipBoxVecZ.z; real bx = pmeBsplineModuliX[kx]; real by = pmeBsplineModuliY[ky]; real bz = pmeBsplineModuliZ[kz]; real2 grid = halfcomplex_pmeGrid[index]; real m2 = mhx*mhx+mhy*mhy+mhz*mhz; real denom = m2*bx*by*bz; real eterm = recipScaleFactor*EXP(-RECIP_EXP_FACTOR*m2)/denom; if (kx != 0 || ky != 0 || kz != 0) { halfcomplex_pmeGrid[index] = make_real2(grid.x*eterm, grid.y*eterm); } } } extern "C" __global__ void gridEvaluateEnergy(real2* __restrict__ halfcomplex_pmeGrid, real* __restrict__ energyBuffer, const real* __restrict__ pmeBsplineModuliX, const real* __restrict__ pmeBsplineModuliY, const real* __restrict__ pmeBsplineModuliZ, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ) { // R2C stores into a half complex matrix where the last dimension is cut by half const unsigned int gridSize = GRID_SIZE_X*GRID_SIZE_Y*GRID_SIZE_Z; const real recipScaleFactor = RECIP(M_PI*periodicBoxSize.x*periodicBoxSize.y*periodicBoxSize.z); real energy = 0; for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < gridSize; index += blockDim.x*gridDim.x) { // real indices int kx = index/(GRID_SIZE_Y*(GRID_SIZE_Z)); int remainder = index-kx*GRID_SIZE_Y*(GRID_SIZE_Z); int ky = remainder/(GRID_SIZE_Z); int kz = remainder-ky*(GRID_SIZE_Z); int mx = (kx < (GRID_SIZE_X+1)/2) ? kx : (kx-GRID_SIZE_X); int my = (ky < (GRID_SIZE_Y+1)/2) ? ky : (ky-GRID_SIZE_Y); int mz = (kz < (GRID_SIZE_Z+1)/2) ? kz : (kz-GRID_SIZE_Z); real mhx = mx*recipBoxVecX.x; real mhy = mx*recipBoxVecY.x+my*recipBoxVecY.y; real mhz = mx*recipBoxVecZ.x+my*recipBoxVecZ.y+mz*recipBoxVecZ.z; real m2 = mhx*mhx+mhy*mhy+mhz*mhz; real bx = pmeBsplineModuliX[kx]; real by = pmeBsplineModuliY[ky]; real bz = pmeBsplineModuliZ[kz]; real denom = m2*bx*by*bz; real eterm = recipScaleFactor*EXP(-RECIP_EXP_FACTOR*m2)/denom; if (kz >= (GRID_SIZE_Z/2+1)) { kx = ((kx == 0) ? kx : GRID_SIZE_X-kx); ky = ((ky == 0) ? ky : GRID_SIZE_Y-ky); kz = GRID_SIZE_Z-kz; } int indexInHalfComplexGrid = kz + ky*(GRID_SIZE_Z/2+1)+kx*(GRID_SIZE_Y*(GRID_SIZE_Z/2+1)); real2 grid = halfcomplex_pmeGrid[indexInHalfComplexGrid]; if (kx != 0 || ky != 0 || kz != 0) { energy += eterm*(grid.x*grid.x + grid.y*grid.y); } } energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += 0.5f*energy; } extern "C" __global__ void gridInterpolateForce(const real4* __restrict__ posq, unsigned long long* __restrict__ forceBuffers, const real* __restrict__ originalPmeGrid, real4 periodicBoxSize, real3 recipBoxVecX, real3 recipBoxVecY, real3 recipBoxVecZ, const int2* __restrict__ pmeAtomGridIndex) { real3 data[PME_ORDER]; real3 ddata[PME_ORDER]; const real scale = RECIP(PME_ORDER-1); // Process the atoms in spatially sorted order. This improves cache performance when loading // the grid values. 
for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) { int atom = pmeAtomGridIndex[i].x; real3 force = make_real3(0); real4 pos = posq[atom]; pos.x -= floor(pos.x*recipBoxVecX.x)*periodicBoxSize.x; pos.y -= floor(pos.y*recipBoxVecY.y)*periodicBoxSize.y; pos.z -= floor(pos.z*recipBoxVecZ.z)*periodicBoxSize.z; real3 t = make_real3(pos.x*recipBoxVecX.x+pos.y*recipBoxVecY.x+pos.z*recipBoxVecZ.x, pos.y*recipBoxVecY.y+pos.z*recipBoxVecZ.y, pos.z*recipBoxVecZ.z); t.x = (t.x-floor(t.x))*GRID_SIZE_X; t.y = (t.y-floor(t.y))*GRID_SIZE_Y; t.z = (t.z-floor(t.z))*GRID_SIZE_Z; int3 gridIndex = make_int3(((int) t.x) % GRID_SIZE_X, ((int) t.y) % GRID_SIZE_Y, ((int) t.z) % GRID_SIZE_Z); // Since we need the full set of thetas, it's faster to compute them here than load them // from global memory. real3 dr = make_real3(t.x-(int) t.x, t.y-(int) t.y, t.z-(int) t.z); data[PME_ORDER-1] = make_real3(0); data[1] = dr; data[0] = make_real3(1)-dr; for (int j = 3; j < PME_ORDER; j++) { real div = RECIP(j-1); data[j-1] = div*dr*data[j-2]; for (int k = 1; k < (j-1); k++) data[j-k-1] = div*((dr+make_real3(k))*data[j-k-2] + (make_real3(j-k)-dr)*data[j-k-1]); data[0] = div*(make_real3(1)-dr)*data[0]; } ddata[0] = -data[0]; for (int j = 1; j < PME_ORDER; j++) ddata[j] = data[j-1]-data[j]; data[PME_ORDER-1] = scale*dr*data[PME_ORDER-2]; for (int j = 1; j < (PME_ORDER-1); j++) data[PME_ORDER-j-1] = scale*((dr+make_real3(j))*data[PME_ORDER-j-2] + (make_real3(PME_ORDER-j)-dr)*data[PME_ORDER-j-1]); data[0] = scale*(make_real3(1)-dr)*data[0]; // Compute the force on this atom. for (int ix = 0; ix < PME_ORDER; ix++) { int xbase = gridIndex.x+ix; xbase -= (xbase >= GRID_SIZE_X ? GRID_SIZE_X : 0); xbase = xbase*GRID_SIZE_Y*GRID_SIZE_Z; real dx = data[ix].x; real ddx = ddata[ix].x; for (int iy = 0; iy < PME_ORDER; iy++) { int ybase = gridIndex.y+iy; ybase -= (ybase >= GRID_SIZE_Y ? GRID_SIZE_Y : 0); ybase = xbase + ybase*GRID_SIZE_Z; real dy = data[iy].y; real ddy = ddata[iy].y; for (int iz = 0; iz < PME_ORDER; iz++) { int zindex = gridIndex.z+iz; zindex -= (zindex >= GRID_SIZE_Z ? GRID_SIZE_Z : 0); int index = ybase + zindex; real gridvalue = originalPmeGrid[index]; force.x += ddx*dy*data[iz].z*gridvalue; force.y += dx*ddy*data[iz].z*gridvalue; force.z += dx*dy*ddata[iz].z*gridvalue; } } } real q = pos.w*EPSILON_FACTOR; real forceX = -q*(force.x*GRID_SIZE_X*recipBoxVecX.x); real forceY = -q*(force.x*GRID_SIZE_X*recipBoxVecY.x+force.y*GRID_SIZE_Y*recipBoxVecY.y); real forceZ = -q*(force.x*GRID_SIZE_X*recipBoxVecZ.x+force.y*GRID_SIZE_Y*recipBoxVecZ.y+force.z*GRID_SIZE_Z*recipBoxVecZ.z); forceBuffers[atom] += static_cast<unsigned long long>((long long) (forceX*0x100000000)); forceBuffers[atom+PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (forceY*0x100000000)); forceBuffers[atom+2*PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (forceZ*0x100000000)); } } extern "C" __global__ void addForces(const real4* __restrict__ forces, unsigned long long* __restrict__ forceBuffers) { for (int atom = blockIdx.x*blockDim.x+threadIdx.x; atom < NUM_ATOMS; atom += blockDim.x*gridDim.x) { real4 f = forces[atom]; forceBuffers[atom] += static_cast<unsigned long long>((long long) (f.x*0x100000000)); forceBuffers[atom+PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (f.y*0x100000000)); forceBuffers[atom+2*PADDED_NUM_ATOMS] += static_cast<unsigned long long>((long long) (f.z*0x100000000)); } }
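Both columns of the pair above accumulate per-atom forces into 64-bit buffers by scaling each value by 0x100000000 (2^32) and adding it as an integer, which makes the accumulation order-independent. The sketch below isolates that pattern under stated assumptions: the kernel and buffer names are illustrative and do not appear in the file, and atomicAdd is used where the file sometimes accumulates without atomics.

```cpp
// Illustrative sketch only: the scale-and-cast fixed-point accumulation used by
// gridSpreadCharge, gridInterpolateForce and addForces above, reduced to a toy
// kernel. The names accumulateForce/forceBuffer are not from the dataset.
__global__ void accumulateForce(unsigned long long *forceBuffer,
                                const float *contribution, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        // Scale to fixed point with 32 fractional bits (0x100000000 == 2^32).
        long long fixedPoint = (long long) (contribution[i] * 0x100000000);
        // Add as unsigned 64-bit; two's-complement wrap-around keeps negative
        // contributions correct when the buffer is later read back as signed.
        atomicAdd(&forceBuffer[i], static_cast<unsigned long long>(fixedPoint));
    }
}
// Host-side read-back (sketch): value = (long long) forceBuffer[i] / (double) 0x100000000;
```

The finishSpreadCharge kernel above performs the matching inverse scaling (EPSILON_FACTOR / 0x100000000) when converting the spread charge grid back to floating point.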
cb6054d101bcea85b81ebb366c88e8ee32bf5f11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zmergeidr.cu, normal z -> d, Sun Nov 20 20:20:40 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_d // These routines merge multiple kernels from didr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_didr_smoothing_1_kernel( int num_rows, int num_cols, double *drs, double *dr, double *dt ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: dt = drs - dr Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] drs magmaDouble_ptr vector @param[in] dr magmaDouble_ptr vector @param[in,out] dt magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_didr_smoothing_1( magma_int_t num_rows, magma_int_t num_cols, magmaDouble_ptr drs, magmaDouble_ptr dr, magmaDouble_ptr dt, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_didr_smoothing_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, drs, dr, dt ); return MAGMA_SUCCESS; } __global__ void magma_didr_smoothing_2_kernel( int num_rows, int num_cols, double omega, double *dx, double *dxs ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ] - omega * dx[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: dxs = dxs - gamma*(dxs-dx) Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] omega double scalar @param[in] dx magmaDouble_ptr vector @param[in,out] dxs magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_didr_smoothing_2( magma_int_t num_rows, magma_int_t num_cols, double omega, magmaDouble_ptr dx, magmaDouble_ptr dxs, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_didr_smoothing_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, omega, dx, dxs); return MAGMA_SUCCESS; }
cb6054d101bcea85b81ebb366c88e8ee32bf5f11.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zmergeidr.cu, normal z -> d, Sun Nov 20 20:20:40 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_d // These routines merge multiple kernels from didr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_didr_smoothing_1_kernel( int num_rows, int num_cols, double *drs, double *dr, double *dt ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: dt = drs - dr Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] drs magmaDouble_ptr vector @param[in] dr magmaDouble_ptr vector @param[in,out] dt magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_didr_smoothing_1( magma_int_t num_rows, magma_int_t num_cols, magmaDouble_ptr drs, magmaDouble_ptr dr, magmaDouble_ptr dt, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_didr_smoothing_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, drs, dr, dt ); return MAGMA_SUCCESS; } __global__ void magma_didr_smoothing_2_kernel( int num_rows, int num_cols, double omega, double *dx, double *dxs ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ] - omega * dx[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: dxs = dxs - gamma*(dxs-dx) Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] omega double scalar @param[in] dx magmaDouble_ptr vector @param[in,out] dxs magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_didr_smoothing_2( magma_int_t num_rows, magma_int_t num_cols, double omega, magmaDouble_ptr dx, magmaDouble_ptr dxs, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_didr_smoothing_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, omega, dx, dxs); return MAGMA_SUCCESS; }
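Comparing the .cu and .hip columns of this pair, the substantive change hipify makes is to the kernel launches: the CUDA triple-chevron syntax becomes an explicit hipLaunchKernelGGL call, plus the added hip_runtime.h include and the generated banner comment. A minimal sketch of that mapping, using a toy kernel rather than the MAGMA one (all names below are illustrative):

```cpp
#include <hip/hip_runtime.h>

__global__ void scale_kernel(int num_rows, double alpha, double *dx) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_rows)
        dx[i] *= alpha;
}

void launch_scale(int num_rows, double alpha, double *dx, hipStream_t stream) {
    dim3 Bs(512);
    dim3 Gs((num_rows + 511) / 512);
    // CUDA form, as in the .cu column:
    //   scale_kernel<<< Gs, Bs, 0, stream >>>(num_rows, alpha, dx);
    // HIP form emitted by hipify, as in the .hip column:
    //   hipLaunchKernelGGL(kernel, grid, block, dynamicSharedBytes, stream, kernel args...)
    hipLaunchKernelGGL(scale_kernel, Gs, Bs, 0, stream, num_rows, alpha, dx);
}
```

Note that in this pair hipify leaves the MAGMA stream accessor queue->cuda_stream() untouched; only the launch syntax and headers change.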
2cbbdd6672dee0b3ca5dcfd0f592e5f3dce21c62.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Parallel reduction kernels */ // #ifndef _REDUCE_KERNEL_H_ // #define _REDUCE_KERNEL_H_ #include <hip/device_functions.h> #include <kernelized_correlation_filters/threadFenceReduction_kernel.h> /* Parallel sum reduction using shared memory - takes log(n) steps for n input elements - uses n/2 threads - only works for power-of-2 arrays This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) See the CUDA SDK "reduction" sample for more information. */ template <unsigned int blockSize> __device__ void reduceBlock(volatile float *sdata, float mySum, const unsigned int tid) { sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } if (blockSize >= 32) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } if (blockSize >= 16) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } if (blockSize >= 8) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } if (blockSize >= 4) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } if (blockSize >= 2) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } } } template <unsigned int blockSize, bool nIsPow2> __device__ void reduceBlocks(const float *g_idata, float *g_odata, unsigned int n) { extern __shared__ float sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; float mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // do reduction in shared mem reduceBlock<blockSize>(sdata, mySum, tid); // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } template <unsigned int blockSize, bool nIsPow2> __global__ void reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n) { reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n); } // Global variable used by reduceSinglePass to count how many blocks have finished __device__ unsigned int retirementCount = 0; hipError_t setRetirementCount(int retCnt) { return hipMemcpyToSymbol(retirementCount, &retCnt, sizeof(unsigned int), 0, hipMemcpyHostToDevice); } // This reduction kernel reduces an arbitrary size array in a single kernel invocation // It does so by keeping track of how many blocks have finished. After each thread // block completes the reduction of its own block of data, it "takes a ticket" by // atomically incrementing a global counter. If the ticket value is equal to the number // of thread blocks, then the block holding the ticket knows that it is the last block // to finish. This last block is responsible for summing the results of all the other // blocks. // // In order for this to work, we must be sure that before a block takes a ticket, all // of its memory transactions have completed. This is what __threadfence() does -- it // blocks until the results of all outstanding memory transactions within the // calling thread are visible to all other threads. // // For more details on the reduction algorithm (notably the multi-pass approach), see // the "reduction" sample in the CUDA SDK. template <unsigned int blockSize, bool nIsPow2> __global__ void reduceSinglePass(const float *g_idata, float *g_odata, unsigned int n) { // // PHASE 1: Process all inputs assigned to this block // reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n); // // PHASE 2: Last block finished will process all partial sums // if (gridDim.x > 1) { const unsigned int tid = threadIdx.x; __shared__ bool amLast; extern float __shared__ smem[]; // wait until all outstanding memory instructions in this thread are finished __threadfence(); // Thread 0 takes a ticket if (tid==0) { unsigned int ticket = atomicInc(&retirementCount, gridDim.x); // If the ticket ID is equal to the number of blocks, we are the last block! amLast = (ticket == gridDim.x-1); } __syncthreads(); // The last block sums the results of all other blocks if (amLast) { int i = tid; float mySum = 0; while (i < gridDim.x) { mySum += g_odata[i]; i += blockSize; } reduceBlock<blockSize>(smem, mySum, tid); if (tid==0) { g_odata[0] = smem[0]; // reset retirement count so that next run succeeds retirementCount = 0; } } } } bool isPow2(unsigned int x) { return ((x&(x-1))==0); } //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// extern "C" void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = (threads <= 32) ? 
2 * threads * sizeof(float) : threads * sizeof(float); // choose which of the optimized versions of reduction to launch if (isPow2(size)) { switch (threads) { case 512: hipLaunchKernelGGL(( reduceMultiPass<512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduceMultiPass<256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduceMultiPass<128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduceMultiPass< 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 32: hipLaunchKernelGGL(( reduceMultiPass< 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 16: hipLaunchKernelGGL(( reduceMultiPass< 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 8: hipLaunchKernelGGL(( reduceMultiPass< 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: hipLaunchKernelGGL(( reduceMultiPass< 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( reduceMultiPass< 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( reduceMultiPass< 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; } } else { switch (threads) { case 512: hipLaunchKernelGGL(( reduceMultiPass<512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduceMultiPass<256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduceMultiPass<128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduceMultiPass< 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 32: hipLaunchKernelGGL(( reduceMultiPass< 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 16: hipLaunchKernelGGL(( reduceMultiPass< 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 8: hipLaunchKernelGGL(( reduceMultiPass< 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: hipLaunchKernelGGL(( reduceMultiPass< 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( reduceMultiPass< 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( reduceMultiPass< 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; } } } // extern "C" void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(float); // choose which of the optimized versions of reduction to launch if (isPow2(size)) { switch (threads) { case 512: hipLaunchKernelGGL(( reduceSinglePass<512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduceSinglePass<256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduceSinglePass<128, 
true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduceSinglePass< 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 32: hipLaunchKernelGGL(( reduceSinglePass< 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 16: hipLaunchKernelGGL(( reduceSinglePass< 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 8: hipLaunchKernelGGL(( reduceSinglePass< 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: hipLaunchKernelGGL(( reduceSinglePass< 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( reduceSinglePass< 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( reduceSinglePass< 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; } } else { switch (threads) { case 512: hipLaunchKernelGGL(( reduceSinglePass<512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduceSinglePass<256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduceSinglePass<128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduceSinglePass< 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 32: hipLaunchKernelGGL(( reduceSinglePass< 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 16: hipLaunchKernelGGL(( reduceSinglePass< 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 8: hipLaunchKernelGGL(( reduceSinglePass< 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: hipLaunchKernelGGL(( reduceSinglePass< 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( reduceSinglePass< 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( reduceSinglePass< 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; } } } // #endif // #ifndef _REDUCE_KERNEL_H_
2cbbdd6672dee0b3ca5dcfd0f592e5f3dce21c62.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Parallel reduction kernels */ // #ifndef _REDUCE_KERNEL_H_ // #define _REDUCE_KERNEL_H_ #include <device_functions.h> #include <kernelized_correlation_filters/threadFenceReduction_kernel.h> /* Parallel sum reduction using shared memory - takes log(n) steps for n input elements - uses n/2 threads - only works for power-of-2 arrays This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) See the CUDA SDK "reduction" sample for more information. */ template <unsigned int blockSize> __device__ void reduceBlock(volatile float *sdata, float mySum, const unsigned int tid) { sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } if (blockSize >= 32) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } if (blockSize >= 16) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } if (blockSize >= 8) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } if (blockSize >= 4) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } if (blockSize >= 2) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } } } template <unsigned int blockSize, bool nIsPow2> __device__ void reduceBlocks(const float *g_idata, float *g_odata, unsigned int n) { extern __shared__ float sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; float mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // do reduction in shared mem reduceBlock<blockSize>(sdata, mySum, tid); // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } template <unsigned int blockSize, bool nIsPow2> __global__ void reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n) { reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n); } // Global variable used by reduceSinglePass to count how many blocks have finished __device__ unsigned int retirementCount = 0; cudaError_t setRetirementCount(int retCnt) { return cudaMemcpyToSymbol(retirementCount, &retCnt, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); } // This reduction kernel reduces an arbitrary size array in a single kernel invocation // It does so by keeping track of how many blocks have finished. After each thread // block completes the reduction of its own block of data, it "takes a ticket" by // atomically incrementing a global counter. If the ticket value is equal to the number // of thread blocks, then the block holding the ticket knows that it is the last block // to finish. This last block is responsible for summing the results of all the other // blocks. // // In order for this to work, we must be sure that before a block takes a ticket, all // of its memory transactions have completed. This is what __threadfence() does -- it // blocks until the results of all outstanding memory transactions within the // calling thread are visible to all other threads. // // For more details on the reduction algorithm (notably the multi-pass approach), see // the "reduction" sample in the CUDA SDK. template <unsigned int blockSize, bool nIsPow2> __global__ void reduceSinglePass(const float *g_idata, float *g_odata, unsigned int n) { // // PHASE 1: Process all inputs assigned to this block // reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n); // // PHASE 2: Last block finished will process all partial sums // if (gridDim.x > 1) { const unsigned int tid = threadIdx.x; __shared__ bool amLast; extern float __shared__ smem[]; // wait until all outstanding memory instructions in this thread are finished __threadfence(); // Thread 0 takes a ticket if (tid==0) { unsigned int ticket = atomicInc(&retirementCount, gridDim.x); // If the ticket ID is equal to the number of blocks, we are the last block! amLast = (ticket == gridDim.x-1); } __syncthreads(); // The last block sums the results of all other blocks if (amLast) { int i = tid; float mySum = 0; while (i < gridDim.x) { mySum += g_odata[i]; i += blockSize; } reduceBlock<blockSize>(smem, mySum, tid); if (tid==0) { g_odata[0] = smem[0]; // reset retirement count so that next run succeeds retirementCount = 0; } } } } bool isPow2(unsigned int x) { return ((x&(x-1))==0); } //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// extern "C" void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = (threads <= 32) ? 
2 * threads * sizeof(float) : threads * sizeof(float); // choose which of the optimized versions of reduction to launch if (isPow2(size)) { switch (threads) { case 512: reduceMultiPass<512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduceMultiPass<256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduceMultiPass<128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduceMultiPass< 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduceMultiPass< 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduceMultiPass< 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduceMultiPass< 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduceMultiPass< 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduceMultiPass< 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduceMultiPass< 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } else { switch (threads) { case 512: reduceMultiPass<512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduceMultiPass<256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduceMultiPass<128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduceMultiPass< 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduceMultiPass< 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduceMultiPass< 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduceMultiPass< 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduceMultiPass< 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduceMultiPass< 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduceMultiPass< 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } } // extern "C" void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(float); // choose which of the optimized versions of reduction to launch if (isPow2(size)) { switch (threads) { case 512: reduceSinglePass<512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduceSinglePass<256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduceSinglePass<128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduceSinglePass< 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduceSinglePass< 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduceSinglePass< 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduceSinglePass< 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduceSinglePass< 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduceSinglePass< 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduceSinglePass< 1, true><<< dimGrid, dimBlock, 
smemSize >>>(d_idata, d_odata, size); break; } } else { switch (threads) { case 512: reduceSinglePass<512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduceSinglePass<256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduceSinglePass<128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduceSinglePass< 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduceSinglePass< 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduceSinglePass< 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduceSinglePass< 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduceSinglePass< 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduceSinglePass< 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduceSinglePass< 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } } // #endif // #ifndef _REDUCE_KERNEL_H_
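For orientation, the extern "C" reduce wrapper defined in this pair leaves one partial sum per block in d_odata, so a caller still has to finish the reduction (the reduceSinglePass path instead finishes it on the GPU via the retirementCount ticket). A hedged host-side usage sketch, with the helper name, sizes, and launch configuration assumed rather than taken from the file:

```cpp
#include <cuda_runtime.h>
#include <vector>

// Declared and defined in the file above.
extern "C" void reduce(int size, int threads, int blocks,
                       float *d_idata, float *d_odata);

float sumOnDevice(const float *h_data, int n) {
    const int threads = 256;
    const int blocks  = (n + threads * 2 - 1) / (threads * 2); // each thread sums >= 2 elements

    float *d_in = nullptr, *d_partial = nullptr;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_partial, blocks * sizeof(float));
    cudaMemcpy(d_in, h_data, n * sizeof(float), cudaMemcpyHostToDevice);

    // One pass of block-level sums; d_partial[b] holds the sum of block b's slice.
    reduce(n, threads, blocks, d_in, d_partial);

    // Finish the reduction on the host (the single-pass kernel above does this
    // final step on the GPU instead).
    std::vector<float> partial(blocks);
    cudaMemcpy(partial.data(), d_partial, blocks * sizeof(float), cudaMemcpyDeviceToHost);
    float total = 0.0f;
    for (float p : partial) total += p;

    cudaFree(d_in);
    cudaFree(d_partial);
    return total;
}
```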
870b172b035b1711d4dbfa20c8a8964cb56a305c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_tvd_kernel; int xdim0_tvd_kernel_h = -1; int ydim0_tvd_kernel_h = -1; __constant__ int xdim1_tvd_kernel; int xdim1_tvd_kernel_h = -1; int ydim1_tvd_kernel_h = -1; #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 #define OPS_ACC_MD0(d, x) ((x)*3 + (d)) #define OPS_ACC_MD1(d, x) ((x)*3 + (d)) // user function __device__ void tvd_kernel(const double *tht, double *ep2) { double maxim; for (int m = 0; m < 3; m++) { if (tht[OPS_ACC_MD0(m, 0)] > tht[OPS_ACC_MD0(m, 1)]) maxim = tht[OPS_ACC_MD0(m, 0)]; else maxim = tht[OPS_ACC_MD0(m, 1)]; ep2[OPS_ACC_MD1(m, 0)] = akap2 * maxim; } } #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 __global__ void ops_tvd_kernel(const double *__restrict arg0, double *__restrict arg1, int size0) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 3; arg1 += idx_x * 1 * 3; if (idx_x < size0) { tvd_kernel(arg0, arg1); } } // host stub function void ops_par_loop_tvd_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { // Timing double t1, t2, c1, c2; ops_arg args[2] = {arg0, arg1}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 2, range, 9)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(9, "tvd_kernel"); OPS_kernels[9].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[1]; int end[1]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 1; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 1; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; if (xdim0 != xdim0_tvd_kernel_h || xdim1 != xdim1_tvd_kernel_h) { hipMemcpyToSymbol(xdim0_tvd_kernel, &xdim0, sizeof(int)); xdim0_tvd_kernel_h = xdim0; hipMemcpyToSymbol(xdim1_tvd_kernel, &xdim1, sizeof(int)); xdim1_tvd_kernel_h = xdim1; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1); dim3 tblock(OPS_block_size_x, 1, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[2]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args, 2, range); if 
(OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[9].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_tvd_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], x_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[9].time += t1 - t2; } ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[9].mpi_time += t2 - t1; OPS_kernels[9].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[9].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
870b172b035b1711d4dbfa20c8a8964cb56a305c.cu
// // auto-generated by ops.py // __constant__ int xdim0_tvd_kernel; int xdim0_tvd_kernel_h = -1; int ydim0_tvd_kernel_h = -1; __constant__ int xdim1_tvd_kernel; int xdim1_tvd_kernel_h = -1; int ydim1_tvd_kernel_h = -1; #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 #define OPS_ACC_MD0(d, x) ((x)*3 + (d)) #define OPS_ACC_MD1(d, x) ((x)*3 + (d)) // user function __device__ void tvd_kernel(const double *tht, double *ep2) { double maxim; for (int m = 0; m < 3; m++) { if (tht[OPS_ACC_MD0(m, 0)] > tht[OPS_ACC_MD0(m, 1)]) maxim = tht[OPS_ACC_MD0(m, 0)]; else maxim = tht[OPS_ACC_MD0(m, 1)]; ep2[OPS_ACC_MD1(m, 0)] = akap2 * maxim; } } #undef OPS_ACC_MD0 #undef OPS_ACC_MD1 __global__ void ops_tvd_kernel(const double *__restrict arg0, double *__restrict arg1, int size0) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 3; arg1 += idx_x * 1 * 3; if (idx_x < size0) { tvd_kernel(arg0, arg1); } } // host stub function void ops_par_loop_tvd_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1) { // Timing double t1, t2, c1, c2; ops_arg args[2] = {arg0, arg1}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 2, range, 9)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(9, "tvd_kernel"); OPS_kernels[9].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[1]; int end[1]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 1; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 1; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; if (xdim0 != xdim0_tvd_kernel_h || xdim1 != xdim1_tvd_kernel_h) { cudaMemcpyToSymbol(xdim0_tvd_kernel, &xdim0, sizeof(int)); xdim0_tvd_kernel_h = xdim0; cudaMemcpyToSymbol(xdim1_tvd_kernel, &xdim1, sizeof(int)); xdim1_tvd_kernel_h = xdim1; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1); dim3 tblock(OPS_block_size_x, 1, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[2]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args, 2, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[9].mpi_time += t2 - t1; } // 
call kernel wrapper function, passing in pointers to data ops_tvd_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1], x_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[9].time += t1 - t2; } ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[9].mpi_time += t2 - t1; OPS_kernels[9].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[9].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
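// The pair of files above differs mainly in kernel-launch syntax: hipify rewrites the CUDA
// triple-chevron launch of ops_tvd_kernel into a hipLaunchKernelGGL call whose extra (0, 0)
// arguments are the dynamic shared-memory size and the stream. A minimal, self-contained
// sketch of that mapping (illustration only -- the kernel, sizes, and names below are made
// up for this sketch and are not part of the OPS code):
#include <hip/hip_runtime.h>

__global__ void scale_kernel(double *x, double a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1024;
  double *d_x;
  hipMalloc(&d_x, n * sizeof(double));
  dim3 grid((n + 255) / 256), block(256);
  // CUDA form (as in the .cu file):  scale_kernel<<<grid, block>>>(d_x, 2.0, n);
  // HIP form emitted by hipify (as in the .hip file):
  hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, d_x, 2.0, n);
  hipDeviceSynchronize();
  hipFree(d_x);
  return 0;
}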
58e5fe5d7cf7cee841f286d0a2b2caa08677e774.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ /* X, Y, Z */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-8); float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f; float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f; for (int __iter_0__=0; __iter_0__<N-1; __iter_0__+=((int)(FORMA_BLOCKDIM_X)-8)) { // Initialize the values int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*__iter_4__]; t2 = input[__iter_5__+N*(__iter_4__+M)]; } // Rest of the computation for (int __iter_2__ = 1; __iter_2__ < L-1; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){ b2 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2; t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) { float __temp_a3__ = 
(__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a7__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__); float __temp_a12__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__); float __temp_a17__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__); float __temp_a23__ = (__temp_a18__ + 0.165f * t2); float __temp_a28__ = (__temp_a23__ + 0.166f * b2); float __temp_a32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__); b3 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3; t3 = __temp_a33__; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) { float __temp_a50__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a54__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__); float __temp_a59__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__); float __temp_a64__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__); float __temp_a70__ = (__temp_a65__ + 0.165f * t3); float __temp_a75__ = (__temp_a70__ + 0.166f * b3); float __temp_a79__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__); b4 = __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t4; t4 = __temp_a80__; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))) { float __temp_a94__ = (__tilevar_4__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a95__ = (__tilevar_4__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__); float __temp_a97__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__); float __temp_a99__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__); float __temp_a102__ = (__temp_a100__ + 0.165f * t4); float __temp_a104__ = (__temp_a102__ + 0.166f * b4); float __temp_a105__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__); b5 = __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; 
__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t5; t5 = __temp_a106__; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2))) { float __temp_a120__ = (__tilevar_5__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a121__ = (__tilevar_5__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__); float __temp_a123__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__); float __temp_a125__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__); float __temp_a128__ = (__temp_a126__ + 0.165f * t5); float __temp_a130__ = (__temp_a128__ + 0.166f * b5); float __temp_a131__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__; } } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void j3d7pt(float * h_input, int L, int M, int N, float * __var_0__){ /* Host allocation Begin */ float * input; hipMalloc(&input,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input); } float * __var_1__; hipMalloc(&__var_1__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! 
: __var_1__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_1___kernel___forma_kernel__0__ = M; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 32; int __block_2___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = 1; int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-8); int __grid_2___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipPointerAttribute_t ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); } /*Host Free End*/
58e5fe5d7cf7cee841f286d0a2b2caa08677e774.cu
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); __kernel_init__<<<init_grid,init_block>>>(d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ /* X, Y, Z */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-8); float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f; float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f; for (int __iter_0__=0; __iter_0__<N-1; __iter_0__+=((int)(FORMA_BLOCKDIM_X)-8)) { // Initialize the values int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*__iter_4__]; t2 = input[__iter_5__+N*(__iter_4__+M)]; } // Rest of the computation for (int __iter_2__ = 1; __iter_2__ < L-1; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){ b2 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2; t2 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) { float __temp_a3__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a7__ = 
(__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__); float __temp_a12__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__); float __temp_a17__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__); float __temp_a23__ = (__temp_a18__ + 0.165f * t2); float __temp_a28__ = (__temp_a23__ + 0.166f * b2); float __temp_a32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__); b3 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3; t3 = __temp_a33__; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) { float __temp_a50__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a54__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__); float __temp_a59__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__); float __temp_a64__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__); float __temp_a70__ = (__temp_a65__ + 0.165f * t3); float __temp_a75__ = (__temp_a70__ + 0.166f * b3); float __temp_a79__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__); b4 = __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t4; t4 = __temp_a80__; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))) { float __temp_a94__ = (__tilevar_4__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a95__ = (__tilevar_4__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__); float __temp_a97__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__); float __temp_a99__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__); float __temp_a102__ = (__temp_a100__ + 0.165f * t4); float __temp_a104__ = (__temp_a102__ + 0.166f * b4); float __temp_a105__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__); b5 = __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t5; t5 = __temp_a106__; } 
__syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2))) { float __temp_a120__ = (__tilevar_5__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a121__ = (__tilevar_5__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__); float __temp_a123__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__); float __temp_a125__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__); float __temp_a128__ = (__temp_a126__ + 0.165f * t5); float __temp_a130__ = (__temp_a128__ + 0.166f * b5); float __temp_a131__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__; } } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void j3d7pt(float * h_input, int L, int M, int N, float * __var_0__){ /* Host allocation Begin */ float * input; cudaMalloc(&input,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input); } float * __var_1__; cudaMalloc(&__var_1__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! 
: __var_1__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_1___kernel___forma_kernel__0__ = M; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 32; int __block_2___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = 1; int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-8); int __grid_2___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); } /*Host Free End*/
8dbadfe75ffb7337bd915dd81d0efaf05ae053da.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <vector> #include <hipfft.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "debug.h" #include "timer.h" #include "utils_cuda.h" #include <hipfftXt.h> #include <hip/hip_fp16.h> #include "params.h" #include "results.h" #include "MSD_GPU_library.h" #include "nvml_run.h" #define PHS_NTHREADS 64 #define CT_CORNER_BLOCKS 1 #define CT_ROWS_PER_WARP 2 #define CT_NTHREADS 512 #define WARP 32 //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- __global__ void GPU_simple_power_and_interbin_kernel(float2 *d_input_complex, float *d_output_power, int nTimesamples, float norm){ int pos_x = blockIdx.x*blockDim.x + threadIdx.x; int pos_y = blockIdx.y*nTimesamples; float2 A; A.x = 0; A.y = 0; if( pos_x < nTimesamples ) { A = d_input_complex[pos_y + pos_x]; d_output_power[pos_y + pos_x] = (A.x*A.x + A.y*A.y)*norm; } } __global__ void corner_turn_SM_kernel(float const* __restrict__ d_input, float *d_output, int primary_size, int secondary_size) { __shared__ float s_input[WARP*(WARP+1)*CT_CORNER_BLOCKS]; int i, spos, itemp, pc, sc; size_t gpos; int warp_id = threadIdx.x>>5; int local_id = threadIdx.x & (WARP - 1); gpos=(size_t)((size_t)(blockIdx.y*(blockDim.x>>5)) + (size_t)warp_id)*CT_ROWS_PER_WARP*primary_size + (size_t)(blockIdx.x*CT_CORNER_BLOCKS*WARP) + (size_t)local_id; for(int by=0; by<CT_ROWS_PER_WARP; by++){ spos=local_id*WARP + local_id + warp_id*CT_ROWS_PER_WARP + by; for(int bx=0; bx<CT_CORNER_BLOCKS; bx++){ // temporary if(gpos<primary_size){ s_input[spos]=d_input[gpos]; } gpos=gpos + (size_t)WARP; spos=spos + WARP*(WARP+1); } gpos=gpos + (size_t)primary_size - (size_t)(CT_CORNER_BLOCKS*WARP); } __syncthreads(); itemp=warp_id*CT_ROWS_PER_WARP*CT_CORNER_BLOCKS; for(i=0; i<CT_ROWS_PER_WARP*CT_CORNER_BLOCKS; i++){ pc = (blockIdx.x*CT_CORNER_BLOCKS*WARP + itemp + i); sc = WARP*blockIdx.y + local_id; if( pc<primary_size && sc<secondary_size ) { gpos=(size_t)(pc*secondary_size) + (size_t)sc; spos=(itemp + i)*(WARP+1) + local_id; d_output[gpos]=s_input[spos]; } } } __global__ void PHS_GPU_kernel(float const* __restrict__ d_input, float *d_output_SNR, ushort *d_output_harmonics, float *d_MSD, int nTimesamples, int nSpectra, int nHarmonics){ float HS_value, temp_SNR, SNR; ushort max_SNR_harmonic; int pos; // reading 0th harmonic, i.e. 
fundamental frequency pos = blockIdx.x*nSpectra + blockIdx.y*blockDim.x + threadIdx.x; if( (blockIdx.y*blockDim.x + threadIdx.x)<nSpectra ){ HS_value = __ldg(&d_input[pos]); SNR = (HS_value - __ldg(&d_MSD[0]))/(__ldg(&d_MSD[1])); max_SNR_harmonic = 0; if(blockIdx.x>0) { for(int f=1; f<nHarmonics; f++) { if( (blockIdx.x + f*blockIdx.x)<nTimesamples ) { pos = (blockIdx.x + f*blockIdx.x)*nSpectra + blockIdx.y*blockDim.x + threadIdx.x; HS_value = HS_value + __ldg(&d_input[pos]); temp_SNR = (HS_value - __ldg(&d_MSD[f*2]))/(__ldg(&d_MSD[2*f+1])); //assuming white noise if(temp_SNR > SNR) { SNR = temp_SNR; max_SNR_harmonic = f; } } } } pos = blockIdx.x*nSpectra + blockIdx.y*blockDim.x + threadIdx.x; d_output_SNR[pos] = SNR; d_output_harmonics[pos] = max_SNR_harmonic; } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- int Initiate_device(int device){ int devCount; hipGetDeviceCount(&devCount); if(device<devCount) { hipSetDevice(device); return(0); } else return(1); } int Check_free_memory(size_t total_input_FFT_size, size_t total_output_FFT_size){ hipError_t err_code; size_t free_mem, total_mem; err_code = hipMemGetInfo(&free_mem,&total_mem); if(err_code!=hipSuccess) { printf("CUDA ERROR!\n"); return(1); } if(free_mem<(total_input_FFT_size+total_output_FFT_size)) { printf("ERROR: Not enough GPU memory\n"); return(1); } return(0); } double stdev(std::vector<double> *times, double mean_time){ double sum = 0; for(size_t i=0; i<times->size(); i++){ double x = (times->operator[](i)-mean_time); sum = sum + x*x; } double stdev = sqrt( sum/((double) times->size()) ); return(stdev); } // *********************************************************************************** // *********************************************************************************** // *********************************************************************************** int Calculate_GPU_HRMS(float2 *h_input, float *h_output, Performance_results *HRMS_results, int device, int core_clock){ Initiate_device(device); int nElements = HRMS_results->nElements; int nHarmonics = HRMS_results->nHarmonics; int nSeries = HRMS_results->nSeries; int nRuns = HRMS_results->nRuns; size_t input_size = nElements*nSeries*sizeof(float2); size_t power_size = nElements*nSeries*sizeof(float); size_t output_size = nElements*nSeries; GpuTimer timer, total_timer; total_timer.Start(); float2 *d_input; float *d_power; float *d_output_SNR; ushort *d_output_harmonics; hipError_t err_code; err_code = hipMalloc((void **) &d_input, input_size); if(err_code!=hipSuccess) { printf("\nError in allocation of the device memory!\n"); return(1); } err_code = hipMalloc((void **) &d_power, power_size); if(err_code!=hipSuccess) { printf("\nError in allocation of the device memory!\n"); return(1); } err_code = hipMalloc((void **) &d_output_SNR, output_size*sizeof(float)); if(err_code!=hipSuccess) { printf("\nError in allocation of the device memory!\n"); return(1); } err_code = hipMalloc((void **) &d_output_harmonics, output_size*sizeof(ushort)); if(err_code!=hipSuccess) { printf("\nError in allocation of the device memory!\n"); return(1); } err_code = hipMemcpy(d_input, h_input, input_size, hipMemcpyHostToDevice); if(err_code!=hipSuccess) { printf("\nError in allocation of the device memory!\n"); return(1); } //-------------------- cuFFT -----------------> hipfftHandle plan; hipfftResult cuFFT_error; cuFFT_error = hipfftPlan1d(&plan, nElements, HIPFFT_C2C, nSeries); 
double FFT_execution_time = 0; if (HIPFFT_SUCCESS == cuFFT_error) { if (core_clock >0) nvml_setup(device, core_clock); for(int f=0; f<nRuns; f++){ timer.Start(); hipfftExecC2C(plan, (hipfftComplex *) d_input, (hipfftComplex *) d_input, HIPFFT_FORWARD); timer.Stop(); FFT_execution_time += timer.Elapsed(); } // stop before reset to default; kernel call is async hipDeviceSynchronize(); if (core_clock > 0) nvml_reset(); FFT_execution_time = FFT_execution_time/((double) nRuns); HRMS_results->GPU_FFT_time = FFT_execution_time; } else printf("CUFFT error: Plan creation failed"); hipfftDestroy(plan); //--------------------------------------------< //------------- Power calculation ------------> int power_blocks_x, power_blocks_y; power_blocks_x = (nElements + 256 - 1)/256; power_blocks_y = nSeries; dim3 power_blockDim(256, 1, 1); dim3 power_gridSize(power_blocks_x ,power_blocks_y , 1); FFT_execution_time = 0; for(int f=0; f<nRuns; f++){ timer.Start(); hipLaunchKernelGGL(( GPU_simple_power_and_interbin_kernel), dim3(power_gridSize) , dim3(power_blockDim) , 0, 0, d_input, d_power, nElements, 1); timer.Stop(); FFT_execution_time += timer.Elapsed(); } FFT_execution_time = FFT_execution_time/((double) nRuns); HRMS_results->GPU_MSD_time = FFT_execution_time; //--------------------------------------------< //--------------------- MSD ------------------> int nBatches = 1; int MSD_size = MSD_RESULTS_SIZE*nBatches*sizeof(float); int MSD_elements_size = nBatches*sizeof(size_t); float *d_MSD; size_t *d_MSD_nElements; if ( hipSuccess != hipMalloc((void **) &d_MSD, MSD_size)) { printf("CUDA API error while allocating GPU memory\n"); } if ( hipSuccess != hipMalloc((void **) &d_MSD_nElements, MSD_elements_size)) { printf("CUDA API error while allocating GPU memory\n"); } MSD_Error MSD_error; MSD_Configuration MSD_conf; std::vector<size_t> dimensions={ (size_t) nSeries, (size_t) nElements}; bool outlier_rejection = false; int offset = 0; double outlier_rejection_sigma = 3.0; MSD_error = MSD_conf.Create_MSD_Plan(dimensions, offset, outlier_rejection, outlier_rejection_sigma, nBatches); if(MSD_error!=MSDSuccess) Get_MSD_Error(MSD_error); FFT_execution_time = 0; for(int f=0; f<nRuns; f++){ timer.Start(); MSD_error = MSD_GetMeanStdev(d_MSD, d_MSD_nElements, d_power, MSD_conf); timer.Stop(); FFT_execution_time += timer.Elapsed(); } FFT_execution_time = FFT_execution_time/((double) nRuns); HRMS_results->GPU_MSD_time += FFT_execution_time; if(MSD_error!=MSDSuccess) Get_MSD_Error(MSD_error); //--------------------------------------------< //--------------- Harmonic Sum ---------------> int CT_nBlocks_x, CT_nBlocks_y; int Elements_per_block=CT_CORNER_BLOCKS*WARP; CT_nBlocks_x = (nElements + Elements_per_block - 1)/Elements_per_block; CT_nBlocks_y = (nSeries + WARP + 1)/WARP; dim3 CT_gridSize(CT_nBlocks_x, CT_nBlocks_y, 1); dim3 CT_blockSize(CT_NTHREADS, 1, 1); FFT_execution_time = 0; for(int f=0; f<nRuns; f++){ timer.Start(); hipLaunchKernelGGL(( corner_turn_SM_kernel), dim3(CT_gridSize), dim3(CT_blockSize) , 0, 0, d_power, (float *) d_input, nElements, nSeries); timer.Stop(); FFT_execution_time += timer.Elapsed(); } FFT_execution_time = FFT_execution_time/((double) nRuns); HRMS_results->GPU_HRMS_time = FFT_execution_time; int HRMS_nBlocks_x, HRMS_nBlocks_y; HRMS_nBlocks_x = nElements; HRMS_nBlocks_y = (nSeries + PHS_NTHREADS - 1)/PHS_NTHREADS; dim3 HRMS_gridSize(HRMS_nBlocks_x, HRMS_nBlocks_y, 1); dim3 HRMS_blockSize(PHS_NTHREADS, 1, 1); FFT_execution_time = 0; for(int f=0; f<nRuns; f++){ timer.Start(); 
hipLaunchKernelGGL(( PHS_GPU_kernel), dim3(HRMS_gridSize), dim3(HRMS_blockSize) , 0, 0, (float *) d_input, d_output_SNR, d_output_harmonics, d_MSD, nElements, nSeries, nHarmonics); timer.Stop(); FFT_execution_time += timer.Elapsed(); } FFT_execution_time = FFT_execution_time/((double) nRuns); HRMS_results->GPU_HRMS_time += FFT_execution_time; //--------------------------------------------< total_timer.Stop(); HRMS_results->GPU_total_time = HRMS_results->GPU_HRMS_time + HRMS_results->GPU_MSD_time + HRMS_results->GPU_FFT_time; hipFree(d_MSD); hipFree(d_MSD_nElements); hipFree(d_input); hipFree(d_power); hipFree(d_output_SNR); hipFree(d_output_harmonics); return(0); }
8dbadfe75ffb7337bd915dd81d0efaf05ae053da.cu
#include <iostream> #include <fstream> #include <vector> #include <cufft.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include "debug.h" #include "timer.h" #include "utils_cuda.h" #include <cufftXt.h> #include <cuda_fp16.h> #include "params.h" #include "results.h" #include "MSD_GPU_library.h" #include "nvml_run.h" #define PHS_NTHREADS 64 #define CT_CORNER_BLOCKS 1 #define CT_ROWS_PER_WARP 2 #define CT_NTHREADS 512 #define WARP 32 //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- __global__ void GPU_simple_power_and_interbin_kernel(float2 *d_input_complex, float *d_output_power, int nTimesamples, float norm){ int pos_x = blockIdx.x*blockDim.x + threadIdx.x; int pos_y = blockIdx.y*nTimesamples; float2 A; A.x = 0; A.y = 0; if( pos_x < nTimesamples ) { A = d_input_complex[pos_y + pos_x]; d_output_power[pos_y + pos_x] = (A.x*A.x + A.y*A.y)*norm; } } __global__ void corner_turn_SM_kernel(float const* __restrict__ d_input, float *d_output, int primary_size, int secondary_size) { __shared__ float s_input[WARP*(WARP+1)*CT_CORNER_BLOCKS]; int i, spos, itemp, pc, sc; size_t gpos; int warp_id = threadIdx.x>>5; int local_id = threadIdx.x & (WARP - 1); gpos=(size_t)((size_t)(blockIdx.y*(blockDim.x>>5)) + (size_t)warp_id)*CT_ROWS_PER_WARP*primary_size + (size_t)(blockIdx.x*CT_CORNER_BLOCKS*WARP) + (size_t)local_id; for(int by=0; by<CT_ROWS_PER_WARP; by++){ spos=local_id*WARP + local_id + warp_id*CT_ROWS_PER_WARP + by; for(int bx=0; bx<CT_CORNER_BLOCKS; bx++){ // temporary if(gpos<primary_size){ s_input[spos]=d_input[gpos]; } gpos=gpos + (size_t)WARP; spos=spos + WARP*(WARP+1); } gpos=gpos + (size_t)primary_size - (size_t)(CT_CORNER_BLOCKS*WARP); } __syncthreads(); itemp=warp_id*CT_ROWS_PER_WARP*CT_CORNER_BLOCKS; for(i=0; i<CT_ROWS_PER_WARP*CT_CORNER_BLOCKS; i++){ pc = (blockIdx.x*CT_CORNER_BLOCKS*WARP + itemp + i); sc = WARP*blockIdx.y + local_id; if( pc<primary_size && sc<secondary_size ) { gpos=(size_t)(pc*secondary_size) + (size_t)sc; spos=(itemp + i)*(WARP+1) + local_id; d_output[gpos]=s_input[spos]; } } } __global__ void PHS_GPU_kernel(float const* __restrict__ d_input, float *d_output_SNR, ushort *d_output_harmonics, float *d_MSD, int nTimesamples, int nSpectra, int nHarmonics){ float HS_value, temp_SNR, SNR; ushort max_SNR_harmonic; int pos; // reading 0th harmonic, i.e. 
fundamental frequency pos = blockIdx.x*nSpectra + blockIdx.y*blockDim.x + threadIdx.x; if( (blockIdx.y*blockDim.x + threadIdx.x)<nSpectra ){ HS_value = __ldg(&d_input[pos]); SNR = (HS_value - __ldg(&d_MSD[0]))/(__ldg(&d_MSD[1])); max_SNR_harmonic = 0; if(blockIdx.x>0) { for(int f=1; f<nHarmonics; f++) { if( (blockIdx.x + f*blockIdx.x)<nTimesamples ) { pos = (blockIdx.x + f*blockIdx.x)*nSpectra + blockIdx.y*blockDim.x + threadIdx.x; HS_value = HS_value + __ldg(&d_input[pos]); temp_SNR = (HS_value - __ldg(&d_MSD[f*2]))/(__ldg(&d_MSD[2*f+1])); //assuming white noise if(temp_SNR > SNR) { SNR = temp_SNR; max_SNR_harmonic = f; } } } } pos = blockIdx.x*nSpectra + blockIdx.y*blockDim.x + threadIdx.x; d_output_SNR[pos] = SNR; d_output_harmonics[pos] = max_SNR_harmonic; } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- int Initiate_device(int device){ int devCount; cudaGetDeviceCount(&devCount); if(device<devCount) { cudaSetDevice(device); return(0); } else return(1); } int Check_free_memory(size_t total_input_FFT_size, size_t total_output_FFT_size){ cudaError_t err_code; size_t free_mem, total_mem; err_code = cudaMemGetInfo(&free_mem,&total_mem); if(err_code!=cudaSuccess) { printf("CUDA ERROR!\n"); return(1); } if(free_mem<(total_input_FFT_size+total_output_FFT_size)) { printf("ERROR: Not enough GPU memory\n"); return(1); } return(0); } double stdev(std::vector<double> *times, double mean_time){ double sum = 0; for(size_t i=0; i<times->size(); i++){ double x = (times->operator[](i)-mean_time); sum = sum + x*x; } double stdev = sqrt( sum/((double) times->size()) ); return(stdev); } // *********************************************************************************** // *********************************************************************************** // *********************************************************************************** int Calculate_GPU_HRMS(float2 *h_input, float *h_output, Performance_results *HRMS_results, int device, int core_clock){ Initiate_device(device); int nElements = HRMS_results->nElements; int nHarmonics = HRMS_results->nHarmonics; int nSeries = HRMS_results->nSeries; int nRuns = HRMS_results->nRuns; size_t input_size = nElements*nSeries*sizeof(float2); size_t power_size = nElements*nSeries*sizeof(float); size_t output_size = nElements*nSeries; GpuTimer timer, total_timer; total_timer.Start(); float2 *d_input; float *d_power; float *d_output_SNR; ushort *d_output_harmonics; cudaError_t err_code; err_code = cudaMalloc((void **) &d_input, input_size); if(err_code!=cudaSuccess) { printf("\nError in allocation of the device memory!\n"); return(1); } err_code = cudaMalloc((void **) &d_power, power_size); if(err_code!=cudaSuccess) { printf("\nError in allocation of the device memory!\n"); return(1); } err_code = cudaMalloc((void **) &d_output_SNR, output_size*sizeof(float)); if(err_code!=cudaSuccess) { printf("\nError in allocation of the device memory!\n"); return(1); } err_code = cudaMalloc((void **) &d_output_harmonics, output_size*sizeof(ushort)); if(err_code!=cudaSuccess) { printf("\nError in allocation of the device memory!\n"); return(1); } err_code = cudaMemcpy(d_input, h_input, input_size, cudaMemcpyHostToDevice); if(err_code!=cudaSuccess) { printf("\nError in allocation of the device memory!\n"); return(1); } //-------------------- cuFFT -----------------> cufftHandle plan; cufftResult cuFFT_error; cuFFT_error = cufftPlan1d(&plan, nElements, CUFFT_C2C, 
nSeries); double FFT_execution_time = 0; if (CUFFT_SUCCESS == cuFFT_error) { if (core_clock >0) nvml_setup(device, core_clock); for(int f=0; f<nRuns; f++){ timer.Start(); cufftExecC2C(plan, (cufftComplex *) d_input, (cufftComplex *) d_input, CUFFT_FORWARD); timer.Stop(); FFT_execution_time += timer.Elapsed(); } // stop before reset to default; kernel call is async cudaDeviceSynchronize(); if (core_clock > 0) nvml_reset(); FFT_execution_time = FFT_execution_time/((double) nRuns); HRMS_results->GPU_FFT_time = FFT_execution_time; } else printf("CUFFT error: Plan creation failed"); cufftDestroy(plan); //--------------------------------------------< //------------- Power calculation ------------> int power_blocks_x, power_blocks_y; power_blocks_x = (nElements + 256 - 1)/256; power_blocks_y = nSeries; dim3 power_blockDim(256, 1, 1); dim3 power_gridSize(power_blocks_x ,power_blocks_y , 1); FFT_execution_time = 0; for(int f=0; f<nRuns; f++){ timer.Start(); GPU_simple_power_and_interbin_kernel<<< power_gridSize , power_blockDim >>>(d_input, d_power, nElements, 1); timer.Stop(); FFT_execution_time += timer.Elapsed(); } FFT_execution_time = FFT_execution_time/((double) nRuns); HRMS_results->GPU_MSD_time = FFT_execution_time; //--------------------------------------------< //--------------------- MSD ------------------> int nBatches = 1; int MSD_size = MSD_RESULTS_SIZE*nBatches*sizeof(float); int MSD_elements_size = nBatches*sizeof(size_t); float *d_MSD; size_t *d_MSD_nElements; if ( cudaSuccess != cudaMalloc((void **) &d_MSD, MSD_size)) { printf("CUDA API error while allocating GPU memory\n"); } if ( cudaSuccess != cudaMalloc((void **) &d_MSD_nElements, MSD_elements_size)) { printf("CUDA API error while allocating GPU memory\n"); } MSD_Error MSD_error; MSD_Configuration MSD_conf; std::vector<size_t> dimensions={ (size_t) nSeries, (size_t) nElements}; bool outlier_rejection = false; int offset = 0; double outlier_rejection_sigma = 3.0; MSD_error = MSD_conf.Create_MSD_Plan(dimensions, offset, outlier_rejection, outlier_rejection_sigma, nBatches); if(MSD_error!=MSDSuccess) Get_MSD_Error(MSD_error); FFT_execution_time = 0; for(int f=0; f<nRuns; f++){ timer.Start(); MSD_error = MSD_GetMeanStdev(d_MSD, d_MSD_nElements, d_power, MSD_conf); timer.Stop(); FFT_execution_time += timer.Elapsed(); } FFT_execution_time = FFT_execution_time/((double) nRuns); HRMS_results->GPU_MSD_time += FFT_execution_time; if(MSD_error!=MSDSuccess) Get_MSD_Error(MSD_error); //--------------------------------------------< //--------------- Harmonic Sum ---------------> int CT_nBlocks_x, CT_nBlocks_y; int Elements_per_block=CT_CORNER_BLOCKS*WARP; CT_nBlocks_x = (nElements + Elements_per_block - 1)/Elements_per_block; CT_nBlocks_y = (nSeries + WARP + 1)/WARP; dim3 CT_gridSize(CT_nBlocks_x, CT_nBlocks_y, 1); dim3 CT_blockSize(CT_NTHREADS, 1, 1); FFT_execution_time = 0; for(int f=0; f<nRuns; f++){ timer.Start(); corner_turn_SM_kernel<<< CT_gridSize, CT_blockSize >>>(d_power, (float *) d_input, nElements, nSeries); timer.Stop(); FFT_execution_time += timer.Elapsed(); } FFT_execution_time = FFT_execution_time/((double) nRuns); HRMS_results->GPU_HRMS_time = FFT_execution_time; int HRMS_nBlocks_x, HRMS_nBlocks_y; HRMS_nBlocks_x = nElements; HRMS_nBlocks_y = (nSeries + PHS_NTHREADS - 1)/PHS_NTHREADS; dim3 HRMS_gridSize(HRMS_nBlocks_x, HRMS_nBlocks_y, 1); dim3 HRMS_blockSize(PHS_NTHREADS, 1, 1); FFT_execution_time = 0; for(int f=0; f<nRuns; f++){ timer.Start(); PHS_GPU_kernel<<< HRMS_gridSize, HRMS_blockSize >>>((float *) d_input, 
d_output_SNR, d_output_harmonics, d_MSD, nElements, nSeries, nHarmonics); timer.Stop(); FFT_execution_time += timer.Elapsed(); } FFT_execution_time = FFT_execution_time/((double) nRuns); HRMS_results->GPU_HRMS_time += FFT_execution_time; //--------------------------------------------< total_timer.Stop(); HRMS_results->GPU_total_time = HRMS_results->GPU_HRMS_time + HRMS_results->GPU_MSD_time + HRMS_results->GPU_FFT_time; cudaFree(d_MSD); cudaFree(d_MSD_nElements); cudaFree(d_input); cudaFree(d_power); cudaFree(d_output_SNR); cudaFree(d_output_harmonics); return(0); }
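// Note on the benchmark above: Calculate_GPU_HRMS accepts h_output but never copies the SNR
// results back to it; only timings are recorded. If the detections were needed on the host,
// a helper along these lines (an illustrative assumption, not part of the original code,
// relying on the cuda_runtime/stdio headers already pulled in above) could be called with
// d_output_SNR and nElements*nSeries before the cudaFree calls:
static int copy_snr_to_host(float *h_output, const float *d_output_SNR, size_t nValues) {
  cudaError_t err = cudaMemcpy(h_output, d_output_SNR, nValues * sizeof(float),
                               cudaMemcpyDeviceToHost);
  if (err != cudaSuccess) {
    printf("Error copying SNR results to host: %s\n", cudaGetErrorString(err));
    return 1;
  }
  return 0;
}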
4d8ee17cfaf4be9a3a7627b3fca47c9cda3285af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <pthread.h> #include <stdio.h> const int N = 1 << 20; __global__ void kernel(float *x, int n) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < n; i += blockDim.x * gridDim.x) { x[i] = sqrt(pow(3.14159,i)); } } void *launch_kernel(void *dummy) { float *data; hipMalloc(&data, N * sizeof(float)); hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, 0, data, N); hipStreamSynchronize(0); return NULL; } int main() { const int num_threads = 8; pthread_t threads[num_threads]; for (int i = 0; i < num_threads; i++) { if (pthread_create(&threads[i], NULL, launch_kernel, 0)) { fprintf(stderr, "Error creating threadn"); return 1; } } for (int i = 0; i < num_threads; i++) { if(pthread_join(threads[i], NULL)) { fprintf(stderr, "Error joining threadn"); return 2; } } hipDeviceReset(); return 0; }
4d8ee17cfaf4be9a3a7627b3fca47c9cda3285af.cu
#include <pthread.h>
#include <stdio.h>

const int N = 1 << 20;

__global__ void kernel(float *x, int n)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
        x[i] = sqrt(pow(3.14159, i));
    }
}

void *launch_kernel(void *dummy)
{
    float *data;
    cudaMalloc(&data, N * sizeof(float));

    kernel<<<1, 64>>>(data, N);

    cudaStreamSynchronize(0);

    return NULL;
}

int main()
{
    const int num_threads = 8;
    pthread_t threads[num_threads];

    for (int i = 0; i < num_threads; i++) {
        if (pthread_create(&threads[i], NULL, launch_kernel, 0)) {
            fprintf(stderr, "Error creating thread\n");
            return 1;
        }
    }

    for (int i = 0; i < num_threads; i++) {
        if (pthread_join(threads[i], NULL)) {
            fprintf(stderr, "Error joining thread\n");
            return 2;
        }
    }

    cudaDeviceReset();

    return 0;
}
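// The example above issues every launch on the legacy default stream, so the eight host
// threads' kernels serialize unless the code is compiled with nvcc --default-stream per-thread.
// A variant that gives each host thread its own explicit stream (a sketch reusing `kernel` and
// `N` from the file above; not part of the original example) removes that dependence on the
// compiler flag and also frees the allocation:
void *launch_kernel_streamed(void *dummy)
{
    float *data;
    cudaStream_t stream;

    cudaStreamCreate(&stream);
    cudaMalloc(&data, N * sizeof(float));

    kernel<<<1, 64, 0, stream>>>(data, N);   // same kernel, now on a private stream

    cudaStreamSynchronize(stream);
    cudaStreamDestroy(stream);
    cudaFree(data);

    return NULL;
}
// Usage: pass launch_kernel_streamed instead of launch_kernel to pthread_create.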
3a7d978987c5d3bf2f56543f007ab44397c54569.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>

#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>

#include "CycleTimer.h"

#define THREADS_PER_BLOCK 256

// helper function to round an integer up to the next power of 2
static inline int nextPow2(int n) {
    n--;
    n |= n >> 1;
    n |= n >> 2;
    n |= n >> 4;
    n |= n >> 8;
    n |= n >> 16;
    n++;
    return n;
}

__global__ void scanUp(int *input, int rounded, int N, int i) {
    // Upsweep (reduce) phase: each thread folds one pair of partial sums at level i.
    int index = (blockIdx.x * blockDim.x + threadIdx.x) << (i+1);
    int twod = 1 << i;
    int twodp1 = 1 << (i+1);
    int endInd = index + twodp1 - 1;
    // Guard against the padded power-of-two length so the reduction tree stays complete.
    if (endInd < rounded) {
        input[endInd] += input[index + twod - 1];
    }
}

__global__ void scanDown(int *input, int rounded, int N, int i) {
    // Downsweep phase: push partial sums back down the tree to form the exclusive scan.
    int index = (blockIdx.x * blockDim.x + threadIdx.x) << (i+1);
    int twod = 1 << i;
    int twodp1 = 1 << (i+1);
    int endInd = index + twodp1 - 1;
    if (endInd < rounded) {
        int t = input[index + twod - 1];
        input[index + twod - 1] = input[endInd];
        input[endInd] += t;
    }
}

// exclusive_scan --
//
// Implementation of an exclusive scan on global memory array `input`,
// with results placed in global memory `result`.
//
// N is the logical size of the input and output arrays, however
// students can assume that both the start and result arrays were
// allocated with next power-of-two sizes as described by the comments
// in cudaScan(). This is helpful, since your parallel scan
// will likely write to memory locations beyond N, but of course not
// greater than N rounded up to the next power of 2.
//
// Also, as per the comments in cudaScan(), you can implement an
// "in-place" scan, since the timing harness makes a copy of input and
// places it in result
void exclusive_scan(int* input, int N, int* result)
{
    // CS149 TODO:
    //
    // Implement your exclusive scan implementation here. Keep in
    // mind that although the arguments to this function are device
    // allocated arrays, this is a function that is running in a thread
    // on the CPU. Your implementation will need to make multiple calls
    // to CUDA kernel functions (that you must write) to implement the
    // scan.

    int rounded = nextPow2(N);
    int levels = (int) log2((double) rounded);

    // Work in place on `result`: copy the input values over first so the
    // upsweep/downsweep passes below leave the exclusive scan in `result`.
    hipMemcpy(result, input, N * sizeof(int), hipMemcpyDeviceToDevice);

    for (int i = 0; i < levels; i++) {
        hipLaunchKernelGGL(( scanUp), dim3(((rounded >> (i+1))+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, result, rounded, N, i);
    }

    // The exclusive scan needs the last element of the padded array zeroed
    // before the downsweep; `result` is device memory, so clear it with hipMemset.
    hipMemset(&result[rounded - 1], 0, sizeof(int));

    for (int i = levels - 1; i >= 0; i--) {
        hipLaunchKernelGGL(( scanDown), dim3(((rounded >> (i+1))+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, result, rounded, N, i);
    }
}

//
// cudaScan --
//
// This function is a timing wrapper around the student's
// implementation of scan - it copies the input to the GPU
// and times the invocation of the exclusive_scan() function
// above. Students should not modify it.
double cudaScan(int* inarray, int* end, int* resultarray)
{
    int* device_result;
    int* device_input;
    int N = end - inarray;

    // This code rounds the arrays provided to exclusive_scan up
    // to a power of 2, but elements after the end of the original
    // input are left uninitialized and not checked for correctness.
    //
    // Student implementations of exclusive_scan may assume an array's
    // allocated length is a power of 2 for simplicity. This will
    // result in extra work on non-power-of-2 inputs, but it's worth
    // the simplicity of a power of two only solution.
int rounded_length = nextPow2(end - inarray); hipMalloc((void **)&device_result, sizeof(int) * rounded_length); hipMalloc((void **)&device_input, sizeof(int) * rounded_length); // For convenience, both the input and output vectors on the // device are initialized to the input values. This means that // students are free to implement an in-place scan on the result // vector if desired. If you do this, you will need to keep this // in mind when calling exclusive_scan from find_repeats. hipMemcpy(device_input, inarray, (end - inarray) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(device_result, inarray, (end - inarray) * sizeof(int), hipMemcpyHostToDevice); double startTime = CycleTimer::currentSeconds(); exclusive_scan(device_input, N, device_result); // Wait for completion hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); hipMemcpy(resultarray, device_result, (end - inarray) * sizeof(int), hipMemcpyDeviceToHost); double overallDuration = endTime - startTime; return overallDuration; } // cudaScanThrust -- // // Wrapper around the Thrust library's exclusive scan function // As above in cudaScan(), this function copies the input to the GPU // and times only the execution of the scan itself. // // Students are not expected to produce implementations that achieve // performance that is competition to the Thrust version, but it is fun to try. double cudaScanThrust(int* inarray, int* end, int* resultarray) { int length = end - inarray; thrust::device_ptr<int> d_input = thrust::device_malloc<int>(length); thrust::device_ptr<int> d_output = thrust::device_malloc<int>(length); hipMemcpy(d_input.get(), inarray, length * sizeof(int), hipMemcpyHostToDevice); double startTime = CycleTimer::currentSeconds(); thrust::exclusive_scan(d_input, d_input + length, d_output); hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); hipMemcpy(resultarray, d_output.get(), length * sizeof(int), hipMemcpyDeviceToHost); thrust::device_free(d_input); thrust::device_free(d_output); double overallDuration = endTime - startTime; return overallDuration; } // find_repeats -- // // Given an array of integers `device_input`, returns an array of all // indices `i` for which `device_input[i] == device_input[i+1]`. // // Returns the total number of pairs found int find_repeats(int* device_input, int length, int* device_output) { // CS149 TODO: // // Implement this function. You will probably want to // make use of one or more calls to exclusive_scan(), as well as // additional CUDA kernel launches. // // Note: As in the scan code, the calling code ensures that // allocated arrays are a power of 2 in size, so you can use your // exclusive_scan function with them. However, your implementation // must ensure that the results of find_repeats are correct given // the actual array length. return 0; } // // cudaFindRepeats -- // // Timing wrapper around find_repeats. You should not modify this function. 
double cudaFindRepeats(int *input, int length, int *output, int *output_length) { int *device_input; int *device_output; int rounded_length = nextPow2(length); hipMalloc((void **)&device_input, rounded_length * sizeof(int)); hipMalloc((void **)&device_output, rounded_length * sizeof(int)); hipMemcpy(device_input, input, length * sizeof(int), hipMemcpyHostToDevice); hipDeviceSynchronize(); double startTime = CycleTimer::currentSeconds(); int result = find_repeats(device_input, length, device_output); hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); // set output count and results array *output_length = result; hipMemcpy(output, device_output, length * sizeof(int), hipMemcpyDeviceToHost); hipFree(device_input); hipFree(device_output); float duration = endTime - startTime; return duration; } void printCudaInfo() { int deviceCount = 0; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
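A note on validating the scan above: a serial CPU reference makes it easy to spot-check the device result for the first N elements (the padded tail beyond N is allowed to hold garbage). This is a minimal sketch, not part of the original CS149 harness; the helper names referenceExclusiveScan and checkScan are made up for illustration.

#include <stdio.h>
#include <stdlib.h>

// Serial exclusive scan used only as a correctness reference.
static void referenceExclusiveScan(const int* in, int n, int* out) {
    int running = 0;
    for (int i = 0; i < n; i++) {
        out[i] = running;      // exclusive: element i gets the sum of in[0..i-1]
        running += in[i];
    }
}

// Compare the GPU result (already copied back to the host) against the reference.
static bool checkScan(const int* input, const int* gpuResult, int n) {
    int* expected = (int*)malloc(n * sizeof(int));
    referenceExclusiveScan(input, n, expected);
    bool ok = true;
    for (int i = 0; i < n; i++) {
        if (gpuResult[i] != expected[i]) {
            printf("mismatch at %d: got %d, expected %d\n", i, gpuResult[i], expected[i]);
            ok = false;
            break;
        }
    }
    free(expected);
    return ok;
}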
3a7d978987c5d3bf2f56543f007ab44397c54569.cu
#include <stdio.h> #include <math.h> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include "CycleTimer.h" #define THREADS_PER_BLOCK 256 // helper function to round an integer up to the next power of 2 static inline int nextPow2(int n) { n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; n++; return n; } // upsweep (reduce) phase of the work-efficient scan: add the left child of each subtree into its right child __global__ void scanUp(int *input, int rounded, int N, int i) { int index = (blockIdx.x * blockDim.x + threadIdx.x) << (i+1); int twod = 1 << i; int twodp1 = 1 << (i+1); int endInd = index+twodp1-1; if (endInd < rounded) { input[endInd] += input[index+twod-1]; } } // downsweep phase: swap the left child into the right child and accumulate the running prefix __global__ void scanDown(int *input, int rounded, int N, int i) { int index = (blockIdx.x * blockDim.x + threadIdx.x) << (i+1); int twod = 1 << i; int twodp1 = 1 << (i+1); int endInd = index+twodp1-1; if (endInd < rounded) { int t = input[index+twod-1]; input[index+twod-1] = input[endInd]; input[endInd] += t; } } // exclusive_scan -- // // Implementation of an exclusive scan on global memory array `input`, // with results placed in global memory `result`. // // N is the logical size of the input and output arrays, however // students can assume that both the start and result arrays were // allocated with next power-of-two sizes as described by the comments // in cudaScan(). This is helpful, since your parallel scan // will likely write to memory locations beyond N, but of course not // greater than N rounded up to the next power of 2. // // Also, as per the comments in cudaScan(), you can implement an // "in-place" scan, since the timing harness makes a copy of input and // places it in result void exclusive_scan(int* input, int N, int* result) { // CS149 TODO: // // Implement your exclusive scan implementation here. Keep in // mind that although the arguments to this function are device // allocated arrays, this is a function that is running in a thread // on the CPU. Your implementation will need to make multiple calls // to CUDA kernel functions (that you must write) to implement the // scan. // Work-efficient (Blelloch) scan, done in place on `result`, which the timing harness pre-populates with a copy of the input. int rounded = nextPow2(N); if (rounded > N) { cudaMemset(result + N, 0, (rounded - N) * sizeof(int)); } // zero the padding so it does not pollute the sums for (int i = 0; i < (int) log2((double) rounded); i++) { scanUp<<<((rounded >> (i+1))+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(result, rounded, N, i); } cudaMemset(result + rounded - 1, 0, sizeof(int)); // clear the last element before the downsweep for (int i = (int) log2((double) rounded) - 1; i >= 0; i--) { scanDown<<<((rounded >> (i+1))+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(result, rounded, N, i); } } // // cudaScan -- // // This function is a timing wrapper around the student's // implementation of scan - it copies the input to the GPU // and times the invocation of the exclusive_scan() function // above. Students should not modify it. double cudaScan(int* inarray, int* end, int* resultarray) { int* device_result; int* device_input; int N = end - inarray; // This code rounds the arrays provided to exclusive_scan up // to a power of 2, but elements after the end of the original // input are left uninitialized and not checked for correctness. // // Student implementations of exclusive_scan may assume an array's // allocated length is a power of 2 for simplicity. This will // result in extra work on non-power-of-2 inputs, but it's worth // the simplicity of a power of two only solution. 
int rounded_length = nextPow2(end - inarray); cudaMalloc((void **)&device_result, sizeof(int) * rounded_length); cudaMalloc((void **)&device_input, sizeof(int) * rounded_length); // For convenience, both the input and output vectors on the // device are initialized to the input values. This means that // students are free to implement an in-place scan on the result // vector if desired. If you do this, you will need to keep this // in mind when calling exclusive_scan from find_repeats. cudaMemcpy(device_input, inarray, (end - inarray) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(device_result, inarray, (end - inarray) * sizeof(int), cudaMemcpyHostToDevice); double startTime = CycleTimer::currentSeconds(); exclusive_scan(device_input, N, device_result); // Wait for completion cudaDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); cudaMemcpy(resultarray, device_result, (end - inarray) * sizeof(int), cudaMemcpyDeviceToHost); double overallDuration = endTime - startTime; return overallDuration; } // cudaScanThrust -- // // Wrapper around the Thrust library's exclusive scan function // As above in cudaScan(), this function copies the input to the GPU // and times only the execution of the scan itself. // // Students are not expected to produce implementations that achieve // performance that is competition to the Thrust version, but it is fun to try. double cudaScanThrust(int* inarray, int* end, int* resultarray) { int length = end - inarray; thrust::device_ptr<int> d_input = thrust::device_malloc<int>(length); thrust::device_ptr<int> d_output = thrust::device_malloc<int>(length); cudaMemcpy(d_input.get(), inarray, length * sizeof(int), cudaMemcpyHostToDevice); double startTime = CycleTimer::currentSeconds(); thrust::exclusive_scan(d_input, d_input + length, d_output); cudaDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); cudaMemcpy(resultarray, d_output.get(), length * sizeof(int), cudaMemcpyDeviceToHost); thrust::device_free(d_input); thrust::device_free(d_output); double overallDuration = endTime - startTime; return overallDuration; } // find_repeats -- // // Given an array of integers `device_input`, returns an array of all // indices `i` for which `device_input[i] == device_input[i+1]`. // // Returns the total number of pairs found int find_repeats(int* device_input, int length, int* device_output) { // CS149 TODO: // // Implement this function. You will probably want to // make use of one or more calls to exclusive_scan(), as well as // additional CUDA kernel launches. // // Note: As in the scan code, the calling code ensures that // allocated arrays are a power of 2 in size, so you can use your // exclusive_scan function with them. However, your implementation // must ensure that the results of find_repeats are correct given // the actual array length. return 0; } // // cudaFindRepeats -- // // Timing wrapper around find_repeats. You should not modify this function. 
double cudaFindRepeats(int *input, int length, int *output, int *output_length) { int *device_input; int *device_output; int rounded_length = nextPow2(length); cudaMalloc((void **)&device_input, rounded_length * sizeof(int)); cudaMalloc((void **)&device_output, rounded_length * sizeof(int)); cudaMemcpy(device_input, input, length * sizeof(int), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); double startTime = CycleTimer::currentSeconds(); int result = find_repeats(device_input, length, device_output); cudaDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); // set output count and results array *output_length = result; cudaMemcpy(output, device_output, length * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(device_input); cudaFree(device_output); float duration = endTime - startTime; return duration; } void printCudaInfo() { int deviceCount = 0; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
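The find_repeats stub above only documents the contract. A common way to complete it, sketched below assuming the exclusive_scan in this file scans `result` in place (as in the fixed version above), is flag-scan-scatter: mark each position where input[i] == input[i+1], exclusive-scan the flags to get output slots, then scatter the matching indices. The helper kernels markRepeats and scatterRepeats are illustrative names, not part of the assignment starter code.

// Write 1 at position i if input[i] == input[i+1], else 0.
__global__ void markRepeats(const int* input, int* flags, int length) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < length - 1)
        flags[i] = (input[i] == input[i + 1]) ? 1 : 0;
    else if (i < length)
        flags[i] = 0;   // the last element can never start a repeated pair
}

// For every flagged position, its exclusive-scan value is its slot in the output.
__global__ void scatterRepeats(const int* flags, const int* scannedFlags, int* output, int length) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < length && flags[i])
        output[scannedFlags[i]] = i;
}

int find_repeats(int* device_input, int length, int* device_output) {
    int rounded = nextPow2(length);
    int* flags;
    int* scanned;
    cudaMalloc(&flags, rounded * sizeof(int));
    cudaMalloc(&scanned, rounded * sizeof(int));

    int blocks = (length + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    markRepeats<<<blocks, THREADS_PER_BLOCK>>>(device_input, flags, length);

    // exclusive_scan works in place on its `result` argument, so pre-populate it with the flags.
    cudaMemcpy(scanned, flags, length * sizeof(int), cudaMemcpyDeviceToDevice);
    exclusive_scan(flags, length, scanned);   // scanned[i] = number of flags before position i

    scatterRepeats<<<blocks, THREADS_PER_BLOCK>>>(flags, scanned, device_output, length);

    // Total count = last scanned value + last flag.
    int lastScan = 0, lastFlag = 0;
    cudaMemcpy(&lastScan, scanned + length - 1, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&lastFlag, flags + length - 1, sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(flags);
    cudaFree(scanned);
    return lastScan + lastFlag;
}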
454af23ddb93788e5eed1051f7f0efc40e181d84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "prime_curve_arithmetic.cuh" #include "curve_definitions.cuh" #include "scalar_multiplication.cuh" #define min(a,b) a<b?a:b namespace ScalarMultiply { namespace pca = PrimeCurveArithmetic; /* Helper function used inside scalarMultiplyOnGpu. */ template <size_t N> __host__ bool launchScalarMultKernel(AffinePoint<N> *dev_res, BigNum<N> *dev_scalar, AffinePoint<N> *dev_point, CurveType curve, unsigned int count); //Using manual definitions so that curve can be inlined during compile time. __global__ void scalarMultKernelSecp192r1(AffinePoint<6> *res, BigNum<6> *scalar, AffinePoint<6> *point, unsigned int count) { for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < count; idx += gridDim.x*blockDim.x) { res[idx] = pca::scalarMultBinary(scalar[idx], point[idx], &Curve::secp192r1); } } __global__ void scalarMultKernelSecp224r1(AffinePoint<7> *res, BigNum<7> *scalar, AffinePoint<7> *point, unsigned int count) { for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < count; idx += gridDim.x*blockDim.x) { res[idx] = pca::scalarMultBinary(scalar[idx], point[idx], &Curve::secp224r1); } } __global__ void scalarMultKernelSecp256r1(AffinePoint<8> *res, BigNum<8> *scalar, AffinePoint<8> *point, unsigned int count) { for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < count; idx += gridDim.x*blockDim.x) { res[idx] = pca::scalarMultBinary(scalar[idx], point[idx], &Curve::secp256r1); } } __global__ void scalarMultKernelSecp384r1(AffinePoint<12> *res, BigNum<12> *scalar, AffinePoint<12> *point, unsigned int count) { for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < count; idx += gridDim.x*blockDim.x) { res[idx] = pca::scalarMultBinary(scalar[idx], point[idx], &Curve::secp384r1); } } __global__ void scalarMultKernelSecp521r1(AffinePoint<17> *res, BigNum<17> *scalar, AffinePoint<17> *point, unsigned int count) { for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < count; idx += gridDim.x*blockDim.x) { res[idx] = pca::scalarMultBinary(scalar[idx], point[idx], &Curve::secp521r1); } } //Manual definitions to bypass compile time errors of incompatible arguments. 
__host__ bool launchScalarMultKernel(AffinePoint<6> *dev_res, BigNum<6> *dev_scalar, AffinePoint<6> *dev_point, CurveType curve, unsigned int count) { const int threadCount = 256; const int blockCount = min(count/threadCount+1,256); switch (curve) { case secp192r1: hipFuncSetCacheConfig(scalarMultKernelSecp192r1, hipFuncCachePreferL1); scalarMultKernelSecp192r1 << <blockCount, threadCount >> > (dev_res, dev_scalar, dev_point, count); break; default: fprintf(stderr, "No curve type found!/n"); return 1; } return 0; } __host__ bool launchScalarMultKernel(AffinePoint<7> *dev_res, BigNum<7> *dev_scalar, AffinePoint<7> *dev_point, CurveType curve, unsigned int count) { const int threadCount = 256; const int blockCount = min(count / threadCount + 1, 256); switch (curve) { case secp224r1: hipFuncSetCacheConfig(scalarMultKernelSecp224r1, hipFuncCachePreferL1); scalarMultKernelSecp224r1 << <blockCount, threadCount >> > (dev_res, dev_scalar, dev_point, count); break; default: fprintf(stderr, "No curve type found!/n"); return 1; } return 0; } __host__ bool launchScalarMultKernel(AffinePoint<8> *dev_res, BigNum<8> *dev_scalar, AffinePoint<8> *dev_point, CurveType curve, unsigned int count) { const int threadCount = 256; const int blockCount = min(count / threadCount + 1, 256); switch (curve) { case secp256r1: hipFuncSetCacheConfig(scalarMultKernelSecp256r1, hipFuncCachePreferL1); scalarMultKernelSecp256r1 << <blockCount, threadCount >> > (dev_res, dev_scalar, dev_point, count); break; default: fprintf(stderr, "No curve type found!/n"); return 1; } return 0; } __host__ bool launchScalarMultKernel(AffinePoint<12> *dev_res, BigNum<12> *dev_scalar, AffinePoint<12> *dev_point, CurveType curve, unsigned int count) { const int threadCount = 256; const int blockCount = min(count / threadCount + 1, 256); switch (curve) { case secp384r1: hipFuncSetCacheConfig(scalarMultKernelSecp384r1, hipFuncCachePreferL1); scalarMultKernelSecp384r1 << <blockCount, threadCount >> > (dev_res, dev_scalar, dev_point, count); break; default: fprintf(stderr, "No curve type found!/n"); return 1; } return 0; } __host__ bool launchScalarMultKernel(AffinePoint<17> *dev_res, BigNum<17> *dev_scalar, AffinePoint<17> *dev_point, CurveType curve, unsigned int count) { const int threadCount = 256; const int blockCount = min(count / threadCount + 1, 256); switch (curve) { case secp521r1: hipFuncSetCacheConfig(scalarMultKernelSecp521r1, hipFuncCachePreferL1); scalarMultKernelSecp521r1 << <blockCount, threadCount >> > (dev_res, dev_scalar, dev_point, count); break; default: fprintf(stderr, "No curve type found!/n"); return 1; } return 0; } template <size_t N> __host__ hipError_t scalarMultiplyOnGpu(AffinePoint<N> *resultArray, BigNum<N> *scalarArray, AffinePoint<N> *pointArray, unsigned int count, CurveType curveType) { AffinePoint<N> *dev_res = 0; BigNum<N> *dev_scalar = 0; AffinePoint<N> *dev_point = 0; hipError_t cudaStatus = hipSuccess; //Allocate device memory. 
cudaStatus = hipMalloc((void**)&dev_point, count * sizeof(AffinePoint<N>)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipMalloc((void**)&dev_res, count * sizeof(AffinePoint<N>)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipMalloc((void**)&dev_scalar, count * sizeof(BigNum<N>)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } //Copy input arrays to device. cudaStatus = hipMemcpy(dev_point, pointArray, count * sizeof(AffinePoint<N>), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipMemcpy(dev_scalar, scalarArray, count * sizeof(BigNum<N>), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } //Check if kernel launch returns non zero. if (launchScalarMultKernel(dev_res, dev_scalar, dev_point, curveType, count)) { goto Error; } // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "launchScalarMultKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } //Copy result to output array. cudaStatus = hipMemcpy(resultArray, dev_res, count * sizeof(AffinePoint<N>), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy devicetohost launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } Error: //Free pointers. hipFree(dev_point); hipFree(dev_scalar); hipFree(dev_res); return cudaStatus; } } //Exposing for linkage only supported scalarMultiplyOnGpu. template hipError_t ScalarMultiply::scalarMultiplyOnGpu(AffinePoint<6> *resultArray, BigNum<6> *scalarArray, AffinePoint<6> *pointArray, unsigned int count, CurveType curveType); template hipError_t ScalarMultiply::scalarMultiplyOnGpu(AffinePoint<7> *resultArray, BigNum<7> *scalarArray, AffinePoint<7> *pointArray, unsigned int count, CurveType curveType); template hipError_t ScalarMultiply::scalarMultiplyOnGpu(AffinePoint<8> *resultArray, BigNum<8> *scalarArray, AffinePoint<8> *pointArray, unsigned int count, CurveType curveType); template hipError_t ScalarMultiply::scalarMultiplyOnGpu(AffinePoint<12> *resultArray, BigNum<12> *scalarArray, AffinePoint<12> *pointArray, unsigned int count, CurveType curveType); template hipError_t ScalarMultiply::scalarMultiplyOnGpu(AffinePoint<17> *resultArray, BigNum<17> *scalarArray, AffinePoint<17> *pointArray, unsigned int count, CurveType curveType);
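One fragile spot in this file is the unparenthesized `#define min(a,b) a<b?a:b` macro. It happens to expand correctly for the `count/threadCount+1` expressions used by the launchers here, but it would silently misbehave for arguments containing lower-precedence operators and it can collide with std::min. A hedged, drop-in alternative (illustrative only, not part of the original source):

// Parenthesized macro, safe for any argument expression (note it still evaluates its arguments twice).
#define MIN(a, b) (((a) < (b)) ? (a) : (b))

// Or, avoiding macros entirely for this specific use:
static inline int cappedBlockCount(unsigned int count, int threadCount, int cap) {
    int blocks = count / threadCount + 1;   // enough blocks to cover `count`
    return blocks < cap ? blocks : cap;     // but never more than `cap`
}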
454af23ddb93788e5eed1051f7f0efc40e181d84.cu
#include "prime_curve_arithmetic.cuh" #include "curve_definitions.cuh" #include "scalar_multiplication.cuh" #define min(a,b) a<b?a:b namespace ScalarMultiply { namespace pca = PrimeCurveArithmetic; /* Helper function used inside scalarMultiplyOnGpu. */ template <size_t N> __host__ bool launchScalarMultKernel(AffinePoint<N> *dev_res, BigNum<N> *dev_scalar, AffinePoint<N> *dev_point, CurveType curve, unsigned int count); //Using manual definitions so that curve can be inlined during compile time. __global__ void scalarMultKernelSecp192r1(AffinePoint<6> *res, BigNum<6> *scalar, AffinePoint<6> *point, unsigned int count) { for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < count; idx += gridDim.x*blockDim.x) { res[idx] = pca::scalarMultBinary(scalar[idx], point[idx], &Curve::secp192r1); } } __global__ void scalarMultKernelSecp224r1(AffinePoint<7> *res, BigNum<7> *scalar, AffinePoint<7> *point, unsigned int count) { for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < count; idx += gridDim.x*blockDim.x) { res[idx] = pca::scalarMultBinary(scalar[idx], point[idx], &Curve::secp224r1); } } __global__ void scalarMultKernelSecp256r1(AffinePoint<8> *res, BigNum<8> *scalar, AffinePoint<8> *point, unsigned int count) { for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < count; idx += gridDim.x*blockDim.x) { res[idx] = pca::scalarMultBinary(scalar[idx], point[idx], &Curve::secp256r1); } } __global__ void scalarMultKernelSecp384r1(AffinePoint<12> *res, BigNum<12> *scalar, AffinePoint<12> *point, unsigned int count) { for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < count; idx += gridDim.x*blockDim.x) { res[idx] = pca::scalarMultBinary(scalar[idx], point[idx], &Curve::secp384r1); } } __global__ void scalarMultKernelSecp521r1(AffinePoint<17> *res, BigNum<17> *scalar, AffinePoint<17> *point, unsigned int count) { for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < count; idx += gridDim.x*blockDim.x) { res[idx] = pca::scalarMultBinary(scalar[idx], point[idx], &Curve::secp521r1); } } //Manual definitions to bypass compile time errors of incompatible arguments. 
__host__ bool launchScalarMultKernel(AffinePoint<6> *dev_res, BigNum<6> *dev_scalar, AffinePoint<6> *dev_point, CurveType curve, unsigned int count) { const int threadCount = 256; const int blockCount = min(count/threadCount+1,256); switch (curve) { case secp192r1: cudaFuncSetCacheConfig(scalarMultKernelSecp192r1, cudaFuncCachePreferL1); scalarMultKernelSecp192r1 << <blockCount, threadCount >> > (dev_res, dev_scalar, dev_point, count); break; default: fprintf(stderr, "No curve type found!/n"); return 1; } return 0; } __host__ bool launchScalarMultKernel(AffinePoint<7> *dev_res, BigNum<7> *dev_scalar, AffinePoint<7> *dev_point, CurveType curve, unsigned int count) { const int threadCount = 256; const int blockCount = min(count / threadCount + 1, 256); switch (curve) { case secp224r1: cudaFuncSetCacheConfig(scalarMultKernelSecp224r1, cudaFuncCachePreferL1); scalarMultKernelSecp224r1 << <blockCount, threadCount >> > (dev_res, dev_scalar, dev_point, count); break; default: fprintf(stderr, "No curve type found!/n"); return 1; } return 0; } __host__ bool launchScalarMultKernel(AffinePoint<8> *dev_res, BigNum<8> *dev_scalar, AffinePoint<8> *dev_point, CurveType curve, unsigned int count) { const int threadCount = 256; const int blockCount = min(count / threadCount + 1, 256); switch (curve) { case secp256r1: cudaFuncSetCacheConfig(scalarMultKernelSecp256r1, cudaFuncCachePreferL1); scalarMultKernelSecp256r1 << <blockCount, threadCount >> > (dev_res, dev_scalar, dev_point, count); break; default: fprintf(stderr, "No curve type found!/n"); return 1; } return 0; } __host__ bool launchScalarMultKernel(AffinePoint<12> *dev_res, BigNum<12> *dev_scalar, AffinePoint<12> *dev_point, CurveType curve, unsigned int count) { const int threadCount = 256; const int blockCount = min(count / threadCount + 1, 256); switch (curve) { case secp384r1: cudaFuncSetCacheConfig(scalarMultKernelSecp384r1, cudaFuncCachePreferL1); scalarMultKernelSecp384r1 << <blockCount, threadCount >> > (dev_res, dev_scalar, dev_point, count); break; default: fprintf(stderr, "No curve type found!/n"); return 1; } return 0; } __host__ bool launchScalarMultKernel(AffinePoint<17> *dev_res, BigNum<17> *dev_scalar, AffinePoint<17> *dev_point, CurveType curve, unsigned int count) { const int threadCount = 256; const int blockCount = min(count / threadCount + 1, 256); switch (curve) { case secp521r1: cudaFuncSetCacheConfig(scalarMultKernelSecp521r1, cudaFuncCachePreferL1); scalarMultKernelSecp521r1 << <blockCount, threadCount >> > (dev_res, dev_scalar, dev_point, count); break; default: fprintf(stderr, "No curve type found!/n"); return 1; } return 0; } template <size_t N> __host__ cudaError_t scalarMultiplyOnGpu(AffinePoint<N> *resultArray, BigNum<N> *scalarArray, AffinePoint<N> *pointArray, unsigned int count, CurveType curveType) { AffinePoint<N> *dev_res = 0; BigNum<N> *dev_scalar = 0; AffinePoint<N> *dev_point = 0; cudaError_t cudaStatus = cudaSuccess; //Allocate device memory. 
cudaStatus = cudaMalloc((void**)&dev_point, count * sizeof(AffinePoint<N>)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaMalloc((void**)&dev_res, count * sizeof(AffinePoint<N>)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaMalloc((void**)&dev_scalar, count * sizeof(BigNum<N>)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } //Copy input arrays to device. cudaStatus = cudaMemcpy(dev_point, pointArray, count * sizeof(AffinePoint<N>), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaMemcpy(dev_scalar, scalarArray, count * sizeof(BigNum<N>), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } //Check if kernel launch returns non zero. if (launchScalarMultKernel(dev_res, dev_scalar, dev_point, curveType, count)) { goto Error; } // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "launchScalarMultKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } //Copy result to output array. cudaStatus = cudaMemcpy(resultArray, dev_res, count * sizeof(AffinePoint<N>), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy devicetohost launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } Error: //Free pointers. cudaFree(dev_point); cudaFree(dev_scalar); cudaFree(dev_res); return cudaStatus; } } //Exposing for linkage only supported scalarMultiplyOnGpu. template cudaError_t ScalarMultiply::scalarMultiplyOnGpu(AffinePoint<6> *resultArray, BigNum<6> *scalarArray, AffinePoint<6> *pointArray, unsigned int count, CurveType curveType); template cudaError_t ScalarMultiply::scalarMultiplyOnGpu(AffinePoint<7> *resultArray, BigNum<7> *scalarArray, AffinePoint<7> *pointArray, unsigned int count, CurveType curveType); template cudaError_t ScalarMultiply::scalarMultiplyOnGpu(AffinePoint<8> *resultArray, BigNum<8> *scalarArray, AffinePoint<8> *pointArray, unsigned int count, CurveType curveType); template cudaError_t ScalarMultiply::scalarMultiplyOnGpu(AffinePoint<12> *resultArray, BigNum<12> *scalarArray, AffinePoint<12> *pointArray, unsigned int count, CurveType curveType); template cudaError_t ScalarMultiply::scalarMultiplyOnGpu(AffinePoint<17> *resultArray, BigNum<17> *scalarArray, AffinePoint<17> *pointArray, unsigned int count, CurveType curveType);
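All five kernels above rely on the same grid-stride loop so that a launch capped at 256 blocks of 256 threads can cover any `count`, with each thread striding by gridDim.x*blockDim.x. A minimal self-contained illustration of that pattern on a trivial scale-by-two kernel (a generic example, unrelated to the curve arithmetic itself):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scaleByTwo(float* data, unsigned int count) {
    // Grid-stride loop: correct even when count > gridDim.x * blockDim.x.
    for (unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < count; idx += gridDim.x * blockDim.x) {
        data[idx] *= 2.0f;
    }
}

int main() {
    const unsigned int count = 1 << 20;
    float* d = nullptr;
    cudaMalloc(&d, count * sizeof(float));
    cudaMemset(d, 0, count * sizeof(float));

    const int threadCount = 256;
    // Same capped block count the curve launchers compute: enough blocks, but never more than 256.
    int blockCount = count / threadCount + 1;
    if (blockCount > 256) blockCount = 256;

    scaleByTwo<<<blockCount, threadCount>>>(d, count);
    cudaDeviceSynchronize();

    cudaFree(d);
    return 0;
}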
5ff71f821c71796a990348d09734a6037c100135.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _VECTOR_DOT_PRODUCT_KERNEL_H_ #define _VECTOR_DOT_PRODUCT_KERNEL_H_ /* Edit this function to complete the functionality of dot product on the GPU using atomics. You may add other kernel functions as you deem necessary. */ __constant__ int n_c[1]; // allocation on the kernel __global__ void vector_dot_product(float* Ad, float* Bd, float* Cd) { int k=THREAD_COUNT; //blockDim.x*gridDim.x; int tid=threadIdx.x+(blockDim.x*blockIdx.x); __shared__ double C_shared[THREAD_COUNT]; int n_c_local = n_c[0]; int num_strides = n_c_local/k; if (n_c[0]%k>0) num_strides++; if(tid < n_c_local) C_shared[tid]=0; // C is the size the number of threads int i; for(i=0; i<num_strides; i++) if(tid<THREAD_COUNT) if((tid+(k*i))<n_c_local) C_shared[tid]+=((double)Ad[tid+(k*i)]*(double)Bd[tid+(k*i)]); /*Now every thing is multiplied and loaded into share d memory that is the * size of k number of threads, and reduction needs to be applied to get the * answer*/ __syncthreads(); int stride; for(stride=k; stride>0; stride/=2){ if(tid<stride && tid+stride < k) C_shared[tid]+=C_shared[tid+stride]; __syncthreads(); } if (tid==0) *Cd=(float)C_shared[0]; //copy back to global memory } #endif // #ifndef _VECTOR_DOT_PRODUCT_KERNEL_H
5ff71f821c71796a990348d09734a6037c100135.cu
#ifndef _VECTOR_DOT_PRODUCT_KERNEL_H_ #define _VECTOR_DOT_PRODUCT_KERNEL_H_ /* Edit this function to complete the functionality of dot product on the GPU using atomics. You may add other kernel functions as you deem necessary. */ __constant__ int n_c[1]; // allocation on the kernel __global__ void vector_dot_product(float* Ad, float* Bd, float* Cd) { int k=THREAD_COUNT; //blockDim.x*gridDim.x; int tid=threadIdx.x+(blockDim.x*blockIdx.x); __shared__ double C_shared[THREAD_COUNT]; int n_c_local = n_c[0]; int num_strides = n_c_local/k; if (n_c[0]%k>0) num_strides++; if(tid < n_c_local) C_shared[tid]=0; // C is the size the number of threads int i; for(i=0; i<num_strides; i++) if(tid<THREAD_COUNT) if((tid+(k*i))<n_c_local) C_shared[tid]+=((double)Ad[tid+(k*i)]*(double)Bd[tid+(k*i)]); /*Now every thing is multiplied and loaded into share d memory that is the * size of k number of threads, and reduction needs to be applied to get the * answer*/ __syncthreads(); int stride; for(stride=k; stride>0; stride/=2){ if(tid<stride && tid+stride < k) C_shared[tid]+=C_shared[tid+stride]; __syncthreads(); } if (tid==0) *Cd=(float)C_shared[0]; //copy back to global memory } #endif // #ifndef _VECTOR_DOT_PRODUCT_KERNEL_H
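The kernel above stores every partial product in one shared array indexed by the global thread id, which only stays in bounds for a single-block launch. The file's own header comment asks for a dot product "using atomics"; a hedged multi-block sketch in that spirit (block-local shared-memory reduction, then one atomicAdd per block) follows. It is a generic illustration, not the course's reference solution, and it assumes the kernel is launched with THREADS_PER_BLOCK threads per block and that *result is zeroed before the launch.

#define THREADS_PER_BLOCK 256

// Each block reduces its partial sums in shared memory, then atomically adds
// its block total into the single-element result in global memory.
__global__ void dot_product_atomic(const float* A, const float* B, float* result, int n) {
    __shared__ float partial[THREADS_PER_BLOCK];

    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    float sum = 0.0f;
    // Grid-stride loop so any n is covered by a bounded launch.
    for (int i = tid; i < n; i += blockDim.x * gridDim.x)
        sum += A[i] * B[i];
    partial[threadIdx.x] = sum;
    __syncthreads();

    // Tree reduction within the block.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            partial[threadIdx.x] += partial[threadIdx.x + stride];
        __syncthreads();
    }

    if (threadIdx.x == 0)
        atomicAdd(result, partial[0]);   // one atomic per block; result must be zeroed before launch
}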
e95e88f6d2e848e2a79cf2a62cd1936679a6f13a.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2018 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/linear_updater.h> #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "coordinate_common.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); // training parameter struct GPUCoordinateTrainParam : public dmlc::Parameter<GPUCoordinateTrainParam> { /*! \brief learning_rate */ float learning_rate; /*! \brief regularization weight for L2 norm */ float reg_lambda; /*! \brief regularization weight for L1 norm */ float reg_alpha; int feature_selector; int top_k; int debug_verbose; int n_gpus; int gpu_id; bool silent; // declare parameters DMLC_DECLARE_PARAMETER(GPUCoordinateTrainParam) { DMLC_DECLARE_FIELD(learning_rate) .set_lower_bound(0.0f) .set_default(1.0f) .describe("Learning rate of each update."); DMLC_DECLARE_FIELD(reg_lambda) .set_lower_bound(0.0f) .set_default(0.0f) .describe("L2 regularization on weights."); DMLC_DECLARE_FIELD(reg_alpha) .set_lower_bound(0.0f) .set_default(0.0f) .describe("L1 regularization on weights."); DMLC_DECLARE_FIELD(feature_selector) .set_default(kCyclic) .add_enum("cyclic", kCyclic) .add_enum("shuffle", kShuffle) .add_enum("thrifty", kThrifty) .add_enum("greedy", kGreedy) .add_enum("random", kRandom) .describe("Feature selection or ordering method."); DMLC_DECLARE_FIELD(top_k).set_lower_bound(0).set_default(0).describe( "The number of top features to select in 'thrifty' feature_selector. " "The value of zero means using all the features."); DMLC_DECLARE_FIELD(debug_verbose) .set_lower_bound(0) .set_default(0) .describe("flag to print out detailed breakdown of runtime"); DMLC_DECLARE_FIELD(n_gpus).set_default(1).describe( "Number of devices to use."); DMLC_DECLARE_FIELD(gpu_id).set_default(0).describe( "Primary device ordinal."); DMLC_DECLARE_FIELD(silent).set_default(false).describe( "Do not print information during trainig."); // alias of parameters DMLC_DECLARE_ALIAS(learning_rate, eta); DMLC_DECLARE_ALIAS(reg_lambda, lambda); DMLC_DECLARE_ALIAS(reg_alpha, alpha); } /*! 
\brief Denormalizes the regularization penalties - to be called at each * update */ void DenormalizePenalties(double sum_instance_weight) { reg_lambda_denorm = reg_lambda * sum_instance_weight; reg_alpha_denorm = reg_alpha * sum_instance_weight; } // denormalizated regularization penalties float reg_lambda_denorm; float reg_alpha_denorm; }; void RescaleIndices(size_t ridx_begin, dh::DVec<Entry> *data) { auto d_data = data->Data(); dh::LaunchN(data->DeviceIdx(), data->Size(), [=] __device__(size_t idx) { d_data[idx].index -= ridx_begin; }); } class DeviceShard { int device_idx_; dh::BulkAllocator<dh::MemoryType::kDevice> ba_; std::vector<size_t> row_ptr_; dh::DVec<Entry> data_; dh::DVec<GradientPair> gpair_; dh::CubMemory temp_; size_t ridx_begin_; size_t ridx_end_; public: DeviceShard(int device_idx, const SparsePage &batch, bst_uint row_begin, bst_uint row_end, const GPUCoordinateTrainParam &param, const gbm::GBLinearModelParam &model_param) : device_idx_(device_idx), ridx_begin_(row_begin), ridx_end_(row_end) { dh::safe_cuda(hipSetDevice(device_idx)); // The begin and end indices for the section of each column associated with // this shard std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; for (auto fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.data(), col.data() + col.size(), Entry(row_begin, 0.0f), cmp); auto column_end = std::upper_bound(col.data(), col.data() + col.size(), Entry(row_end, 0.0f), cmp); column_segments.push_back( std::make_pair(column_begin - col.data(), column_end - col.data())); row_ptr_.push_back(row_ptr_.back() + column_end - column_begin); } ba_.Allocate(device_idx, param.silent, &data_, row_ptr_.back(), &gpair_, (row_end - row_begin) * model_param.num_output_group); for (int fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(hipMemcpy( data_.Data() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), hipMemcpyHostToDevice)); } // Rescale indices with respect to current shard RescaleIndices(ridx_begin_, &data_); } void UpdateGpair(const std::vector<GradientPair> &host_gpair, const gbm::GBLinearModelParam &model_param) { gpair_.copy(host_gpair.begin() + ridx_begin_ * model_param.num_output_group, host_gpair.begin() + ridx_end_ * model_param.num_output_group); } GradientPair GetBiasGradient(int group_idx, int num_group) { auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.tbegin(), skip); return dh::SumReduction(temp_, perm, ridx_end_ - ridx_begin_); } void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = gpair_.Data(); dh::LaunchN(device_idx_, ridx_end_ - ridx_begin_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } GradientPair GetGradient(int group_idx, int num_group, int fidx) { auto d_col = data_.Data() + row_ptr_[fidx]; size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; auto d_gpair = gpair_.Data(); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + 
group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(temp_, multiply_iterator, col_size); } void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { auto d_gpair = gpair_.Data(); auto d_col = data_.Data() + row_ptr_[fidx]; size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(device_idx_, col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } }; /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { public: // set training parameter void Init( const std::vector<std::pair<std::string, std::string>> &args) override { param.InitAllowUnknown(args); selector.reset(FeatureSelector::Create(param.feature_selector)); monitor.Init("GPUCoordinateUpdater", param.debug_verbose); } void LazyInitShards(DMatrix *p_fmat, const gbm::GBLinearModelParam &model_param) { if (!shards.empty()) return; dist_ = GPUDistribution::Block(GPUSet::All(param.gpu_id, param.n_gpus, p_fmat->Info().num_row_)); auto devices = dist_.Devices(); int n_devices = devices.Size(); bst_uint row_begin = 0; bst_uint shard_size = ::ceil(static_cast<double>(p_fmat->Info().num_row_) / n_devices); // Partition input matrix into row segments std::vector<size_t> row_segments; row_segments.push_back(0); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { bst_uint row_end = ::min(static_cast<size_t>(row_begin + shard_size), p_fmat->Info().num_row_); row_segments.push_back(row_end); row_begin = row_end; } CHECK(p_fmat->SingleColBlock()); const auto &batch = *p_fmat->GetColumnBatches().begin(); shards.resize(n_devices); // Create device shards dh::ExecuteIndexShards(&shards, [&](int i, std::unique_ptr<DeviceShard>& shard) { shard = std::unique_ptr<DeviceShard>( new DeviceShard(devices.DeviceId(i), batch, row_segments[i], row_segments[i + 1], param, model_param)); }); } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { param.DenormalizePenalties(sum_instance_weight); monitor.Start("LazyInitShards"); this->LazyInitShards(p_fmat, model->param); monitor.Stop("LazyInitShards"); monitor.Start("UpdateGpair"); // Update gpair dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateGpair(in_gpair->ConstHostVector(), model->param); }); monitor.Stop("UpdateGpair"); monitor.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor.Stop("UpdateBias"); // prepare for updating the weights selector->Setup(*model, in_gpair->ConstHostVector(), p_fmat, param.reg_alpha_denorm, param.reg_lambda_denorm, param.top_k); monitor.Start("UpdateFeature"); for (auto group_idx = 0; group_idx < model->param.num_output_group; ++group_idx) { for (auto i = 0U; i < model->param.num_feature; i++) { auto fidx = selector->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, param.reg_alpha_denorm, param.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->param.num_output_group; ++group_idx) { // Get 
gradient auto grad = dh::ReduceShards<GradientPair>( &shards, [&](std::unique_ptr<DeviceShard> &shard) { return shard->GetBiasGradient(group_idx, model->param.num_output_group); }); auto dbias = static_cast<float>( param.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->bias()[group_idx] += dbias; // Update residual dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateBiasResidual(dbias, group_idx, model->param.num_output_group); }); } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = dh::ReduceShards<GradientPair>( &shards, [&](std::unique_ptr<DeviceShard> &shard) { return shard->GetGradient(group_idx, model->param.num_output_group, fidx); }); auto dw = static_cast<float>(param.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, param.reg_alpha_denorm, param.reg_lambda_denorm)); w += dw; dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateResidual(dw, group_idx, model->param.num_output_group, fidx); }); } // training parameter GPUCoordinateTrainParam param; GPUDistribution dist_; std::unique_ptr<FeatureSelector> selector; common::Monitor monitor; std::vector<std::unique_ptr<DeviceShard>> shards; }; DMLC_REGISTER_PARAMETER(GPUCoordinateTrainParam); XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
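GetBiasGradient above composes a counting iterator, a transform iterator, and a permutation iterator so the reduction only touches every num_group-th gradient pair without materialising an index array. A small self-contained Thrust example of the same iterator composition, reducing every k-th element of a vector (illustrative, outside the xgboost code base):

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/reduce.h>

struct stride_index {
    int k;
    __host__ __device__ int operator()(int i) const { return i * k; }  // maps 0,1,2,... to 0,k,2k,...
};

int main() {
    const int n = 12, k = 3;
    thrust::device_vector<float> v(n);
    thrust::sequence(v.begin(), v.end());          // 0,1,2,...,11

    // Indices 0,3,6,9 select values 0,3,6,9, so the reduction yields 18.
    auto counting = thrust::make_counting_iterator(0);
    auto skip = thrust::make_transform_iterator(counting, stride_index{k});
    auto perm = thrust::make_permutation_iterator(v.begin(), skip);

    float sum = thrust::reduce(perm, perm + n / k, 0.0f);
    std::printf("sum of every %d-th element = %f\n", k, sum);
    return 0;
}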
e95e88f6d2e848e2a79cf2a62cd1936679a6f13a.cu
/*! * Copyright 2018 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/linear_updater.h> #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "coordinate_common.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); // training parameter struct GPUCoordinateTrainParam : public dmlc::Parameter<GPUCoordinateTrainParam> { /*! \brief learning_rate */ float learning_rate; /*! \brief regularization weight for L2 norm */ float reg_lambda; /*! \brief regularization weight for L1 norm */ float reg_alpha; int feature_selector; int top_k; int debug_verbose; int n_gpus; int gpu_id; bool silent; // declare parameters DMLC_DECLARE_PARAMETER(GPUCoordinateTrainParam) { DMLC_DECLARE_FIELD(learning_rate) .set_lower_bound(0.0f) .set_default(1.0f) .describe("Learning rate of each update."); DMLC_DECLARE_FIELD(reg_lambda) .set_lower_bound(0.0f) .set_default(0.0f) .describe("L2 regularization on weights."); DMLC_DECLARE_FIELD(reg_alpha) .set_lower_bound(0.0f) .set_default(0.0f) .describe("L1 regularization on weights."); DMLC_DECLARE_FIELD(feature_selector) .set_default(kCyclic) .add_enum("cyclic", kCyclic) .add_enum("shuffle", kShuffle) .add_enum("thrifty", kThrifty) .add_enum("greedy", kGreedy) .add_enum("random", kRandom) .describe("Feature selection or ordering method."); DMLC_DECLARE_FIELD(top_k).set_lower_bound(0).set_default(0).describe( "The number of top features to select in 'thrifty' feature_selector. " "The value of zero means using all the features."); DMLC_DECLARE_FIELD(debug_verbose) .set_lower_bound(0) .set_default(0) .describe("flag to print out detailed breakdown of runtime"); DMLC_DECLARE_FIELD(n_gpus).set_default(1).describe( "Number of devices to use."); DMLC_DECLARE_FIELD(gpu_id).set_default(0).describe( "Primary device ordinal."); DMLC_DECLARE_FIELD(silent).set_default(false).describe( "Do not print information during trainig."); // alias of parameters DMLC_DECLARE_ALIAS(learning_rate, eta); DMLC_DECLARE_ALIAS(reg_lambda, lambda); DMLC_DECLARE_ALIAS(reg_alpha, alpha); } /*! 
\brief Denormalizes the regularization penalties - to be called at each * update */ void DenormalizePenalties(double sum_instance_weight) { reg_lambda_denorm = reg_lambda * sum_instance_weight; reg_alpha_denorm = reg_alpha * sum_instance_weight; } // denormalizated regularization penalties float reg_lambda_denorm; float reg_alpha_denorm; }; void RescaleIndices(size_t ridx_begin, dh::DVec<Entry> *data) { auto d_data = data->Data(); dh::LaunchN(data->DeviceIdx(), data->Size(), [=] __device__(size_t idx) { d_data[idx].index -= ridx_begin; }); } class DeviceShard { int device_idx_; dh::BulkAllocator<dh::MemoryType::kDevice> ba_; std::vector<size_t> row_ptr_; dh::DVec<Entry> data_; dh::DVec<GradientPair> gpair_; dh::CubMemory temp_; size_t ridx_begin_; size_t ridx_end_; public: DeviceShard(int device_idx, const SparsePage &batch, bst_uint row_begin, bst_uint row_end, const GPUCoordinateTrainParam &param, const gbm::GBLinearModelParam &model_param) : device_idx_(device_idx), ridx_begin_(row_begin), ridx_end_(row_end) { dh::safe_cuda(cudaSetDevice(device_idx)); // The begin and end indices for the section of each column associated with // this shard std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; for (auto fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.data(), col.data() + col.size(), Entry(row_begin, 0.0f), cmp); auto column_end = std::upper_bound(col.data(), col.data() + col.size(), Entry(row_end, 0.0f), cmp); column_segments.push_back( std::make_pair(column_begin - col.data(), column_end - col.data())); row_ptr_.push_back(row_ptr_.back() + column_end - column_begin); } ba_.Allocate(device_idx, param.silent, &data_, row_ptr_.back(), &gpair_, (row_end - row_begin) * model_param.num_output_group); for (int fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(cudaMemcpy( data_.Data() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), cudaMemcpyHostToDevice)); } // Rescale indices with respect to current shard RescaleIndices(ridx_begin_, &data_); } void UpdateGpair(const std::vector<GradientPair> &host_gpair, const gbm::GBLinearModelParam &model_param) { gpair_.copy(host_gpair.begin() + ridx_begin_ * model_param.num_output_group, host_gpair.begin() + ridx_end_ * model_param.num_output_group); } GradientPair GetBiasGradient(int group_idx, int num_group) { auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.tbegin(), skip); return dh::SumReduction(temp_, perm, ridx_end_ - ridx_begin_); } void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = gpair_.Data(); dh::LaunchN(device_idx_, ridx_end_ - ridx_begin_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } GradientPair GetGradient(int group_idx, int num_group, int fidx) { auto d_col = data_.Data() + row_ptr_[fidx]; size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; auto d_gpair = gpair_.Data(); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group 
+ group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(temp_, multiply_iterator, col_size); } void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { auto d_gpair = gpair_.Data(); auto d_col = data_.Data() + row_ptr_[fidx]; size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(device_idx_, col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } }; /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { public: // set training parameter void Init( const std::vector<std::pair<std::string, std::string>> &args) override { param.InitAllowUnknown(args); selector.reset(FeatureSelector::Create(param.feature_selector)); monitor.Init("GPUCoordinateUpdater", param.debug_verbose); } void LazyInitShards(DMatrix *p_fmat, const gbm::GBLinearModelParam &model_param) { if (!shards.empty()) return; dist_ = GPUDistribution::Block(GPUSet::All(param.gpu_id, param.n_gpus, p_fmat->Info().num_row_)); auto devices = dist_.Devices(); int n_devices = devices.Size(); bst_uint row_begin = 0; bst_uint shard_size = std::ceil(static_cast<double>(p_fmat->Info().num_row_) / n_devices); // Partition input matrix into row segments std::vector<size_t> row_segments; row_segments.push_back(0); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { bst_uint row_end = std::min(static_cast<size_t>(row_begin + shard_size), p_fmat->Info().num_row_); row_segments.push_back(row_end); row_begin = row_end; } CHECK(p_fmat->SingleColBlock()); const auto &batch = *p_fmat->GetColumnBatches().begin(); shards.resize(n_devices); // Create device shards dh::ExecuteIndexShards(&shards, [&](int i, std::unique_ptr<DeviceShard>& shard) { shard = std::unique_ptr<DeviceShard>( new DeviceShard(devices.DeviceId(i), batch, row_segments[i], row_segments[i + 1], param, model_param)); }); } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { param.DenormalizePenalties(sum_instance_weight); monitor.Start("LazyInitShards"); this->LazyInitShards(p_fmat, model->param); monitor.Stop("LazyInitShards"); monitor.Start("UpdateGpair"); // Update gpair dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateGpair(in_gpair->ConstHostVector(), model->param); }); monitor.Stop("UpdateGpair"); monitor.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor.Stop("UpdateBias"); // prepare for updating the weights selector->Setup(*model, in_gpair->ConstHostVector(), p_fmat, param.reg_alpha_denorm, param.reg_lambda_denorm, param.top_k); monitor.Start("UpdateFeature"); for (auto group_idx = 0; group_idx < model->param.num_output_group; ++group_idx) { for (auto i = 0U; i < model->param.num_feature; i++) { auto fidx = selector->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, param.reg_alpha_denorm, param.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->param.num_output_group; ++group_idx) { 
// Get gradient auto grad = dh::ReduceShards<GradientPair>( &shards, [&](std::unique_ptr<DeviceShard> &shard) { return shard->GetBiasGradient(group_idx, model->param.num_output_group); }); auto dbias = static_cast<float>( param.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->bias()[group_idx] += dbias; // Update residual dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateBiasResidual(dbias, group_idx, model->param.num_output_group); }); } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = dh::ReduceShards<GradientPair>( &shards, [&](std::unique_ptr<DeviceShard> &shard) { return shard->GetGradient(group_idx, model->param.num_output_group, fidx); }); auto dw = static_cast<float>(param.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, param.reg_alpha_denorm, param.reg_lambda_denorm)); w += dw; dh::ExecuteShards(&shards, [&](std::unique_ptr<DeviceShard> &shard) { shard->UpdateResidual(dw, group_idx, model->param.num_output_group, fidx); }); } // training parameter GPUCoordinateTrainParam param; GPUDistribution dist_; std::unique_ptr<FeatureSelector> selector; common::Monitor monitor; std::vector<std::unique_ptr<DeviceShard>> shards; }; DMLC_REGISTER_PARAMETER(GPUCoordinateTrainParam); XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
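The weight update above relies on CoordinateDelta from coordinate_common.h, which is not shown here. For orientation, the usual elastic-net coordinate step it corresponds to is a Newton step on the L2-smoothed objective followed by L1 soft-thresholding, clipped so the weight can shrink back to zero. The sketch below is a hedged reconstruction of that standard formula, not a copy of xgboost's implementation:

#include <algorithm>

// One coordinate-descent step for weight w, given gradient/hessian sums accumulated
// over the feature column, with L1 (alpha) and L2 (lambda) penalties.
inline double coordinate_delta(double sum_grad, double sum_hess, double w,
                               double alpha, double lambda) {
    if (sum_hess < 1e-5) return 0.0;                 // nothing to update on an (almost) empty column
    const double grad_l2 = sum_grad + lambda * w;    // gradient including the L2 term
    const double hess_l2 = sum_hess + lambda;
    // Soft-threshold around the unregularised optimum; never step past w == 0.
    if (w - grad_l2 / hess_l2 >= 0.0)
        return std::max(-(grad_l2 + alpha) / hess_l2, -w);
    else
        return std::min(-(grad_l2 - alpha) / hess_l2, -w);
}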
52b0018aa976b5e0e9449289fde5d5dc9153a53a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Laplacian_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *output = NULL; hipMalloc(&output, XSIZE*YSIZE*sizeof(float)); float *input = NULL; hipMalloc(&input, XSIZE*YSIZE*sizeof(float)); const int width = 1; const int height = 1; const int nChannels = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Laplacian_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input,width,height,nChannels); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Laplacian_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input,width,height,nChannels); } hipDeviceSynchronize(); auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Laplacian_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input,width,height,nChannels); } hipDeviceSynchronize(); auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; hipFree(output); hipFree(input); } }}
52b0018aa976b5e0e9449289fde5d5dc9153a53a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Laplacian_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE*sizeof(float)); float *input = NULL; cudaMalloc(&input, XSIZE*YSIZE*sizeof(float)); const int width = 1; const int height = 1; const int nChannels = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Laplacian_Kernel<<<gridBlock,threadBlock>>>(output,input,width,height,nChannels); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Laplacian_Kernel<<<gridBlock,threadBlock>>>(output,input,width,height,nChannels); } cudaDeviceSynchronize(); auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Laplacian_Kernel<<<gridBlock,threadBlock>>>(output,input,width,height,nChannels); } cudaDeviceSynchronize(); auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; cudaFree(output); cudaFree(input); } }}
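The benchmark above times batches of kernel launches with std::chrono, which only measures GPU work if the device is synchronized on both sides of the timed region (added above). An alternative that keeps host-side synchronization out of the timed path is cudaEvent-based timing; a minimal generic sketch follows (the kernel name dummyKernel is a placeholder, not Laplacian_Kernel):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(float* data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = data[i] * 0.5f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    const int iters = 1000;
    float* d = nullptr;
    cudaMalloc(&d, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    dummyKernel<<<(n + 255) / 256, 256>>>(d, n);     // warm-up launch
    cudaEventRecord(start);
    for (int iter = 0; iter < iters; ++iter)
        dummyKernel<<<(n + 255) / 256, 256>>>(d, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                      // wait until the stop event has been reached

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);          // elapsed GPU time in milliseconds
    printf("%d launches took %.3f ms (%.3f us per launch)\n", iters, ms, ms * 1000.0f / iters);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d);
    return 0;
}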
f757a433fff3f48ff04fa54a9f0b6752f2eb25eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include <ctime> #include <assert.h> #include <math.h> #include "bitonic_com.cuh" #include "wtime.h" #include <fstream> #include <random> using namespace std; typedef unsigned int data_t; //typedef unsigned int data_t; typedef int index_t; int compare(const void *p1, const void *p2) { const struct max_withIndex *elem1 = (const struct max_withIndex *)p1; const struct max_withIndex *elem2 = (const struct max_withIndex *)p2; if (elem1->value > elem2->value)//For descending order { return -1; } else if (elem1->value < elem2->value) { return 1; } else { return 0; } } template<typename data_t,typename index_t> index_t power(index_t x,index_t n) { index_t number=1; for (index_t i=0; i<n ;i++) { number*=x; } return number; } template<typename data_t, typename index_t> __global__ void alpha_range_max_sample( data_t* vec, data_t* sampled_top, index_t num_element, index_t num_subrange, index_t alpha, index_t* SubrangeId) { index_t tid = threadIdx.x + blockIdx.x * blockDim.x; index_t warp_id = tid >> 5; const index_t lane_id = threadIdx.x & 31; const index_t warp_count = (blockDim.x * gridDim.x) >> 5; // const index_t subrange_size = 1<<alpha; //schedule one warp to work on one subrange while(warp_id < num_subrange - 1) { index_t my_beg = (warp_id << alpha) + lane_id; index_t my_end = (warp_id+1) << alpha; assert(my_end < num_element); data_t my_max = vec[my_beg]; while(my_beg < my_end) { my_max = (my_max < vec[my_beg] ? vec[my_beg]:my_max); my_beg += 32; } //max across the warp for (int i=16; i>0; i>>=1) { data_t new_max = __shfl_down_sync(0xffffffff,my_max, i); my_max = (my_max < new_max ? new_max:my_max); } if(!lane_id) { sampled_top[warp_id] = my_max; SubrangeId[warp_id]=my_max; // printf("%d\n",my_max); } warp_id += warp_count; } } template<typename data_t,typename index_t> __global__ void sampleMax_old (data_t* A,data_t* SubRangeMax, index_t N,index_t NSubranges,index_t SubRangeSize,const index_t alpha,index_t* SubrangeId) { int thid = blockDim.x*blockIdx.x+threadIdx.x; int laneId= threadIdx.x & 0x1f; int myWarpID=thid >> 5; int NWarps=(blockDim.x*gridDim.x) >> 5; while (myWarpID < NSubranges)//WarpID is used as subrange ID { // index_t mybegin_pos=myWarpID*SubRangeSize+laneId;//<<alpha+laneId; index_t mybegin_pos=(myWarpID<<alpha)+laneId; if (mybegin_pos >= N) { printf("Error! Illegal memory access in a thread. Return this thread\n"); //return; } int Nthreadstowork=32; if (SubRangeSize<32) { Nthreadstowork=SubRangeSize; } index_t myend_pos=(((myWarpID+1)<<alpha) < (N-1)) ? ((myWarpID+1)<<alpha):N-1; //index_t myend_pos=(myWarpID+1)<<alpha; data_t Max=A[mybegin_pos]; while(mybegin_pos < myend_pos) { if (mybegin_pos >= N) { printf("Error! Illegal memory access. at mybegin_pos:%d when warpId:%d \n",mybegin_pos,myWarpID); } Max=(Max < A[mybegin_pos]) ? A[mybegin_pos]:Max; // printf("mybeginpos:%d ",mybegin_pos); mybegin_pos+=32; } data_t MaxFromOther; for (int j=Nthreadstowork >> 1;j >=1;j=j>>1) { MaxFromOther=__shfl_sync(0xffffffff, Max,laneId+j,32/*Nthreadstowork*//*32*/); if (laneId<j) { Max= (MaxFromOther > Max) ? 
MaxFromOther : Max ; } } if(laneId==0) { SubRangeMax[myWarpID]=Max; SubrangeId[myWarpID]=myWarpID; } myWarpID+=NWarps; } return; } template<typename data_t,typename index_t> __global__ void sampleMax (data_t* A,data_t* SubRangeMax, index_t N,index_t NSubranges,index_t SubRangeSize,const index_t alpha,index_t* SubrangeId,int Nthreadstowork) { int thid = blockDim.x*blockIdx.x+threadIdx.x; int laneId= threadIdx.x & 0x1f; int myWarpID=thid >> 5; int NWarps=(blockDim.x*gridDim.x) >> 5; while (myWarpID < NSubranges)//WarpID is used as subrange ID { index_t mybegin_pos=(myWarpID<<alpha)+laneId; index_t myend_pos=(((myWarpID+1)<<alpha) < (N)) ? ((myWarpID+1)<<alpha):N; data_t Max=0;//Assigning all the threads Max reads to 0 intially. Avoids the illegal memory access for Max=A[mybegin_pos]; condition while(mybegin_pos < myend_pos) { Max=(Max < A[mybegin_pos]) ? A[mybegin_pos]:Max; mybegin_pos+=32; } data_t MaxFromOther; for (int j=Nthreadstowork >> 1;j >=1;j=j>>1) { MaxFromOther=__shfl_sync(0xffffffff, Max,laneId+j,32/*Nthreadstowork*//*32*/); if (laneId<j) { Max= (MaxFromOther > Max) ? MaxFromOther : Max ; } } if(laneId==0) { SubRangeMax[myWarpID]=Max; SubrangeId[myWarpID]=myWarpID; } myWarpID+=NWarps; } return; } template<typename data_t,typename index_t> bool IsPowerof2(index_t x) { return (x != 0) && ((x & (x - 1)) == 0); } int main(int argc,char**argv) { cout<<"./exe exp_num_element k alpha scoreFile output_file"<<endl; if (argc != 6) {cout<<"wrong input"<<endl;exit(-1);} index_t p=atol(argv[1]); index_t k= atol(argv[2]); if (!IsPowerof2<data_t,index_t>(k)) { cout<<"k should be power of 2!"<<endl; exit(-1); } index_t alpha= atol(argv[3]); index_t base=2; index_t N=power<data_t,index_t>(base,p); hipSetDevice(0); data_t* A=(data_t*)malloc((sizeof(data_t)*N)); data_t *A_d; H_ERR(hipMalloc((void**) &A_d,sizeof(data_t)*N)); int count=0; cout<<"subrange:"<<count<<endl; index_t SubRangesize=pow(2,alpha); index_t NSubranges=N/SubRangesize; if (NSubranges<k) { cout<<"Small number of subranges!. Decrease the value of alpha!"<<endl; exit(-1); } std::fstream statusLog; // statusLog.open("testSMemDiffK_alpha_N.csv",std::fstream::out | std::fstream::app); // statusLog<<endl<<endl<<"Started Top K with N_"<<N<<"k_"<<k<<"alpha_"<<alpha<<endl; int c=0; data_t* Subrange=(data_t*)malloc((sizeof(data_t)*NSubranges)); max_withIndex* max_Obj=new max_withIndex[NSubranges]; float a=1500.0; // for (index_t i=0;i<N;i++) // { /// A[i]=rand()%(2147483648);//2^31 -1 // A[i]=(float)rand()/(float)(RAND_MAX)*a;//2^31 -1 // if (A[i]==2147481384) c++; // } std::random_device rd; std::mt19937 gen(rd()); float value; // For Normal distribution float minvalue=100000000; // int value; // int minvalue=100000000; const char *scr_file=argv[4]; // const char * ${arr[1]} FILE* fptr; if ((fptr = fopen(scr_file,"r")) == NULL) { printf("Error! 
opening SCORE file"); exit(1); } for (index_t i=0;i<N;i++) { fscanf(fptr,"%d", &A[i]); // vec1[i]=A[i]; } // // int dis=atoi(argv[4]); // std::uniform_int_distribution <unsigned int> d(0, 4294967295); // // std::normal_distribution<float> d(100000000, 10);//Mean =100 mill , sd=100 // for (index_t i=0;i<N;i++) // { // // value=d(gen); // // if (minvalue > value) // // { // // minvalue=value; // // } // // if (value > 4294967295) // // { // // cout<<"Overflow of unsigned int detected"<<endl; // // return -1; // // } // A[i]=d(gen); // // vec1[i]=vec[i]; // } // cout<<endl; // if (minvalue < 0) // { // cout<<"-ve value detected:"<<minvalue<<endl; // return -1; // } cout<<endl; cout<<"SubRangeSize:"<<SubRangesize<<endl; cout<<"Number of Subranges:"<<NSubranges<<endl; data_t* Max_d; H_ERR(hipMalloc((void**) &Max_d,sizeof(data_t)*NSubranges)); H_ERR(hipMemcpy(A_d,A,sizeof(data_t)*N,hipMemcpyHostToDevice)); index_t* SubrangeId_d; H_ERR(hipMalloc((void**) &SubrangeId_d,sizeof(index_t)*NSubranges)); data_t TopKElement; data_t* ConcatenatedRange_d; H_ERR(hipMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize)); double time_beg=wtime(); double t2=0;double t3=0;double t4=0;double totalTime=0;double timeforFirstTopk=0;double timeforMaxsample=0;double timeforSecondTopk=0;double timeforNormalBitonicsort=0; double concatenation_time=0; if (NSubranges>k) { int Nthreadstowork=32; if (SubRangesize<32) { Nthreadstowork=SubRangesize; } hipLaunchKernelGGL(( sampleMax<data_t,index_t>), dim3(4096),dim3(512), 0, 0, A_d,Max_d,N,NSubranges,SubRangesize,alpha,SubrangeId_d,Nthreadstowork); H_ERR(hipDeviceSynchronize()); t2=wtime(); timeforMaxsample=t2-time_beg; // cout<<"timeforMaxsample:"<<timeforMaxsample<<endl; // cout<<"Max for every subranges"<<endl; bitonic_firstTopk<data_t,index_t>(Max_d,NSubranges,k,SubRangesize,NSubranges,SubrangeId_d,A_d,ConcatenatedRange_d,timeforFirstTopk, concatenation_time); t3=wtime(); // timeforFirstTopk=t3-t2; bitonic<data_t,index_t>(ConcatenatedRange_d,k*SubRangesize,k,TopKElement,SubRangesize,A,N,SubrangeId_d,A_d,ConcatenatedRange_d,timeforSecondTopk); } else { double NormalBitonicstart=wtime(); bitonic<data_t,index_t>(A_d,N,k,TopKElement,SubRangesize,A,N,SubrangeId_d,A_d,ConcatenatedRange_d,timeforSecondTopk); timeforNormalBitonicsort=wtime()-NormalBitonicstart; } cout <<endl; t4=wtime(); // timeforSecondTopk=t4-t3; hipFree(A_d); hipFree(Max_d); free(A); totalTime=timeforMaxsample+timeforFirstTopk+concatenation_time+timeforSecondTopk; cout<<"timeforMaxsample:"<<timeforMaxsample*1000<<" ms"<<endl; cout<<"timeforFirstTopk:"<<timeforFirstTopk*1000<<" ms"<<endl; cout<<"timeforConcatenation:"<<concatenation_time*1000<<" ms"<<endl; cout<<"timeforSecondTopk:"<<timeforSecondTopk*1000<<" ms"<<endl; cout<<"totalTime:"<<totalTime*1000<<" ms"<<endl; cout<<"timeforNormalBitonicsort"<<timeforNormalBitonicsort*1000<<" ms"<<endl; std::fstream timeLog; // timeLog.open("timeBitonicsampleOCT11.csv",std::fstream::out | std::fstream::app); timeLog.open(argv[4],std::fstream::out | std::fstream::app); timeLog<<p<<";"<<k<<";"<<alpha<<";"<<timeforMaxsample*1000<<";"<<timeforFirstTopk*1000<<";"<<timeforSecondTopk*1000<<";"<<timeforNormalBitonicsort*1000<<";"<<totalTime*1000<<endl; // timeLog<<"N_"<<N<<"k_"<<k<<"alpha_"<<alpha<<";"<<timeforNormalBitonicsort*1000<<endl; timeLog.close(); // statusLog<<"Successfully Finished Top K with N_"<<N<<"k_"<<k<<"alpha_"<<alpha<<endl; // statusLog.close(); return 0; }
f757a433fff3f48ff04fa54a9f0b6752f2eb25eb.cu
#include <stdio.h> #include <stdlib.h> #include <iostream> #include <ctime> #include <assert.h> #include <math.h> #include "bitonic_com.cuh" #include "wtime.h" #include <fstream> #include <random> using namespace std; typedef unsigned int data_t; //typedef unsigned int data_t; typedef int index_t; int compare(const void *p1, const void *p2) { const struct max_withIndex *elem1 = (const struct max_withIndex *)p1; const struct max_withIndex *elem2 = (const struct max_withIndex *)p2; if (elem1->value > elem2->value)//For descending order { return -1; } else if (elem1->value < elem2->value) { return 1; } else { return 0; } } template<typename data_t,typename index_t> index_t power(index_t x,index_t n) { index_t number=1; for (index_t i=0; i<n ;i++) { number*=x; } return number; } template<typename data_t, typename index_t> __global__ void alpha_range_max_sample( data_t* vec, data_t* sampled_top, index_t num_element, index_t num_subrange, index_t alpha, index_t* SubrangeId) { index_t tid = threadIdx.x + blockIdx.x * blockDim.x; index_t warp_id = tid >> 5; const index_t lane_id = threadIdx.x & 31; const index_t warp_count = (blockDim.x * gridDim.x) >> 5; // const index_t subrange_size = 1<<alpha; //schedule one warp to work on one subrange while(warp_id < num_subrange - 1) { index_t my_beg = (warp_id << alpha) + lane_id; index_t my_end = (warp_id+1) << alpha; assert(my_end < num_element); data_t my_max = vec[my_beg]; while(my_beg < my_end) { my_max = (my_max < vec[my_beg] ? vec[my_beg]:my_max); my_beg += 32; } //max across the warp for (int i=16; i>0; i>>=1) { data_t new_max = __shfl_down_sync(0xffffffff,my_max, i); my_max = (my_max < new_max ? new_max:my_max); } if(!lane_id) { sampled_top[warp_id] = my_max; SubrangeId[warp_id]=my_max; // printf("%d\n",my_max); } warp_id += warp_count; } } template<typename data_t,typename index_t> __global__ void sampleMax_old (data_t* A,data_t* SubRangeMax, index_t N,index_t NSubranges,index_t SubRangeSize,const index_t alpha,index_t* SubrangeId) { int thid = blockDim.x*blockIdx.x+threadIdx.x; int laneId= threadIdx.x & 0x1f; int myWarpID=thid >> 5; int NWarps=(blockDim.x*gridDim.x) >> 5; while (myWarpID < NSubranges)//WarpID is used as subrange ID { // index_t mybegin_pos=myWarpID*SubRangeSize+laneId;//<<alpha+laneId; index_t mybegin_pos=(myWarpID<<alpha)+laneId; if (mybegin_pos >= N) { printf("Error! Illegal memory access in a thread. Return this thread\n"); //return; } int Nthreadstowork=32; if (SubRangeSize<32) { Nthreadstowork=SubRangeSize; } index_t myend_pos=(((myWarpID+1)<<alpha) < (N-1)) ? ((myWarpID+1)<<alpha):N-1; //index_t myend_pos=(myWarpID+1)<<alpha; data_t Max=A[mybegin_pos]; while(mybegin_pos < myend_pos) { if (mybegin_pos >= N) { printf("Error! Illegal memory access. at mybegin_pos:%d when warpId:%d \n",mybegin_pos,myWarpID); } Max=(Max < A[mybegin_pos]) ? A[mybegin_pos]:Max; // printf("mybeginpos:%d ",mybegin_pos); mybegin_pos+=32; } data_t MaxFromOther; for (int j=Nthreadstowork >> 1;j >=1;j=j>>1) { MaxFromOther=__shfl_sync(0xffffffff, Max,laneId+j,32/*Nthreadstowork*//*32*/); if (laneId<j) { Max= (MaxFromOther > Max) ? 
MaxFromOther : Max ; } } if(laneId==0) { SubRangeMax[myWarpID]=Max; SubrangeId[myWarpID]=myWarpID; } myWarpID+=NWarps; } return; } template<typename data_t,typename index_t> __global__ void sampleMax (data_t* A,data_t* SubRangeMax, index_t N,index_t NSubranges,index_t SubRangeSize,const index_t alpha,index_t* SubrangeId,int Nthreadstowork) { int thid = blockDim.x*blockIdx.x+threadIdx.x; int laneId= threadIdx.x & 0x1f; int myWarpID=thid >> 5; int NWarps=(blockDim.x*gridDim.x) >> 5; while (myWarpID < NSubranges)//WarpID is used as subrange ID { index_t mybegin_pos=(myWarpID<<alpha)+laneId; index_t myend_pos=(((myWarpID+1)<<alpha) < (N)) ? ((myWarpID+1)<<alpha):N; data_t Max=0;//Assigning all the threads Max reads to 0 intially. Avoids the illegal memory access for Max=A[mybegin_pos]; condition while(mybegin_pos < myend_pos) { Max=(Max < A[mybegin_pos]) ? A[mybegin_pos]:Max; mybegin_pos+=32; } data_t MaxFromOther; for (int j=Nthreadstowork >> 1;j >=1;j=j>>1) { MaxFromOther=__shfl_sync(0xffffffff, Max,laneId+j,32/*Nthreadstowork*//*32*/); if (laneId<j) { Max= (MaxFromOther > Max) ? MaxFromOther : Max ; } } if(laneId==0) { SubRangeMax[myWarpID]=Max; SubrangeId[myWarpID]=myWarpID; } myWarpID+=NWarps; } return; } template<typename data_t,typename index_t> bool IsPowerof2(index_t x) { return (x != 0) && ((x & (x - 1)) == 0); } int main(int argc,char**argv) { cout<<"./exe exp_num_element k alpha scoreFile output_file"<<endl; if (argc != 6) {cout<<"wrong input"<<endl;exit(-1);} index_t p=atol(argv[1]); index_t k= atol(argv[2]); if (!IsPowerof2<data_t,index_t>(k)) { cout<<"k should be power of 2!"<<endl; exit(-1); } index_t alpha= atol(argv[3]); index_t base=2; index_t N=power<data_t,index_t>(base,p); cudaSetDevice(0); data_t* A=(data_t*)malloc((sizeof(data_t)*N)); data_t *A_d; H_ERR(cudaMalloc((void**) &A_d,sizeof(data_t)*N)); int count=0; cout<<"subrange:"<<count<<endl; index_t SubRangesize=pow(2,alpha); index_t NSubranges=N/SubRangesize; if (NSubranges<k) { cout<<"Small number of subranges!. Decrease the value of alpha!"<<endl; exit(-1); } std::fstream statusLog; // statusLog.open("testSMemDiffK_alpha_N.csv",std::fstream::out | std::fstream::app); // statusLog<<endl<<endl<<"Started Top K with N_"<<N<<"k_"<<k<<"alpha_"<<alpha<<endl; int c=0; data_t* Subrange=(data_t*)malloc((sizeof(data_t)*NSubranges)); max_withIndex* max_Obj=new max_withIndex[NSubranges]; float a=1500.0; // for (index_t i=0;i<N;i++) // { /// A[i]=rand()%(2147483648);//2^31 -1 // A[i]=(float)rand()/(float)(RAND_MAX)*a;//2^31 -1 // if (A[i]==2147481384) c++; // } std::random_device rd; std::mt19937 gen(rd()); float value; // For Normal distribution float minvalue=100000000; // int value; // int minvalue=100000000; const char *scr_file=argv[4]; // const char * ${arr[1]} FILE* fptr; if ((fptr = fopen(scr_file,"r")) == NULL) { printf("Error! 
opening SCORE file"); exit(1); } for (index_t i=0;i<N;i++) { fscanf(fptr,"%d", &A[i]); // vec1[i]=A[i]; } // // int dis=atoi(argv[4]); // std::uniform_int_distribution <unsigned int> d(0, 4294967295); // // std::normal_distribution<float> d(100000000, 10);//Mean =100 mill , sd=100 // for (index_t i=0;i<N;i++) // { // // value=d(gen); // // if (minvalue > value) // // { // // minvalue=value; // // } // // if (value > 4294967295) // // { // // cout<<"Overflow of unsigned int detected"<<endl; // // return -1; // // } // A[i]=d(gen); // // vec1[i]=vec[i]; // } // cout<<endl; // if (minvalue < 0) // { // cout<<"-ve value detected:"<<minvalue<<endl; // return -1; // } cout<<endl; cout<<"SubRangeSize:"<<SubRangesize<<endl; cout<<"Number of Subranges:"<<NSubranges<<endl; data_t* Max_d; H_ERR(cudaMalloc((void**) &Max_d,sizeof(data_t)*NSubranges)); H_ERR(cudaMemcpy(A_d,A,sizeof(data_t)*N,cudaMemcpyHostToDevice)); index_t* SubrangeId_d; H_ERR(cudaMalloc((void**) &SubrangeId_d,sizeof(index_t)*NSubranges)); data_t TopKElement; data_t* ConcatenatedRange_d; H_ERR(cudaMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize)); double time_beg=wtime(); double t2=0;double t3=0;double t4=0;double totalTime=0;double timeforFirstTopk=0;double timeforMaxsample=0;double timeforSecondTopk=0;double timeforNormalBitonicsort=0; double concatenation_time=0; if (NSubranges>k) { int Nthreadstowork=32; if (SubRangesize<32) { Nthreadstowork=SubRangesize; } sampleMax<data_t,index_t><<<4096,512>>>(A_d,Max_d,N,NSubranges,SubRangesize,alpha,SubrangeId_d,Nthreadstowork); H_ERR(cudaDeviceSynchronize()); t2=wtime(); timeforMaxsample=t2-time_beg; // cout<<"timeforMaxsample:"<<timeforMaxsample<<endl; // cout<<"Max for every subranges"<<endl; bitonic_firstTopk<data_t,index_t>(Max_d,NSubranges,k,SubRangesize,NSubranges,SubrangeId_d,A_d,ConcatenatedRange_d,timeforFirstTopk, concatenation_time); t3=wtime(); // timeforFirstTopk=t3-t2; bitonic<data_t,index_t>(ConcatenatedRange_d,k*SubRangesize,k,TopKElement,SubRangesize,A,N,SubrangeId_d,A_d,ConcatenatedRange_d,timeforSecondTopk); } else { double NormalBitonicstart=wtime(); bitonic<data_t,index_t>(A_d,N,k,TopKElement,SubRangesize,A,N,SubrangeId_d,A_d,ConcatenatedRange_d,timeforSecondTopk); timeforNormalBitonicsort=wtime()-NormalBitonicstart; } cout <<endl; t4=wtime(); // timeforSecondTopk=t4-t3; cudaFree(A_d); cudaFree(Max_d); free(A); totalTime=timeforMaxsample+timeforFirstTopk+concatenation_time+timeforSecondTopk; cout<<"timeforMaxsample:"<<timeforMaxsample*1000<<" ms"<<endl; cout<<"timeforFirstTopk:"<<timeforFirstTopk*1000<<" ms"<<endl; cout<<"timeforConcatenation:"<<concatenation_time*1000<<" ms"<<endl; cout<<"timeforSecondTopk:"<<timeforSecondTopk*1000<<" ms"<<endl; cout<<"totalTime:"<<totalTime*1000<<" ms"<<endl; cout<<"timeforNormalBitonicsort"<<timeforNormalBitonicsort*1000<<" ms"<<endl; std::fstream timeLog; // timeLog.open("timeBitonicsampleOCT11.csv",std::fstream::out | std::fstream::app); timeLog.open(argv[4],std::fstream::out | std::fstream::app); timeLog<<p<<";"<<k<<";"<<alpha<<";"<<timeforMaxsample*1000<<";"<<timeforFirstTopk*1000<<";"<<timeforSecondTopk*1000<<";"<<timeforNormalBitonicsort*1000<<";"<<totalTime*1000<<endl; // timeLog<<"N_"<<N<<"k_"<<k<<"alpha_"<<alpha<<";"<<timeforNormalBitonicsort*1000<<endl; timeLog.close(); // statusLog<<"Successfully Finished Top K with N_"<<N<<"k_"<<k<<"alpha_"<<alpha<<endl; // statusLog.close(); return 0; }
380f67601bf7f2d67fea9a12ab0cfc5a36be9c71.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHBlas.h> #include <THH/THHGeneral.h> #include <TH/THHalf.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPBlas.h> #include <algorithm> #include <mutex> float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; float result; hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(hipblasSdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Sdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } double THCudaBlas_Ddot(THCState *state, int64_t n, double *x, int64_t incx, double *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; double result; hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(hipblasDdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Ddot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } at::Half THCudaBlas_Hdot(THCState *state, int64_t n, at::Half *x, int64_t incx, at::Half *y, int64_t incy) { #if TORCH_HIP_VERSION >= 8000 if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { at::Half result; hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(hipblasDotEx_v2(handle, n, x, HIP_R_16F, incx, y, HIP_R_16F, incy, &result, HIP_R_16F, HIP_R_32F)); return result; } THError("Cublas_Hdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0.0; #else THError("Cublas_Hdot requires CUDA 8.0+"); return 0.0; #endif } /* Level 2 */ void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda) { // Note: leading dimensions generally are checked that they are > 0 and at least as big the result // requires (even if the value won't be used). // TODO: why does Level3 check trans but this doesn't? 
if (n <= 1) *lda = std::max<int64_t>(m, 1); } void THCudaBlas_Sgemv(THCState *state, char trans, int64_t m, int64_t n, float alpha, float *a, int64_t lda, float *x, int64_t incx, float beta, float *y, int64_t incy) { at::cuda::blas::gemv<float>(trans, m, n, alpha, a, lda, x, incx, beta, y, incy); } void THCudaBlas_Dgemv(THCState *state, char trans, int64_t m, int64_t n, double alpha, double *a, int64_t lda, double *x, int64_t incx, double beta, double *y, int64_t incy) { at::cuda::blas::gemv<double>(trans, m, n, alpha, a, lda, x, incx, beta, y, incy); } void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda) { adjustLdLevel2(m, n, &lda); if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(hipblasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda)); return; } THError("Cublas_Sger only supports m, n, lda, incx, incy" "with the bound [val] <= %d", INT_MAX); } void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda) { adjustLdLevel2(m, n, &lda); if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(hipblasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda)); return; } THError("Cublas_Dger only supports m, n, lda, incx, incy" "with the bound [val] <= %d", INT_MAX); } hipblasOperation_t convertTransToCublasOperation(char trans) { if (trans == 't') return HIPBLAS_OP_T; else if (trans == 'n') return HIPBLAS_OP_N; else if (trans == 'c') return HIPBLAS_OP_C; else { THError("trans must be one of: t, n, c"); return HIPBLAS_OP_T; } } void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc) { int transa_ = ((transa == 't') || (transa == 'T')); int transb_ = ((transb == 't') || (transb == 'T')); // Note: leading dimensions generally are checked that they are > 0 and at least as big the result // requires (even if the value won't be used). if(n <= 1) *ldc = std::max<int64_t>(m, 1); if(transa_) { if(m <= 1) *lda = std::max<int64_t>(k, 1); } else { if(k <= 1) *lda = std::max<int64_t>(m, 1); } if(transb_) { if(k <= 1) *ldb = std::max<int64_t>(n, 1); } else { if(n <= 1) *ldb = std::max<int64_t>(k, 1); } } // Check https://github.com/pytorch/pytorch/issues/22078 // for information about the bug. We don't know the exact conditions that trigger it, // but using Sgemm or Hgemm on Maxwell or Pascal seems to be a // necessary condition. static void checkCuda90Bug(int i_m, int i_n, int i_k) { #if TORCH_HIP_VERSION < 9200 && TORCH_HIP_VERSION >= 9000 static std::once_flag alreadyWarned; const int LIMIT = 1 << 21; if (i_m > LIMIT || i_n > LIMIT || i_k > LIMIT) { hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties(); if (prop->major == 5 || prop->major == 6) { std::call_once(alreadyWarned, []() { TORCH_WARN("Matrix multiplication for dimensions larger than 2^21 has known bugs on your combination of CUDA version and device type. 
Please consider upgrading to CUDA 9.2 or later."); }); } } #endif } /* Level 3 */ void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc) { checkCuda90Bug((int)m, (int)n, (int)k); at::cuda::blas::gemm<float>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } // In CUDA 8.0, definition of data types for sgemmex changed #if TORCH_HIP_VERSION < 8000 # define HIP_R_16F HIPBLAS_DATA_HALF #endif void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, at::Half *c, int64_t ldc) { checkCuda90Bug((int)m, (int)n, (int)k); at::cuda::blas::gemm<at::Half>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } #ifdef __HIP_PLATFORM_HCC__ void THCudaBlas_Bgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, at::BFloat16 *a, int64_t lda, at::BFloat16 *b, int64_t ldb, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc) { at::cuda::blas::gemm<at::BFloat16>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } #endif void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc) { at::cuda::blas::gemm<double>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } #if TORCH_HIP_VERSION >= 9010 || defined __HIP_PLATFORM_HCC__ void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB, at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); float fAlpha = alpha; float fBeta = beta; #ifdef __HIP_PLATFORM_HCC__ THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k, (void*)&fAlpha, a, rocblas_datatype_f16_r, (int)lda, strideA, b, rocblas_datatype_f16_r, (int)ldb, strideB, (void*)&fBeta, c, rocblas_datatype_f16_r, (int)ldc, strideC, c, rocblas_datatype_f16_r, (int)ldc, strideC, (int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard, 0, 0)); #else THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); THCublasCheck(hipblasGemmStridedBatchedEx(handle, opa, opb, (int)m, (int)n, (int)k, (void*)&fAlpha, a, HIP_R_16F, (int)lda, strideA, b, HIP_R_16F, (int)ldb, strideB, (void*)&fBeta, c, HIP_R_16F, (int)ldc, strideC, (int)batchCount, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); #endif // __HIP_PLATFORM_HCC__ } #endif // TORCH_HIP_VERSION or __HIP_PLATFORM_HCC__ #ifdef __HIP_PLATFORM_HCC__ void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, const 
at::BFloat16 *a, int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); float fAlpha = alpha; float fBeta = beta; THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k, (void*)&fAlpha, a, rocblas_datatype_bf16_r, (int)lda, strideA, b, rocblas_datatype_bf16_r, (int)ldb, strideB, (void*)&fBeta, c, rocblas_datatype_bf16_r, (int)ldc, strideC, c, rocblas_datatype_bf16_r, (int)ldc, strideC, (int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard, 0, 0, NULL, NULL)); } #endif // __HIP_PLATFORM_HCC__ void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb, float beta, float *c[], int64_t ldc, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } #ifdef __HIP_PLATFORM_HCC__ const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n; const int64_t strideb = (transb == 'N' || transb == 'n') ? 
ldb*n : ldb*k; const int64_t stridec = ldc*n; THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount); #else adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(hipblasSgemmBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc, (int)batchCount)); #endif } #if TORCH_HIP_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__ void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB, float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(hipblasSgemmStridedBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC, (int)batchCount)); } #endif void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb, double beta, double *c[], int64_t ldc, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } #ifdef __HIP_PLATFORM_HCC__ const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n; const int64_t strideb = (transb == 'N' || transb == 'n') ? 
ldb*n : ldb*k; const int64_t stridec = ldc*n; THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount); #else adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(hipblasDgemmBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc, (int)batchCount)); #endif } #if TORCH_HIP_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__ void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB, double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); hipblasOperation_t opa = convertTransToCublasOperation(transa); hipblasOperation_t opb = convertTransToCublasOperation(transb); hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(hipblasDgemmStridedBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC, (int)batchCount)); } #endif
380f67601bf7f2d67fea9a12ab0cfc5a36be9c71.cu
#include <THC/THCBlas.h> #include <THC/THCGeneral.h> #include <TH/THHalf.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDABlas.h> #include <algorithm> #include <mutex> float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; float result; cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(cublasSdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Sdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } double THCudaBlas_Ddot(THCState *state, int64_t n, double *x, int64_t incx, double *y, int64_t incy) { if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { int i_n = (int)n; int i_incx = (int)incx; int i_incy = (int)incy; double result; cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(cublasDdot(handle, i_n, x, i_incx, y, i_incy, &result)); return result; } THError("Cublas_Ddot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0; } at::Half THCudaBlas_Hdot(THCState *state, int64_t n, at::Half *x, int64_t incx, at::Half *y, int64_t incy) { #if CUDA_VERSION >= 8000 if (n == 1) { incx = 1; incy = 1; } if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) { at::Half result; cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(cublasDotEx(handle, n, x, CUDA_R_16F, incx, y, CUDA_R_16F, incy, &result, CUDA_R_16F, CUDA_R_32F)); return result; } THError("Cublas_Hdot only supports n, incx and incy " "up to signed integer limits: %d", INT_MAX); return 0.0; #else THError("Cublas_Hdot requires CUDA 8.0+"); return 0.0; #endif } /* Level 2 */ void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda) { // Note: leading dimensions generally are checked that they are > 0 and at least as big the result // requires (even if the value won't be used). // TODO: why does Level3 check trans but this doesn't? 
if (n <= 1) *lda = std::max<int64_t>(m, 1); } void THCudaBlas_Sgemv(THCState *state, char trans, int64_t m, int64_t n, float alpha, float *a, int64_t lda, float *x, int64_t incx, float beta, float *y, int64_t incy) { at::cuda::blas::gemv<float>(trans, m, n, alpha, a, lda, x, incx, beta, y, incy); } void THCudaBlas_Dgemv(THCState *state, char trans, int64_t m, int64_t n, double alpha, double *a, int64_t lda, double *x, int64_t incx, double beta, double *y, int64_t incy) { at::cuda::blas::gemv<double>(trans, m, n, alpha, a, lda, x, incx, beta, y, incy); } void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda) { adjustLdLevel2(m, n, &lda); if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(cublasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda)); return; } THError("Cublas_Sger only supports m, n, lda, incx, incy" "with the bound [val] <= %d", INT_MAX); } void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda) { adjustLdLevel2(m, n, &lda); if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) { int i_m = (int)m; int i_n = (int)n; int i_lda = (int)lda; int i_incx = (int)incx; int i_incy = (int)incy; cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(cublasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda)); return; } THError("Cublas_Dger only supports m, n, lda, incx, incy" "with the bound [val] <= %d", INT_MAX); } cublasOperation_t convertTransToCublasOperation(char trans) { if (trans == 't') return CUBLAS_OP_T; else if (trans == 'n') return CUBLAS_OP_N; else if (trans == 'c') return CUBLAS_OP_C; else { THError("trans must be one of: t, n, c"); return CUBLAS_OP_T; } } void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc) { int transa_ = ((transa == 't') || (transa == 'T')); int transb_ = ((transb == 't') || (transb == 'T')); // Note: leading dimensions generally are checked that they are > 0 and at least as big the result // requires (even if the value won't be used). if(n <= 1) *ldc = std::max<int64_t>(m, 1); if(transa_) { if(m <= 1) *lda = std::max<int64_t>(k, 1); } else { if(k <= 1) *lda = std::max<int64_t>(m, 1); } if(transb_) { if(k <= 1) *ldb = std::max<int64_t>(n, 1); } else { if(n <= 1) *ldb = std::max<int64_t>(k, 1); } } // Check https://github.com/pytorch/pytorch/issues/22078 // for information about the bug. We don't know the exact conditions that trigger it, // but using Sgemm or Hgemm on Maxwell or Pascal seems to be a // necessary condition. static void checkCuda90Bug(int i_m, int i_n, int i_k) { #if CUDA_VERSION < 9200 && CUDA_VERSION >= 9000 static std::once_flag alreadyWarned; const int LIMIT = 1 << 21; if (i_m > LIMIT || i_n > LIMIT || i_k > LIMIT) { cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties(); if (prop->major == 5 || prop->major == 6) { std::call_once(alreadyWarned, []() { TORCH_WARN("Matrix multiplication for dimensions larger than 2^21 has known bugs on your combination of CUDA version and device type. 
Please consider upgrading to CUDA 9.2 or later."); }); } } #endif } /* Level 3 */ void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc) { checkCuda90Bug((int)m, (int)n, (int)k); at::cuda::blas::gemm<float>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } // In CUDA 8.0, definition of data types for sgemmex changed #if CUDA_VERSION < 8000 # define CUDA_R_16F CUBLAS_DATA_HALF #endif void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, at::Half *c, int64_t ldc) { checkCuda90Bug((int)m, (int)n, (int)k); at::cuda::blas::gemm<at::Half>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } #ifdef __HIP_PLATFORM_HCC__ void THCudaBlas_Bgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, at::BFloat16 *a, int64_t lda, at::BFloat16 *b, int64_t ldb, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc) { at::cuda::blas::gemm<at::BFloat16>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } #endif void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc) { at::cuda::blas::gemm<double>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } #if CUDA_VERSION >= 9010 || defined __HIP_PLATFORM_HCC__ void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB, at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); float fAlpha = alpha; float fBeta = beta; #ifdef __HIP_PLATFORM_HCC__ THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k, (void*)&fAlpha, a, rocblas_datatype_f16_r, (int)lda, strideA, b, rocblas_datatype_f16_r, (int)ldb, strideB, (void*)&fBeta, c, rocblas_datatype_f16_r, (int)ldc, strideC, c, rocblas_datatype_f16_r, (int)ldc, strideC, (int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard, 0, 0)); #else THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); THCublasCheck(cublasGemmStridedBatchedEx(handle, opa, opb, (int)m, (int)n, (int)k, (void*)&fAlpha, a, CUDA_R_16F, (int)lda, strideA, b, CUDA_R_16F, (int)ldb, strideB, (void*)&fBeta, c, CUDA_R_16F, (int)ldc, strideC, (int)batchCount, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); #endif // __HIP_PLATFORM_HCC__ } #endif // CUDA_VERSION or __HIP_PLATFORM_HCC__ #ifdef __HIP_PLATFORM_HCC__ void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, const at::BFloat16 *a, 
int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); float fAlpha = alpha; float fBeta = beta; THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k, (void*)&fAlpha, a, rocblas_datatype_bf16_r, (int)lda, strideA, b, rocblas_datatype_bf16_r, (int)ldb, strideB, (void*)&fBeta, c, rocblas_datatype_bf16_r, (int)ldc, strideC, c, rocblas_datatype_bf16_r, (int)ldc, strideC, (int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard, 0, 0, NULL, NULL)); } #endif // __HIP_PLATFORM_HCC__ void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb, float beta, float *c[], int64_t ldc, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } #ifdef __HIP_PLATFORM_HCC__ const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n; const int64_t strideb = (transb == 'N' || transb == 'n') ? 
ldb*n : ldb*k; const int64_t stridec = ldc*n; THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount); #else adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(cublasSgemmBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc, (int)batchCount)); #endif } #if CUDA_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__ void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB, float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(cublasSgemmStridedBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC, (int)batchCount)); } #endif void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb, double beta, double *c[], int64_t ldc, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } #ifdef __HIP_PLATFORM_HCC__ const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n; const int64_t strideb = (transb == 'N' || transb == 'n') ? 
ldb*n : ldb*k; const int64_t stridec = ldc*n; THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount); #else adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(cublasDgemmBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc, (int)batchCount)); #endif } #if CUDA_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__ void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB, double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount) { if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) ) { THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); THCublasCheck(cublasDgemmStridedBatched(handle, opa, opb, (int)m, (int)n, (int)k, &alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC, (int)batchCount)); } #endif
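// Minimal host-side sketch of the cuBLAS call that THCudaBlas_SgemmStridedBatched
// wraps above. The matrix sizes, column-major layout, and contiguous batch
// strides are illustrative assumptions; the cublas* calls are the standard API.
#include <cublas_v2.h>
#include <cuda_runtime.h>

static void sgemmStridedBatchedDemo(const float *dA, const float *dB, float *dC,
                                    int m, int n, int k, int batchCount)
{
    cublasHandle_t handle;
    cublasCreate(&handle);

    const float alpha = 1.0f, beta = 0.0f;
    // Column-major, no transposes: lda = m, ldb = k, ldc = m. Each batch member
    // is stored contiguously, so the strides are simply the matrix sizes.
    long long strideA = (long long)m * k;
    long long strideB = (long long)k * n;
    long long strideC = (long long)m * n;

    cublasSgemmStridedBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                              m, n, k,
                              &alpha,
                              dA, m, strideA,
                              dB, k, strideB,
                              &beta,
                              dC, m, strideC,
                              batchCount);
    cublasDestroy(handle);
}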
38be747e534fb39c10b3fca6dbce4ea27627cbc5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Linear search: each thread compares one element of g_idata against
// searchedNumber and, on a match, reports its index through *ok. There is no
// bounds check, so the launch configuration must not exceed the array length.
__global__ void reduce(int *g_idata, int searchedNumber, int *ok)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    //printf("%d ", i);
    __syncthreads();
    //printf("%d %d///", g_idata[i], searchedNumber);
    if (g_idata[i] == searchedNumber) {
        printf("Found %d on %d position %d", searchedNumber, i, *ok);
        *ok = i;
    }
}
38be747e534fb39c10b3fca6dbce4ea27627cbc5.cu
#include "includes.h" __global__ void reduce(int *g_idata, int searchedNumber, int *ok) { int i = blockIdx.x * blockDim.x + threadIdx.x; //printf("%d ", i); __syncthreads(); //printf("%d %d///", g_idata[i], searchedNumber); if (g_idata[i] == searchedNumber) { printf("Found %d on %d position %d", searchedNumber, i, *ok); *ok = i; } }
76d99e48ee0ef76b56110e04d31bdf83e8b42ece.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <omp.h> #include <hip/hip_runtime.h> #define THREADS_PER_DIM 16 #define BLOCKS_PER_DIM 16 #define THREADS_PER_BLOCK THREADS_PER_DIM*THREADS_PER_DIM #include "kmeans_hip_kernel.hip" //#define BLOCK_DELTA_REDUCE //#define BLOCK_CENTER_REDUCE #define CPU_DELTA_REDUCE #define CPU_CENTER_REDUCE extern "C" int setup(int argc, char** argv); /* function prototype */ // GLOBAL!!!!! unsigned int num_threads_perdim = THREADS_PER_DIM; /* sqrt(256) -- see references for this choice */ unsigned int num_blocks_perdim = BLOCKS_PER_DIM; /* temporary */ unsigned int num_threads = num_threads_perdim*num_threads_perdim; /* number of threads */ unsigned int num_blocks = num_blocks_perdim*num_blocks_perdim; /* number of blocks */ /* _d denotes it resides on the device */ int *membership_new; /* newly assignment membership */ float *feature_d; /* inverted data array */ float *feature_flipped_d; /* original (not inverted) data array */ int *membership_d; /* membership on the device */ float *block_new_centers; /* sum of points in a cluster (per block) */ float *clusters_d; /* cluster centers on the device */ float *block_clusters_d; /* per block calculation of cluster centers */ int *block_deltas_d; /* per block calculation of deltas */ /* -------------- allocateMemory() ------------------- */ /* allocate device memory, calculate number of blocks and threads, and invert the data array */ extern "C" void allocateMemory(int npoints, int nfeatures, int nclusters, float **features) { num_blocks = npoints / num_threads; if (npoints % num_threads > 0) /* defeat truncation */ num_blocks++; num_blocks_perdim = sqrt((double) num_blocks); while (num_blocks_perdim * num_blocks_perdim < num_blocks) // defeat truncation (should run once) num_blocks_perdim++; num_blocks = num_blocks_perdim*num_blocks_perdim; /* allocate memory for memory_new[] and initialize to -1 (host) */ membership_new = (int*) malloc(npoints * sizeof(int)); for(int i=0;i<npoints;i++) { membership_new[i] = -1; } /* allocate memory for block_new_centers[] (host) */ block_new_centers = (float *) malloc(nclusters*nfeatures*sizeof(float)); /* allocate memory for feature_flipped_d[][], feature_d[][] (device) */ hipMalloc((void**) &feature_flipped_d, npoints*nfeatures*sizeof(float)); hipMemcpy(feature_flipped_d, features[0], npoints*nfeatures*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**) &feature_d, npoints*nfeatures*sizeof(float)); /* invert the data array (kernel execution) */ hipLaunchKernelGGL(( invert_mapping), dim3(num_blocks),dim3(num_threads), 0, 0, feature_flipped_d,feature_d,npoints,nfeatures); /* allocate memory for membership_d[] and clusters_d[][] (device) */ hipMalloc((void**) &membership_d, npoints*sizeof(int)); hipMalloc((void**) &clusters_d, nclusters*nfeatures*sizeof(float)); #ifdef BLOCK_DELTA_REDUCE // allocate array to hold the per block deltas on the gpu side hipMalloc((void**) &block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int)); //hipMemcpy(block_delta_d, &delta_h, sizeof(int), hipMemcpyHostToDevice); #endif #ifdef BLOCK_CENTER_REDUCE // allocate memory and copy to card cluster array in which to accumulate center points for the next iteration hipMalloc((void**) &block_clusters_d, num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float)); //hipMemcpy(new_clusters_d, new_centers[0], nclusters*nfeatures*sizeof(float), 
hipMemcpyHostToDevice); #endif } /* -------------- allocateMemory() end ------------------- */ /* -------------- deallocateMemory() ------------------- */ /* free host and device memory */ extern "C" void deallocateMemory() { free(membership_new); free(block_new_centers); hipFree(feature_d); hipFree(feature_flipped_d); hipFree(membership_d); hipFree(clusters_d); #ifdef BLOCK_CENTER_REDUCE hipFree(block_clusters_d); #endif #ifdef BLOCK_DELTA_REDUCE hipFree(block_deltas_d); #endif } /* -------------- deallocateMemory() end ------------------- */ //////////////////////////////////////////////////////////////////////////////// // Program main // int main( int argc, char** argv) { // make sure we're running on the big card hipSetDevice(1); // as done in the CUDA start/help document provided setup(argc, argv); } // // //////////////////////////////////////////////////////////////////////////////// /* ------------------- kmeansCuda() ------------------------ */ extern "C" int // delta -- had problems when return value was of float type kmeansCuda(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, /* number of attributes for each point */ int npoints, /* number of data points */ int nclusters, /* number of clusters */ int *membership, /* which cluster the point belongs to */ float **clusters, /* coordinates of cluster centers */ int *new_centers_len, /* number of elements in each cluster */ float **new_centers /* sum of elements in each cluster */ ) { int delta = 0; /* if point has moved */ int i,j; /* counters */ hipSetDevice(1); /* copy membership (host to device) */ hipMemcpy(membership_d, membership_new, npoints*sizeof(int), hipMemcpyHostToDevice); /* copy clusters (host to device) */ hipMemcpy(clusters_d, clusters[0], nclusters*nfeatures*sizeof(float), hipMemcpyHostToDevice); /* set up texture */ hipChannelFormatDesc chDesc0 = hipCreateChannelDesc<float>(); t_features.filterMode = hipFilterModePoint; t_features.normalized = false; t_features.channelDesc = chDesc0; if(hipBindTexture(NULL, &t_features, feature_d, &chDesc0, npoints*nfeatures*sizeof(float)) != hipSuccess) printf("Couldn't bind features array to texture!\n"); hipChannelFormatDesc chDesc1 = hipCreateChannelDesc<float>(); t_features_flipped.filterMode = hipFilterModePoint; t_features_flipped.normalized = false; t_features_flipped.channelDesc = chDesc1; if(hipBindTexture(NULL, &t_features_flipped, feature_flipped_d, &chDesc1, npoints*nfeatures*sizeof(float)) != hipSuccess) printf("Couldn't bind features_flipped array to texture!\n"); hipChannelFormatDesc chDesc2 = hipCreateChannelDesc<float>(); t_clusters.filterMode = hipFilterModePoint; t_clusters.normalized = false; t_clusters.channelDesc = chDesc2; if(hipBindTexture(NULL, &t_clusters, clusters_d, &chDesc2, nclusters*nfeatures*sizeof(float)) != hipSuccess) printf("Couldn't bind clusters array to texture!\n"); /* copy clusters to constant memory */ // Fix - modern versions of CUDA SDK use symbol names directly, not strings. //hipMemcpyToSymbol("c_clusters",clusters[0],nclusters*nfeatures*sizeof(float),0,hipMemcpyHostToDevice); hipMemcpyToSymbol(c_clusters,clusters[0],nclusters*nfeatures*sizeof(float),0,hipMemcpyHostToDevice); /* setup execution parameters. 
changed to 2d (source code on NVIDIA CUDA Programming Guide) */ dim3 grid( num_blocks_perdim, num_blocks_perdim ); dim3 threads( num_threads_perdim*num_threads_perdim ); /* execute the kernel */ hipLaunchKernelGGL(( kmeansPoint), dim3(grid), dim3(threads) , 0, 0, feature_d, nfeatures, npoints, nclusters, membership_d, clusters_d, block_clusters_d, block_deltas_d); hipDeviceSynchronize(); /* copy back membership (device to host) */ hipMemcpy(membership_new, membership_d, npoints*sizeof(int), hipMemcpyDeviceToHost); #ifdef BLOCK_CENTER_REDUCE /*** Copy back arrays of per block sums ***/ float * block_clusters_h = (float *) malloc( num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float)); hipMemcpy(block_clusters_h, block_clusters_d, num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float), hipMemcpyDeviceToHost); #endif #ifdef BLOCK_DELTA_REDUCE int * block_deltas_h = (int *) malloc( num_blocks_perdim * num_blocks_perdim * sizeof(int)); hipMemcpy(block_deltas_h, block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int), hipMemcpyDeviceToHost); #endif /* for each point, sum data points in each cluster and see if membership has changed: if so, increase delta and change old membership, and update new_centers; otherwise, update new_centers */ delta = 0; for (i = 0; i < npoints; i++) { int cluster_id = membership_new[i]; new_centers_len[cluster_id]++; if (membership_new[i] != membership[i]) { #ifdef CPU_DELTA_REDUCE delta++; #endif membership[i] = membership_new[i]; } #ifdef CPU_CENTER_REDUCE for (j = 0; j < nfeatures; j++) { new_centers[cluster_id][j] += feature[i][j]; } #endif } #ifdef BLOCK_DELTA_REDUCE /*** calculate global sums from per block sums for delta and the new centers ***/ //debug //printf("\t \t reducing %d block sums to global sum \n",num_blocks_perdim * num_blocks_perdim); for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) { //printf("block %d delta is %d \n",i,block_deltas_h[i]); delta += block_deltas_h[i]; } #endif #ifdef BLOCK_CENTER_REDUCE for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { block_new_centers[j*nfeatures + k] = 0.f; } } for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) { for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { block_new_centers[j*nfeatures + k] += block_clusters_h[i * nclusters*nfeatures + j * nfeatures + k]; } } } #ifdef CPU_CENTER_REDUCE //debug /*for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { if(new_centers[j][k] > 1.001 * block_new_centers[j*nfeatures + k] || new_centers[j][k] < 0.999 * block_new_centers[j*nfeatures + k]) { printf("\t \t for %d:%d, normal value is %e and gpu reduced value id %e \n",j,k,new_centers[j][k],block_new_centers[j*nfeatures + k]); } } }*/ #endif #ifdef BLOCK_CENTER_REDUCE for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) new_centers[j][k]= block_new_centers[j*nfeatures + k]; } #endif #endif return delta; } /* ------------------- kmeansCuda() end ------------------------ */
76d99e48ee0ef76b56110e04d31bdf83e8b42ece.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <omp.h> #include <cuda.h> #define THREADS_PER_DIM 16 #define BLOCKS_PER_DIM 16 #define THREADS_PER_BLOCK THREADS_PER_DIM*THREADS_PER_DIM #include "kmeans_cuda_kernel.cu" //#define BLOCK_DELTA_REDUCE //#define BLOCK_CENTER_REDUCE #define CPU_DELTA_REDUCE #define CPU_CENTER_REDUCE extern "C" int setup(int argc, char** argv); /* function prototype */ // GLOBAL!!!!! unsigned int num_threads_perdim = THREADS_PER_DIM; /* sqrt(256) -- see references for this choice */ unsigned int num_blocks_perdim = BLOCKS_PER_DIM; /* temporary */ unsigned int num_threads = num_threads_perdim*num_threads_perdim; /* number of threads */ unsigned int num_blocks = num_blocks_perdim*num_blocks_perdim; /* number of blocks */ /* _d denotes it resides on the device */ int *membership_new; /* newly assignment membership */ float *feature_d; /* inverted data array */ float *feature_flipped_d; /* original (not inverted) data array */ int *membership_d; /* membership on the device */ float *block_new_centers; /* sum of points in a cluster (per block) */ float *clusters_d; /* cluster centers on the device */ float *block_clusters_d; /* per block calculation of cluster centers */ int *block_deltas_d; /* per block calculation of deltas */ /* -------------- allocateMemory() ------------------- */ /* allocate device memory, calculate number of blocks and threads, and invert the data array */ extern "C" void allocateMemory(int npoints, int nfeatures, int nclusters, float **features) { num_blocks = npoints / num_threads; if (npoints % num_threads > 0) /* defeat truncation */ num_blocks++; num_blocks_perdim = sqrt((double) num_blocks); while (num_blocks_perdim * num_blocks_perdim < num_blocks) // defeat truncation (should run once) num_blocks_perdim++; num_blocks = num_blocks_perdim*num_blocks_perdim; /* allocate memory for memory_new[] and initialize to -1 (host) */ membership_new = (int*) malloc(npoints * sizeof(int)); for(int i=0;i<npoints;i++) { membership_new[i] = -1; } /* allocate memory for block_new_centers[] (host) */ block_new_centers = (float *) malloc(nclusters*nfeatures*sizeof(float)); /* allocate memory for feature_flipped_d[][], feature_d[][] (device) */ cudaMalloc((void**) &feature_flipped_d, npoints*nfeatures*sizeof(float)); cudaMemcpy(feature_flipped_d, features[0], npoints*nfeatures*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**) &feature_d, npoints*nfeatures*sizeof(float)); /* invert the data array (kernel execution) */ invert_mapping<<<num_blocks,num_threads>>>(feature_flipped_d,feature_d,npoints,nfeatures); /* allocate memory for membership_d[] and clusters_d[][] (device) */ cudaMalloc((void**) &membership_d, npoints*sizeof(int)); cudaMalloc((void**) &clusters_d, nclusters*nfeatures*sizeof(float)); #ifdef BLOCK_DELTA_REDUCE // allocate array to hold the per block deltas on the gpu side cudaMalloc((void**) &block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int)); //cudaMemcpy(block_delta_d, &delta_h, sizeof(int), cudaMemcpyHostToDevice); #endif #ifdef BLOCK_CENTER_REDUCE // allocate memory and copy to card cluster array in which to accumulate center points for the next iteration cudaMalloc((void**) &block_clusters_d, num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float)); //cudaMemcpy(new_clusters_d, new_centers[0], nclusters*nfeatures*sizeof(float), cudaMemcpyHostToDevice); #endif } /* -------------- allocateMemory() end ------------------- */ /* 
-------------- deallocateMemory() ------------------- */ /* free host and device memory */ extern "C" void deallocateMemory() { free(membership_new); free(block_new_centers); cudaFree(feature_d); cudaFree(feature_flipped_d); cudaFree(membership_d); cudaFree(clusters_d); #ifdef BLOCK_CENTER_REDUCE cudaFree(block_clusters_d); #endif #ifdef BLOCK_DELTA_REDUCE cudaFree(block_deltas_d); #endif } /* -------------- deallocateMemory() end ------------------- */ //////////////////////////////////////////////////////////////////////////////// // Program main // int main( int argc, char** argv) { // make sure we're running on the big card cudaSetDevice(1); // as done in the CUDA start/help document provided setup(argc, argv); } // // //////////////////////////////////////////////////////////////////////////////// /* ------------------- kmeansCuda() ------------------------ */ extern "C" int // delta -- had problems when return value was of float type kmeansCuda(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, /* number of attributes for each point */ int npoints, /* number of data points */ int nclusters, /* number of clusters */ int *membership, /* which cluster the point belongs to */ float **clusters, /* coordinates of cluster centers */ int *new_centers_len, /* number of elements in each cluster */ float **new_centers /* sum of elements in each cluster */ ) { int delta = 0; /* if point has moved */ int i,j; /* counters */ cudaSetDevice(1); /* copy membership (host to device) */ cudaMemcpy(membership_d, membership_new, npoints*sizeof(int), cudaMemcpyHostToDevice); /* copy clusters (host to device) */ cudaMemcpy(clusters_d, clusters[0], nclusters*nfeatures*sizeof(float), cudaMemcpyHostToDevice); /* set up texture */ cudaChannelFormatDesc chDesc0 = cudaCreateChannelDesc<float>(); t_features.filterMode = cudaFilterModePoint; t_features.normalized = false; t_features.channelDesc = chDesc0; if(cudaBindTexture(NULL, &t_features, feature_d, &chDesc0, npoints*nfeatures*sizeof(float)) != CUDA_SUCCESS) printf("Couldn't bind features array to texture!\n"); cudaChannelFormatDesc chDesc1 = cudaCreateChannelDesc<float>(); t_features_flipped.filterMode = cudaFilterModePoint; t_features_flipped.normalized = false; t_features_flipped.channelDesc = chDesc1; if(cudaBindTexture(NULL, &t_features_flipped, feature_flipped_d, &chDesc1, npoints*nfeatures*sizeof(float)) != CUDA_SUCCESS) printf("Couldn't bind features_flipped array to texture!\n"); cudaChannelFormatDesc chDesc2 = cudaCreateChannelDesc<float>(); t_clusters.filterMode = cudaFilterModePoint; t_clusters.normalized = false; t_clusters.channelDesc = chDesc2; if(cudaBindTexture(NULL, &t_clusters, clusters_d, &chDesc2, nclusters*nfeatures*sizeof(float)) != CUDA_SUCCESS) printf("Couldn't bind clusters array to texture!\n"); /* copy clusters to constant memory */ // Fix - modern versions of CUDA SDK use symbol names directly, not strings. //cudaMemcpyToSymbol("c_clusters",clusters[0],nclusters*nfeatures*sizeof(float),0,cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_clusters,clusters[0],nclusters*nfeatures*sizeof(float),0,cudaMemcpyHostToDevice); /* setup execution parameters. 
changed to 2d (source code on NVIDIA CUDA Programming Guide) */ dim3 grid( num_blocks_perdim, num_blocks_perdim ); dim3 threads( num_threads_perdim*num_threads_perdim ); /* execute the kernel */ kmeansPoint<<< grid, threads >>>( feature_d, nfeatures, npoints, nclusters, membership_d, clusters_d, block_clusters_d, block_deltas_d); cudaThreadSynchronize(); /* copy back membership (device to host) */ cudaMemcpy(membership_new, membership_d, npoints*sizeof(int), cudaMemcpyDeviceToHost); #ifdef BLOCK_CENTER_REDUCE /*** Copy back arrays of per block sums ***/ float * block_clusters_h = (float *) malloc( num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float)); cudaMemcpy(block_clusters_h, block_clusters_d, num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float), cudaMemcpyDeviceToHost); #endif #ifdef BLOCK_DELTA_REDUCE int * block_deltas_h = (int *) malloc( num_blocks_perdim * num_blocks_perdim * sizeof(int)); cudaMemcpy(block_deltas_h, block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int), cudaMemcpyDeviceToHost); #endif /* for each point, sum data points in each cluster and see if membership has changed: if so, increase delta and change old membership, and update new_centers; otherwise, update new_centers */ delta = 0; for (i = 0; i < npoints; i++) { int cluster_id = membership_new[i]; new_centers_len[cluster_id]++; if (membership_new[i] != membership[i]) { #ifdef CPU_DELTA_REDUCE delta++; #endif membership[i] = membership_new[i]; } #ifdef CPU_CENTER_REDUCE for (j = 0; j < nfeatures; j++) { new_centers[cluster_id][j] += feature[i][j]; } #endif } #ifdef BLOCK_DELTA_REDUCE /*** calculate global sums from per block sums for delta and the new centers ***/ //debug //printf("\t \t reducing %d block sums to global sum \n",num_blocks_perdim * num_blocks_perdim); for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) { //printf("block %d delta is %d \n",i,block_deltas_h[i]); delta += block_deltas_h[i]; } #endif #ifdef BLOCK_CENTER_REDUCE for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { block_new_centers[j*nfeatures + k] = 0.f; } } for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) { for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { block_new_centers[j*nfeatures + k] += block_clusters_h[i * nclusters*nfeatures + j * nfeatures + k]; } } } #ifdef CPU_CENTER_REDUCE //debug /*for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { if(new_centers[j][k] > 1.001 * block_new_centers[j*nfeatures + k] || new_centers[j][k] < 0.999 * block_new_centers[j*nfeatures + k]) { printf("\t \t for %d:%d, normal value is %e and gpu reduced value id %e \n",j,k,new_centers[j][k],block_new_centers[j*nfeatures + k]); } } }*/ #endif #ifdef BLOCK_CENTER_REDUCE for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) new_centers[j][k]= block_new_centers[j*nfeatures + k]; } #endif #endif return delta; } /* ------------------- kmeansCuda() end ------------------------ */
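The kmeans driver above depends on invert_mapping, kmeansPoint, and the t_features / t_clusters texture references from the included kmeans_cuda_kernel.cu, which is not reproduced here. As a rough sketch of the layout inversion that allocateMemory() launches (assumed behaviour, not the original kernel): transpose the point-major [npoints][nfeatures] array into a feature-major [nfeatures][npoints] array so that consecutive threads reading one feature across points get coalesced loads.

// Hypothetical invert_mapping-style kernel (the real one lives in
// kmeans_cuda_kernel.cu): one thread per data point copies its features from
// the point-major input into the feature-major output.
__global__ void invert_mapping_sketch(const float* input,  // [npoints][nfeatures]
                                      float* output,       // [nfeatures][npoints]
                                      int npoints, int nfeatures) {
    int point = blockIdx.x * blockDim.x + threadIdx.x;
    if (point < npoints) {
        for (int f = 0; f < nfeatures; f++)
            output[f * npoints + point] = input[point * nfeatures + f];
    }
}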
32b9fad0cef6e07729947ca3ac99be3a56055469.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

__global__ void blur(unsigned char* input_image, unsigned char* output_image, int width, int height) {
    const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x;
    int x = offset % width;
    int y = (offset - x) / width;
    int fsize = 5; // Filter size
    if (offset < width*height) {
        float output_red = 0;
        float output_green = 0;
        float output_blue = 0;
        int hits = 0;
        for (int ox = -fsize; ox < fsize + 1; ++ox) {
            for (int oy = -fsize; oy < fsize + 1; ++oy) {
                if ((x + ox) > -1 && (x + ox) < width && (y + oy) > -1 && (y + oy) < height) {
                    const int currentoffset = (offset + ox + oy * width) * 3;
                    output_red += input_image[currentoffset];
                    output_green += input_image[currentoffset + 1];
                    output_blue += input_image[currentoffset + 2];
                    hits++;
                }
            }
        }
        output_image[offset * 3] = output_red / hits;
        output_image[offset * 3 + 1] = output_green / hits;
        output_image[offset * 3 + 2] = output_blue / hits;
    }
}

void filter(unsigned char* input_image, unsigned char* output_image, int width, int height) {
}
32b9fad0cef6e07729947ca3ac99be3a56055469.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

__global__ void blur(unsigned char* input_image, unsigned char* output_image, int width, int height) {
    const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x;
    int x = offset % width;
    int y = (offset - x) / width;
    int fsize = 5; // Filter size
    if (offset < width*height) {
        float output_red = 0;
        float output_green = 0;
        float output_blue = 0;
        int hits = 0;
        for (int ox = -fsize; ox < fsize + 1; ++ox) {
            for (int oy = -fsize; oy < fsize + 1; ++oy) {
                if ((x + ox) > -1 && (x + ox) < width && (y + oy) > -1 && (y + oy) < height) {
                    const int currentoffset = (offset + ox + oy * width) * 3;
                    output_red += input_image[currentoffset];
                    output_green += input_image[currentoffset + 1];
                    output_blue += input_image[currentoffset + 2];
                    hits++;
                }
            }
        }
        output_image[offset * 3] = output_red / hits;
        output_image[offset * 3 + 1] = output_green / hits;
        output_image[offset * 3 + 2] = output_blue / hits;
    }
}

void filter(unsigned char* input_image, unsigned char* output_image, int width, int height) {
}
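Both the HIP and CUDA versions above leave the host-side filter() wrapper empty. A minimal sketch of what such a wrapper could do, assuming it sits in the same translation unit as the blur kernel; the buffer names and the 256-thread launch configuration are illustrative, not from the original:

// Hypothetical body for the empty filter() above: allocate device buffers for a
// 3-channel image, run blur with one thread per pixel, and copy the result back.
void filter_sketch(unsigned char* input_image, unsigned char* output_image,
                   int width, int height) {
    const size_t bytes = static_cast<size_t>(width) * height * 3;
    unsigned char *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, bytes);
    cudaMalloc(&d_out, bytes);
    cudaMemcpy(d_in, input_image, bytes, cudaMemcpyHostToDevice);

    const int threads = 256;
    const int blocks = (width * height + threads - 1) / threads;
    blur<<<blocks, threads>>>(d_in, d_out, width, height);

    cudaMemcpy(output_image, d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}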
575b86737fe994504e7890dc34d16073a7b49acc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/maxouting.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <typename T> __global__ void KernelMaxOut(const int nthreads, const T* input_data, const int channels, const int input_height, const int input_width, const int groups, const int axis, T* output_data) { const int size = input_height * input_width * channels / groups; const int feat_len = input_height * input_width; int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { int batch_idx = i / size; int batch_offset = i % size; int channel_idx, feat_idx, data_idx; if (axis == 1) { channel_idx = batch_offset / feat_len; feat_idx = batch_offset % feat_len; data_idx = (batch_idx * size + channel_idx * feat_len) * groups + feat_idx; } else { channel_idx = batch_offset % channels; feat_idx = batch_offset / channels; data_idx = (batch_idx * size + feat_idx * channels + channel_idx) * groups; } T ele = static_cast<T>(-FLT_MAX); for (int g = 0; g < groups; ++g) { int idx_offset = (axis == 1 ? g * feat_len : g); T x = input_data[data_idx + idx_offset]; ele = ele > x ? ele : x; } output_data[i] = ele; } } template <typename T> __global__ void KernelMaxoutGrad(const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_height, const int input_width, const int groups, const int axis) { const int size = input_height * input_width * channels / groups; const int feat_len = input_height * input_width; int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { int batch_idx = i / size; int batch_offset = i % size; int channel_idx, feat_idx, data_idx; if (axis == 1) { channel_idx = batch_offset / feat_len; feat_idx = batch_offset % feat_len; data_idx = (batch_idx * size + channel_idx * feat_len) * groups + feat_idx; } else { channel_idx = batch_offset % channels; feat_idx = batch_offset / channels; data_idx = (batch_idx * size + feat_idx * channels + channel_idx) * groups; } int max_index = -1; bool continue_match = true; for (int g = 0; g < groups && continue_match; ++g) { int idx_offset = (axis == 1 ? g * feat_len : g); if (input_data[data_idx + idx_offset] == output_data[i]) { max_index = data_idx + idx_offset; continue_match = false; break; } } if (max_index != -1) { input_grad[max_index] += output_grad[index]; } } } /* * All tensors are in NCHW or NHWC format. 
*/ template <typename T> class MaxOutFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, framework::Tensor* output, const int groups, const int axis) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[axis]; const int input_height = (axis == 1 ? input.dims()[2] : input.dims()[1]); const int input_width = (axis == 1 ? input.dims()[3] : input.dims()[2]); const int output_channels = output->dims()[axis]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = output->numel(); int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxOut<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, groups, axis, output_data); } }; /* * All tensors are in NCHW or NHWC format. */ template <typename T> class MaxOutGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, framework::Tensor* input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, const int groups, const int axis) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[axis]; const int input_height = (axis == 1 ? input.dims()[2] : input.dims()[1]); const int input_width = (axis == 1 ? input.dims()[3] : input.dims()[2]); const int output_channels = output.dims()[axis]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = output.numel(); int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxoutGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_height, input_width, groups, axis); } }; template class MaxOutGradFunctor<platform::CUDADeviceContext, float>; template class MaxOutGradFunctor<platform::CUDADeviceContext, double>; template class MaxOutFunctor<platform::CUDADeviceContext, float>; template class MaxOutFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
575b86737fe994504e7890dc34d16073a7b49acc.cu
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/maxouting.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <typename T> __global__ void KernelMaxOut(const int nthreads, const T* input_data, const int channels, const int input_height, const int input_width, const int groups, const int axis, T* output_data) { const int size = input_height * input_width * channels / groups; const int feat_len = input_height * input_width; int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { int batch_idx = i / size; int batch_offset = i % size; int channel_idx, feat_idx, data_idx; if (axis == 1) { channel_idx = batch_offset / feat_len; feat_idx = batch_offset % feat_len; data_idx = (batch_idx * size + channel_idx * feat_len) * groups + feat_idx; } else { channel_idx = batch_offset % channels; feat_idx = batch_offset / channels; data_idx = (batch_idx * size + feat_idx * channels + channel_idx) * groups; } T ele = static_cast<T>(-FLT_MAX); for (int g = 0; g < groups; ++g) { int idx_offset = (axis == 1 ? g * feat_len : g); T x = input_data[data_idx + idx_offset]; ele = ele > x ? ele : x; } output_data[i] = ele; } } template <typename T> __global__ void KernelMaxoutGrad(const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_height, const int input_width, const int groups, const int axis) { const int size = input_height * input_width * channels / groups; const int feat_len = input_height * input_width; int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { int batch_idx = i / size; int batch_offset = i % size; int channel_idx, feat_idx, data_idx; if (axis == 1) { channel_idx = batch_offset / feat_len; feat_idx = batch_offset % feat_len; data_idx = (batch_idx * size + channel_idx * feat_len) * groups + feat_idx; } else { channel_idx = batch_offset % channels; feat_idx = batch_offset / channels; data_idx = (batch_idx * size + feat_idx * channels + channel_idx) * groups; } int max_index = -1; bool continue_match = true; for (int g = 0; g < groups && continue_match; ++g) { int idx_offset = (axis == 1 ? g * feat_len : g); if (input_data[data_idx + idx_offset] == output_data[i]) { max_index = data_idx + idx_offset; continue_match = false; break; } } if (max_index != -1) { input_grad[max_index] += output_grad[index]; } } } /* * All tensors are in NCHW or NHWC format. */ template <typename T> class MaxOutFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, framework::Tensor* output, const int groups, const int axis) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[axis]; const int input_height = (axis == 1 ? 
input.dims()[2] : input.dims()[1]); const int input_width = (axis == 1 ? input.dims()[3] : input.dims()[2]); const int output_channels = output->dims()[axis]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = output->numel(); int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxOut<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, groups, axis, output_data); } }; /* * All tensors are in NCHW or NHWC format. */ template <typename T> class MaxOutGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, framework::Tensor* input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, const int groups, const int axis) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[axis]; const int input_height = (axis == 1 ? input.dims()[2] : input.dims()[1]); const int input_width = (axis == 1 ? input.dims()[3] : input.dims()[2]); const int output_channels = output.dims()[axis]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = output.numel(); int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxoutGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_height, input_width, groups, axis); } }; template class MaxOutGradFunctor<platform::CUDADeviceContext, float>; template class MaxOutGradFunctor<platform::CUDADeviceContext, double>; template class MaxOutFunctor<platform::CUDADeviceContext, float>; template class MaxOutFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
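To make the NCHW (axis == 1) indexing in KernelMaxOut easier to follow, here is a small self-contained toy driver, independent of the Paddle framework types. The kernel body mirrors the data_idx arithmetic above; all names, sizes, and the 256-thread launch configuration are illustrative.

// Hypothetical standalone driver (not part of the Paddle build) showing how the
// NCHW maxout indexing works for axis == 1.
#include <cstdio>
#include <cfloat>
#include <cuda_runtime.h>

__global__ void maxout_nchw(const float* in, float* out, int n_out,
                            int channels, int feat_len, int groups) {
    // One thread per output element; "size" matches the kernel above:
    // output elements per batch = channels / groups * feat_len.
    int size = channels / groups * feat_len;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n_out;
         i += blockDim.x * gridDim.x) {
        int batch = i / size;
        int rem = i % size;
        int out_c = rem / feat_len;   // output channel index
        int pix = rem % feat_len;     // spatial position within the channel
        // First input element feeding this output: the same pixel in input
        // channel out_c * groups (the data_idx expression from KernelMaxOut).
        int base = (batch * size + out_c * feat_len) * groups + pix;
        float best = -FLT_MAX;
        for (int g = 0; g < groups; ++g)
            best = fmaxf(best, in[base + g * feat_len]);
        out[i] = best;
    }
}

int main() {
    const int N = 1, C = 4, H = 2, W = 2, groups = 2;
    const int in_elems = N * C * H * W;
    const int out_elems = in_elems / groups;
    float h_in[in_elems], h_out[out_elems];
    for (int i = 0; i < in_elems; ++i) h_in[i] = static_cast<float>(i % 7);
    float *d_in, *d_out;
    cudaMalloc(&d_in, in_elems * sizeof(float));
    cudaMalloc(&d_out, out_elems * sizeof(float));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    maxout_nchw<<<(out_elems + 255) / 256, 256>>>(d_in, d_out, out_elems, C,
                                                  H * W, groups);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    for (int i = 0; i < out_elems; ++i) printf("%g ", h_out[i]);
    printf("\n");
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}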
75ecbd279ab6cf6e5da4b0776de5dde0ed2d46a1.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/conv_grad_grad_kernel.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #ifdef PADDLE_WITH_HIP #include "paddle/fluid/operators/conv_miopen_helper.h" #else #include "paddle/fluid/operators/conv_cudnn_helper.h" #endif #include "paddle/fluid/platform/cudnn_workspace_helper.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/kernels/cpu/conv_util.h" #include "paddle/phi/kernels/funcs/batch_norm_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/padding.h" #include "paddle/phi/kernels/impl/conv_cudnn_impl.h" namespace phi { template <typename T, typename Context> void ConvCudnnGradGradKernel( const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* input_grad, DenseTensor* filter_grad, DenseTensor* out_grad_grad) { auto X = &input; auto W = &filter; auto dO = &out_grad; auto ddX = input_grad_grad.get_ptr(); auto ddW = filter_grad_grad.get_ptr(); auto ddO = out_grad_grad; auto dW = filter_grad; auto dX = input_grad; if (ddO) { ctx.template Alloc<T>(ddO); phi::funcs::SetConstant<Context, T> set_zero; set_zero(ctx, ddO, static_cast<T>(0)); } if (dW) { ctx.template Alloc<T>(dW); } if (dX) { ctx.template Alloc<T>(dX); } // const T* x = X->data<T>(); const T* dy = dO->data<T>(); const T* w = W->data<T>(); const T* ddx = nullptr; const T* ddw = nullptr; T *dw, *dx, *ddy; dw = dx = ddy = nullptr; T* transformed_dx = nullptr; std::vector<int> dilations = dilations_t; bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t; bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, phi::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); std::vector<int> paddings = paddings_t; const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); // transform Tensors to channel first----------- DenseTensor transformed_X_channel(X->type()); DenseTensor transformed_dO_channel(dO->type()); DenseTensor transformed_ddX_channel(X->type()); DenseTensor transformed_ddO_channel(dO->type()); DenseTensor 
transformed_dX_channel(X->type()); if (channel_last) { ResizeToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); TransToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); ResizeToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); TransToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); if (ddX) { ResizeToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); TransToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); } if (ddO) { ResizeToChannelFirst<Context, T>(ctx, ddO, &transformed_ddO_channel); } if (dX) { ResizeToChannelFirst<Context, T>(ctx, dX, &transformed_dX_channel); ctx.template Alloc<T>(&transformed_dX_channel); } } else { transformed_X_channel = *X; transformed_dO_channel = *dO; if (ddX) { transformed_ddX_channel = *ddX; } if (ddO) { transformed_ddO_channel.ShareDataWith(*ddO); } if (dX) { transformed_dX_channel.ShareDataWith(*dX); } } auto in_dims = transformed_X_channel.dims(); auto filter_dims = W->dims(); DDim in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = vectorize<int>(filter_data_dims); UpdatePaddingAndDilation( &paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim); DenseTensor transformed_X(X->type()); DenseTensor transformed_ddX(X->type()); DenseTensor transformed_dX(X->type()); std::vector<int> padding_common(data_dim, 0); std::vector<int> input_pad(X->dims().size() * 2, 0); if (!is_sys_pad) { // get pad std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_X_channel.dims()[0]; new_input_shape_vec[1] = transformed_X_channel.dims()[1]; for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]); new_input_shape_vec[i + 2] = transformed_X_channel.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } DDim new_input_shape(make_ddim(new_input_shape_vec)); transformed_X.Resize(new_input_shape); transformed_ddX.Resize(new_input_shape); transformed_dX.Resize(new_input_shape); ctx.template Alloc<T>(&transformed_X); if (ddX) { ctx.template Alloc<T>(&transformed_ddX); } if (dX) { ctx.template Alloc<T>(&transformed_dX); } // pad for input const int rank = X->dims().size(); T pad_value(0.0); switch (rank) { case 4: { funcs::PadFunction<Context, T, 4>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 4>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; case 5: { funcs::PadFunction<Context, T, 5>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 5>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; default: PADDLE_THROW(phi::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_X.ShareDataWith(transformed_X_channel); if (ddX) { transformed_ddX.ShareDataWith(transformed_ddX_channel); } if (dX) { transformed_dX.ShareDataWith(transformed_dX_channel); } if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i 
< data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* x = transformed_X.data<T>(); int iwo_group = groups; int c_group = 1; #if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) iwo_group = 1; c_group = groups; groups = 1; #endif auto dtype = paddle::platform::CudnnDataType<T>::type; auto handle = ctx.cudnn_handle(); paddle::operators::ConvArgs args1{&transformed_ddX, W, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args2{&transformed_X, ddW, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args4{&transformed_dX, ddW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; #ifdef PADDLE_WITH_HIP paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result1; paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result2; paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> data_result; paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t> filter_result; #else paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result1; paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result2; paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> data_result; paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t> filter_result; #endif auto layout = paddle::platform::GetCudnnTensorFormat( paddle::platform::DataLayout::kNCHW); // ddo = conv(ddI, W) + conv(I, ddW) size_t workspace_size = 0; T* transformed_ddy_channel = nullptr; if (ddO) { ddy = ddO->data<T>(); transformed_ddy_channel = transformed_ddO_channel.data<T>(); if (ddX) { args1.handle = handle; args1.idesc.set(transformed_ddX, iwo_group); args1.wdesc.set(*W, layout, iwo_group); args1.odesc.set(transformed_ddO_channel, iwo_group); args1.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search1 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search1::GetWorkspaceSize(args1); fwd_result1.algo = search1::Find<T>( args1, exhaustive_search, false, workspace_size, ctx); #else using search1 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_result1 = search1::Find<T>(args1, exhaustive_search, false, ctx); workspace_size = search1::GetWorkspaceSize(args1, fwd_result1.algo); #endif } if (ddW) { ddw = ddW->data<T>(); args2.handle = handle; args2.idesc.set(transformed_X, iwo_group); args2.wdesc.set(*ddW, layout, iwo_group); args2.odesc.set(transformed_ddO_channel, iwo_group); args2.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search2 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = ::max(workspace_size, search2::GetWorkspaceSize(args2)); fwd_result2.algo = search2::Find<T>( args2, exhaustive_search, false, workspace_size, ctx); #else using search2 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_result2 = search2::Find<T>(args2, exhaustive_search, false, ctx); workspace_size = ::max( workspace_size, search2::GetWorkspaceSize(args2, fwd_result2.algo)); #endif } } if (dW && ddX) { dw = dW->data<T>(); args3.handle = handle; args3.idesc.set(transformed_ddX, iwo_group); args3.wdesc.set(*dW, layout, iwo_group); args3.odesc.set(transformed_dO_channel, iwo_group); 
args3.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search3 = paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>; workspace_size = ::max(workspace_size, search3::GetWorkspaceSize(args3)); filter_result.algo = search3::Find<T>( args3, exhaustive_search, deterministic, workspace_size, ctx); #else using search3 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>; filter_result = search3::Find<T>(args3, exhaustive_search, deterministic, ctx); workspace_size = ::max( workspace_size, search3::GetWorkspaceSize(args3, filter_result.algo)); #endif } if (ddW && dX) { transformed_dx = transformed_dX.data<T>(); args4.handle = handle; args4.idesc.set(transformed_dX, iwo_group); args4.wdesc.set(*ddW, layout, iwo_group); args4.odesc.set(transformed_dO_channel, iwo_group); args4.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search4 = paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>; workspace_size = ::max(workspace_size, search4::GetWorkspaceSize(args4)); data_result.algo = search4::Find<T>( args4, exhaustive_search, deterministic, workspace_size, ctx); #else using search4 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>; data_result = search4::Find<T>(args4, exhaustive_search, deterministic, ctx); workspace_size = ::max( workspace_size, search4::GetWorkspaceSize(args4, data_result.algo)); #endif } int i_n, i_c, i_d, i_h, i_w; GetNCDHW( transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); int o_n, o_c, o_d, o_h, o_w; GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = W->numel() / groups; paddle::operators::ScalingParamType<T> alpha = 1.0f; paddle::operators::ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 
1.0f : // 0.0f; // VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto"); auto wkspace_handle = ctx.cudnn_workspace_handle(); if (ddO) { if (ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx, args1.wdesc.desc(), w, args1.cdesc.desc(), fwd_result1.algo, &beta, args1.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx + i * group_offset_in, args1.wdesc.desc(), w + i * group_offset_filter, args1.cdesc.desc(), fwd_result1.algo, workspace_ptr, workspace_size, &beta, args1.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (ddW) { #ifdef PADDLE_WITH_HIP // MIOPEN ONLY support beta to be 0.0f wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args2.idesc.desc(), x, args2.wdesc.desc(), ddw, args2.cdesc.desc(), fwd_result2.algo, &beta, args2.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args2.idesc.desc(), x + i * group_offset_in, args2.wdesc.desc(), ddw + i * group_offset_filter, args2.cdesc.desc(), fwd_result2.algo, workspace_ptr, workspace_size, &alpha, args2.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_ddO_channel, ddO); } } T* transformed_dy_channel = transformed_dO_channel.data<T>(); if (dW && ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args3.odesc.desc(), transformed_dy_channel, args3.idesc.desc(), ddx, args3.cdesc.desc(), filter_result.algo, &beta, args3.wdesc.desc(), dw, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in, args3.odesc.desc(), transformed_dy_channel + i * group_offset_out, args3.cdesc.desc(), filter_result.algo, workspace_ptr, workspace_size, &beta, args3.wdesc.desc(), dw + i * group_offset_filter)); }, workspace_size); } #endif } if (dX && ddW) { ddw = ddW->data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args4.odesc.desc(), transformed_dy_channel, args4.wdesc.desc(), ddw, args4.cdesc.desc(), data_result.algo, &beta, args4.idesc.desc(), transformed_dx, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, 
args4.wdesc.desc(), ddw + i * group_offset_filter, args4.odesc.desc(), transformed_dy_channel + i * group_offset_out, args4.cdesc.desc(), data_result.algo, workspace_ptr, workspace_size, &beta, args4.idesc.desc(), transformed_dx + i * group_offset_in)); }, workspace_size); } #endif if (!is_sys_pad) { // reverse padded input std::vector<int> starts(X->dims().size(), 0); std::vector<int> axes(X->dims().size(), 0); for (size_t i = 0; i < X->dims().size(); ++i) { starts[i] = input_pad[2 * i]; axes[i] = i; } if (X->dims().size() == 4) { paddle::operators::RemovePaddingSlice<Context, T, 4>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } else { paddle::operators::RemovePaddingSlice<Context, T, 5>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_dX_channel, dX); } } } template <typename T, typename Context> void DepthwiseConvDoubleGradGPUDNNKernel( const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, bool fuse_relu, DenseTensor* input_grad, DenseTensor* filter_grad, DenseTensor* out_grad_grad) { ConvCudnnGradGradKernel<T>(ctx, input, filter, out_grad, input_grad_grad, filter_grad_grad, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, input_grad, filter_grad, out_grad_grad); } template <typename T, typename Context> void Conv3DCudnnGradGradKernel( const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* input_grad, DenseTensor* filter_grad, DenseTensor* out_grad_grad) { ConvCudnnGradGradKernel<T>(ctx, input, filter, out_grad, input_grad_grad, filter_grad_grad, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, input_grad, filter_grad, out_grad_grad); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvDoubleGradGPUDNNKernel, float, phi::dtype::float16) {} #else #if CUDNN_VERSION_MIN(8, 1, 0) PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvDoubleGradGPUDNNKernel, float, double, phi::dtype::float16, 
phi::dtype::bfloat16) {} #else PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, double, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, double, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvDoubleGradGPUDNNKernel, float, double, phi::dtype::float16) {} #endif #endif
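The grouped-convolution branches above advance raw pointers by per-group offsets and issue one descriptor call per group. A schematic of that pointer arithmetic; launch_one_group stands in for the cudnnConvolutionForward / cudnnConvolutionBackward* call made inside the workspace lambda and is not a real API.

// Schematic only. The offsets come from the file above:
//   group_offset_in     = (C_in  / groups) * D * H * W
//   group_offset_out    = (C_out / groups) * D * H * W
//   group_offset_filter = numel(filter) / groups
#include <cstddef>

template <typename T, typename F>
void run_grouped(const T* x, const T* w, T* y, int groups,
                 std::ptrdiff_t group_offset_in,
                 std::ptrdiff_t group_offset_filter,
                 std::ptrdiff_t group_offset_out,
                 F launch_one_group) {
    // Each group operates on a contiguous channel slice of input, filter, output.
    for (int g = 0; g < groups; ++g) {
        launch_one_group(x + g * group_offset_in,
                         w + g * group_offset_filter,
                         y + g * group_offset_out);
    }
}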
75ecbd279ab6cf6e5da4b0776de5dde0ed2d46a1.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/conv_grad_grad_kernel.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #ifdef PADDLE_WITH_HIP #include "paddle/fluid/operators/conv_miopen_helper.h" #else #include "paddle/fluid/operators/conv_cudnn_helper.h" #endif #include "paddle/fluid/platform/cudnn_workspace_helper.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/kernels/cpu/conv_util.h" #include "paddle/phi/kernels/funcs/batch_norm_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/padding.h" #include "paddle/phi/kernels/impl/conv_cudnn_impl.h" namespace phi { template <typename T, typename Context> void ConvCudnnGradGradKernel( const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* input_grad, DenseTensor* filter_grad, DenseTensor* out_grad_grad) { auto X = &input; auto W = &filter; auto dO = &out_grad; auto ddX = input_grad_grad.get_ptr(); auto ddW = filter_grad_grad.get_ptr(); auto ddO = out_grad_grad; auto dW = filter_grad; auto dX = input_grad; if (ddO) { ctx.template Alloc<T>(ddO); phi::funcs::SetConstant<Context, T> set_zero; set_zero(ctx, ddO, static_cast<T>(0)); } if (dW) { ctx.template Alloc<T>(dW); } if (dX) { ctx.template Alloc<T>(dX); } // const T* x = X->data<T>(); const T* dy = dO->data<T>(); const T* w = W->data<T>(); const T* ddx = nullptr; const T* ddw = nullptr; T *dw, *dx, *ddy; dw = dx = ddy = nullptr; T* transformed_dx = nullptr; std::vector<int> dilations = dilations_t; bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t; bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, phi::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); std::vector<int> paddings = paddings_t; const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); // transform Tensors to channel first----------- DenseTensor transformed_X_channel(X->type()); DenseTensor transformed_dO_channel(dO->type()); DenseTensor transformed_ddX_channel(X->type()); DenseTensor transformed_ddO_channel(dO->type()); DenseTensor transformed_dX_channel(X->type()); if (channel_last) { 
ResizeToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); TransToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); ResizeToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); TransToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); if (ddX) { ResizeToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); TransToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); } if (ddO) { ResizeToChannelFirst<Context, T>(ctx, ddO, &transformed_ddO_channel); } if (dX) { ResizeToChannelFirst<Context, T>(ctx, dX, &transformed_dX_channel); ctx.template Alloc<T>(&transformed_dX_channel); } } else { transformed_X_channel = *X; transformed_dO_channel = *dO; if (ddX) { transformed_ddX_channel = *ddX; } if (ddO) { transformed_ddO_channel.ShareDataWith(*ddO); } if (dX) { transformed_dX_channel.ShareDataWith(*dX); } } auto in_dims = transformed_X_channel.dims(); auto filter_dims = W->dims(); DDim in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = vectorize<int>(filter_data_dims); UpdatePaddingAndDilation( &paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim); DenseTensor transformed_X(X->type()); DenseTensor transformed_ddX(X->type()); DenseTensor transformed_dX(X->type()); std::vector<int> padding_common(data_dim, 0); std::vector<int> input_pad(X->dims().size() * 2, 0); if (!is_sys_pad) { // get pad std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_X_channel.dims()[0]; new_input_shape_vec[1] = transformed_X_channel.dims()[1]; for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); new_input_shape_vec[i + 2] = transformed_X_channel.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } DDim new_input_shape(make_ddim(new_input_shape_vec)); transformed_X.Resize(new_input_shape); transformed_ddX.Resize(new_input_shape); transformed_dX.Resize(new_input_shape); ctx.template Alloc<T>(&transformed_X); if (ddX) { ctx.template Alloc<T>(&transformed_ddX); } if (dX) { ctx.template Alloc<T>(&transformed_dX); } // pad for input const int rank = X->dims().size(); T pad_value(0.0); switch (rank) { case 4: { funcs::PadFunction<Context, T, 4>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 4>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; case 5: { funcs::PadFunction<Context, T, 5>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 5>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; default: PADDLE_THROW(phi::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_X.ShareDataWith(transformed_X_channel); if (ddX) { transformed_ddX.ShareDataWith(transformed_ddX_channel); } if (dX) { transformed_dX.ShareDataWith(transformed_dX_channel); } if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * 
i]; } } } const T* x = transformed_X.data<T>(); int iwo_group = groups; int c_group = 1; #if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) iwo_group = 1; c_group = groups; groups = 1; #endif auto dtype = paddle::platform::CudnnDataType<T>::type; auto handle = ctx.cudnn_handle(); paddle::operators::ConvArgs args1{&transformed_ddX, W, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args2{&transformed_X, ddW, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args4{&transformed_dX, ddW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; #ifdef PADDLE_WITH_HIP paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result1; paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result2; paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> data_result; paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t> filter_result; #else paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result1; paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result2; paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> data_result; paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t> filter_result; #endif auto layout = paddle::platform::GetCudnnTensorFormat( paddle::platform::DataLayout::kNCHW); // ddo = conv(ddI, W) + conv(I, ddW) size_t workspace_size = 0; T* transformed_ddy_channel = nullptr; if (ddO) { ddy = ddO->data<T>(); transformed_ddy_channel = transformed_ddO_channel.data<T>(); if (ddX) { args1.handle = handle; args1.idesc.set(transformed_ddX, iwo_group); args1.wdesc.set(*W, layout, iwo_group); args1.odesc.set(transformed_ddO_channel, iwo_group); args1.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search1 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search1::GetWorkspaceSize(args1); fwd_result1.algo = search1::Find<T>( args1, exhaustive_search, false, workspace_size, ctx); #else using search1 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_result1 = search1::Find<T>(args1, exhaustive_search, false, ctx); workspace_size = search1::GetWorkspaceSize(args1, fwd_result1.algo); #endif } if (ddW) { ddw = ddW->data<T>(); args2.handle = handle; args2.idesc.set(transformed_X, iwo_group); args2.wdesc.set(*ddW, layout, iwo_group); args2.odesc.set(transformed_ddO_channel, iwo_group); args2.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search2 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2)); fwd_result2.algo = search2::Find<T>( args2, exhaustive_search, false, workspace_size, ctx); #else using search2 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_result2 = search2::Find<T>(args2, exhaustive_search, false, ctx); workspace_size = std::max( workspace_size, search2::GetWorkspaceSize(args2, fwd_result2.algo)); #endif } } if (dW && ddX) { dw = dW->data<T>(); args3.handle = handle; args3.idesc.set(transformed_ddX, iwo_group); args3.wdesc.set(*dW, layout, iwo_group); args3.odesc.set(transformed_dO_channel, iwo_group); args3.cdesc.set(dtype, padding_common, strides, 
dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search3 = paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>; workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3)); filter_result.algo = search3::Find<T>( args3, exhaustive_search, deterministic, workspace_size, ctx); #else using search3 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>; filter_result = search3::Find<T>(args3, exhaustive_search, deterministic, ctx); workspace_size = std::max( workspace_size, search3::GetWorkspaceSize(args3, filter_result.algo)); #endif } if (ddW && dX) { transformed_dx = transformed_dX.data<T>(); args4.handle = handle; args4.idesc.set(transformed_dX, iwo_group); args4.wdesc.set(*ddW, layout, iwo_group); args4.odesc.set(transformed_dO_channel, iwo_group); args4.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search4 = paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>; workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4)); data_result.algo = search4::Find<T>( args4, exhaustive_search, deterministic, workspace_size, ctx); #else using search4 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>; data_result = search4::Find<T>(args4, exhaustive_search, deterministic, ctx); workspace_size = std::max( workspace_size, search4::GetWorkspaceSize(args4, data_result.algo)); #endif } int i_n, i_c, i_d, i_h, i_w; GetNCDHW( transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); int o_n, o_c, o_d, o_h, o_w; GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = W->numel() / groups; paddle::operators::ScalingParamType<T> alpha = 1.0f; paddle::operators::ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 
1.0f : // 0.0f; // VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto"); auto wkspace_handle = ctx.cudnn_workspace_handle(); if (ddO) { if (ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx, args1.wdesc.desc(), w, args1.cdesc.desc(), fwd_result1.algo, &beta, args1.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx + i * group_offset_in, args1.wdesc.desc(), w + i * group_offset_filter, args1.cdesc.desc(), fwd_result1.algo, workspace_ptr, workspace_size, &beta, args1.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (ddW) { #ifdef PADDLE_WITH_HIP // MIOPEN ONLY support beta to be 0.0f wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args2.idesc.desc(), x, args2.wdesc.desc(), ddw, args2.cdesc.desc(), fwd_result2.algo, &beta, args2.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args2.idesc.desc(), x + i * group_offset_in, args2.wdesc.desc(), ddw + i * group_offset_filter, args2.cdesc.desc(), fwd_result2.algo, workspace_ptr, workspace_size, &alpha, args2.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_ddO_channel, ddO); } } T* transformed_dy_channel = transformed_dO_channel.data<T>(); if (dW && ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args3.odesc.desc(), transformed_dy_channel, args3.idesc.desc(), ddx, args3.cdesc.desc(), filter_result.algo, &beta, args3.wdesc.desc(), dw, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in, args3.odesc.desc(), transformed_dy_channel + i * group_offset_out, args3.cdesc.desc(), filter_result.algo, workspace_ptr, workspace_size, &beta, args3.wdesc.desc(), dw + i * group_offset_filter)); }, workspace_size); } #endif } if (dX && ddW) { ddw = ddW->data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args4.odesc.desc(), transformed_dy_channel, args4.wdesc.desc(), ddw, args4.cdesc.desc(), data_result.algo, &beta, args4.idesc.desc(), transformed_dx, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, 
args4.wdesc.desc(), ddw + i * group_offset_filter, args4.odesc.desc(), transformed_dy_channel + i * group_offset_out, args4.cdesc.desc(), data_result.algo, workspace_ptr, workspace_size, &beta, args4.idesc.desc(), transformed_dx + i * group_offset_in)); }, workspace_size); } #endif if (!is_sys_pad) { // reverse padded input std::vector<int> starts(X->dims().size(), 0); std::vector<int> axes(X->dims().size(), 0); for (size_t i = 0; i < X->dims().size(); ++i) { starts[i] = input_pad[2 * i]; axes[i] = i; } if (X->dims().size() == 4) { paddle::operators::RemovePaddingSlice<Context, T, 4>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } else { paddle::operators::RemovePaddingSlice<Context, T, 5>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_dX_channel, dX); } } } template <typename T, typename Context> void DepthwiseConvDoubleGradGPUDNNKernel( const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, bool fuse_relu, DenseTensor* input_grad, DenseTensor* filter_grad, DenseTensor* out_grad_grad) { ConvCudnnGradGradKernel<T>(ctx, input, filter, out_grad, input_grad_grad, filter_grad_grad, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, input_grad, filter_grad, out_grad_grad); } template <typename T, typename Context> void Conv3DCudnnGradGradKernel( const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* input_grad, DenseTensor* filter_grad, DenseTensor* out_grad_grad) { ConvCudnnGradGradKernel<T>(ctx, input, filter, out_grad, input_grad_grad, filter_grad_grad, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, input_grad, filter_grad, out_grad_grad); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvDoubleGradGPUDNNKernel, float, phi::dtype::float16) {} #else #if CUDNN_VERSION_MIN(8, 1, 0) PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvDoubleGradGPUDNNKernel, float, double, phi::dtype::float16, 
phi::dtype::bfloat16) {} #else PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, double, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, double, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvDoubleGradGPUDNNKernel, float, double, phi::dtype::float16) {} #endif #endif
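The registrations above close out the convolution double-grad path, which assembles its outputs from two forward convolutions plus one backward-filter and one backward-data pass. Below is a minimal, self-contained 1D host-side sketch of those three quantities; it is illustrative only, the 1D simplification and the xcorr/xcorr_transposed helper names are mine and are not Paddle or cuDNN API.

// Sketch (assumption: 1D, stride 1, no padding, single channel) of the three
// grad-of-grad quantities the kernel above computes with cudnn/miopen calls:
//   ddO = conv(ddX, W) + conv(X, ddW)   (two forward passes, second accumulated)
//   dW  = corr(ddX, dO)                 (backward-filter path)
//   dX  = conv_transposed(dO, ddW)      (backward-data path)
#include <vector>
#include <cstdio>

// valid 1D cross-correlation: out[i] = sum_k in[i+k] * w[k]
static std::vector<float> xcorr(const std::vector<float>& in, const std::vector<float>& w) {
    std::vector<float> out(in.size() - w.size() + 1, 0.f);
    for (size_t i = 0; i < out.size(); ++i)
        for (size_t k = 0; k < w.size(); ++k) out[i] += in[i + k] * w[k];
    return out;
}

// adjoint of the valid cross-correlation (transposed conv); restores input length
static std::vector<float> xcorr_transposed(const std::vector<float>& g, const std::vector<float>& w) {
    std::vector<float> out(g.size() + w.size() - 1, 0.f);
    for (size_t i = 0; i < g.size(); ++i)
        for (size_t k = 0; k < w.size(); ++k) out[i + k] += g[i] * w[k];
    return out;
}

int main() {
    std::vector<float> X   = {1, 2, 3, 4, 5};
    std::vector<float> W   = {1, 0, -1};
    std::vector<float> dO  = {0.1f, 0.2f, 0.3f};                  // upstream grad of the output
    std::vector<float> ddX = {0.5f, 0.5f, 0.5f, 0.5f, 0.5f};      // grad-of-grad w.r.t. input
    std::vector<float> ddW = {0.0f, 1.0f, 0.0f};                  // grad-of-grad w.r.t. filter

    // ddO: first forward pass, then the second one accumulated on top
    std::vector<float> ddO = xcorr(ddX, W);
    std::vector<float> t   = xcorr(X, ddW);
    for (size_t i = 0; i < ddO.size(); ++i) ddO[i] += t[i];

    // extra filter gradient: correlate ddX with dO (same shape as W)
    std::vector<float> dW = xcorr(ddX, dO);

    // extra input gradient: transposed conv of dO with ddW (same shape as X)
    std::vector<float> dX = xcorr_transposed(dO, ddW);

    for (float v : ddO) printf("%g ", v); printf("| ddO\n");
    for (float v : dW)  printf("%g ", v); printf("| dW\n");
    for (float v : dX)  printf("%g ", v); printf("| dX\n");
    return 0;
}

The accumulation of the second forward pass into ddO mirrors the grouped cuDNN branch above, where the second cudnnConvolutionForward call passes alpha in the beta slot so it adds onto transformed_ddy_channel instead of overwriting it.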
d14a89d005473282ea2a9e1e2c2589b479c701f9.hip
// !!! This is a file automatically generated by hipify!!! #include<utility> #include<stdio.h> #include<assert.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <ostream> #include <iostream> #include <cstdio> #include <cstdlib> #include <cmath> #include <fstream> #include <omp.h> #include <time.h> #include <string.h> #include <utility> __constant__ float matrixElementsGpu[5]; enum method_type { JACOBI, GS, SOR }; __device__ float jacobi(const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, const float leftX, const float centerX, const float rightX, const float topX, const float bottomX, const float centerRhs) { float result = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix; return result; } template <typename method_type> __host__ __device__ float iterativeOperation(const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, float leftX, float centerX, float rightX, float topX, float bottomX, const float centerRhs, int gridPoint, method_type method) { float gridValue = centerX; switch(method) { case JACOBI: return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix; case GS: if (gridPoint % 2 == 1) { return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix; } case SOR: float relaxation = 1.9939; if (gridPoint % 2 == 1) { return gridValue = relaxation*((centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix) + (1.0-relaxation)*centerX; } } return gridValue; } template <typename method_type> __host__ __device__ float iterativeOperation2(const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, float leftX, float centerX, float rightX, float topX, float bottomX, const float centerRhs, int gridPoint, method_type method) { float gridValue = centerX; switch(method) { case JACOBI: return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix; case GS: if (gridPoint % 2 == 0) { return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix; } case SOR: float relaxation = 1.9939; if (gridPoint % 2 == 0) { return gridValue = relaxation*((centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix) + (1.0-relaxation)*centerX; } } return gridValue; } __host__ __device__ void boundaryConditions(int IGrid, int nxGrids, int nyGrids, float &leftX, float &rightX, float&bottomX, float &topX) { // Left if (IGrid % nxGrids == 0) { leftX = 0.0; } // Right if (((IGrid+1) % nxGrids) == 0) { rightX = 0.0; } // Bottom if (IGrid < nxGrids) { bottomX = 0.0; } // Top if (IGrid >= (nxGrids * nyGrids - nxGrids)) { topX = 0.0; } return; } float normFromRow(float leftMatrix, float centerMatrix, float rightMatrix, float topMatrix, float bottomMatrix, float leftX, float centerX, float rightX, float topX, float bottomX, float centerRhs) { return centerRhs - (leftMatrix*leftX + centerMatrix*centerX + rightMatrix*rightX + topMatrix*topX + bottomMatrix*bottomX); } float Residual(const float * solution, const float * rhs, const 
float * matrixElements, int nxGrids, int nyGrids) { int nDofs = nxGrids * nyGrids; float residual = 0.0; const float bottomMatrix = matrixElements[0]; const float leftMatrix = matrixElements[1]; const float centerMatrix = matrixElements[2]; const float rightMatrix = matrixElements[3]; const float topMatrix = matrixElements[4]; for (int iGrid = 0; iGrid < nDofs; iGrid++) { float leftX = solution[iGrid-1]; float centerX = solution[iGrid]; float rightX = solution[iGrid+1]; float bottomX = solution[iGrid-nxGrids]; float topX; if (iGrid + nxGrids < nDofs) { topX = solution[iGrid+nxGrids]; } boundaryConditions(iGrid, nxGrids, nyGrids, leftX, rightX, bottomX, topX); float residualContributionFromRow = normFromRow(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, rhs[iGrid]); residual = residual + residualContributionFromRow * residualContributionFromRow; } residual = sqrt(residual); return residual; } float * iterativeCpu(const float * initX, const float * rhs, const float * matrixElements, const int nxGrids, const int nyGrids, const int nIters, const int method) { int nDofs = nxGrids * nyGrids; float * x0 = new float[nDofs]; float * x1 = new float[nDofs]; memcpy(x0, initX, sizeof(float) * nDofs); memcpy(x1, initX, sizeof(float)* nDofs); const float bottomMatrix = matrixElements[0]; const float leftMatrix = matrixElements[1]; const float centerMatrix = matrixElements[2]; const float rightMatrix = matrixElements[3]; const float topMatrix = matrixElements[4]; for (int iIter = 0; iIter < nIters; ++ iIter) { for (int iGrid = 0; iGrid < nDofs; ++iGrid) { const float leftX = ((iGrid % nxGrids) == 0) ? 0.0f : x0[iGrid - 1]; const float centerX = x0[iGrid]; const float rightX = (((iGrid + 1) % nxGrids) == 0) ? 0.0f : x0[iGrid + 1]; const float bottomX = (iGrid < nxGrids) ? 0.0f : x0[iGrid - nxGrids]; const float topX = (iGrid < nDofs - nxGrids) ? x0[iGrid + nxGrids] : 0.0f; if (iIter % 2 == 0) { x1[iGrid] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method); } else { x1[iGrid] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method); } } float * tmp = x0; x0 = x1; x1 = tmp; } delete[] x1; return x0; } __global__ void _iterativeGpuClassicIteration(float * x1, const float * x0, const float * rhs, const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, const int nxGrids, const int nyGrids, const int iteration, const int method) { const int ixGrid = blockIdx.x * blockDim.x + threadIdx.x; // Col const int iyGrid = blockIdx.y * blockDim.y + threadIdx.y; // Row const int iGrid = iyGrid * (nxGrids) + ixGrid; const int nDofs = nxGrids * nyGrids; if (iGrid < nDofs) { const float leftX = (ixGrid == 0) ? 0.0f : x0[iGrid - 1] ; const float centerX = x0[iGrid]; const float rightX = (ixGrid == nxGrids - 1) ? 0.0f : x0[iGrid + 1]; const float topX = (iyGrid == nyGrids - 1) ? 0.0f : x0[iGrid + nxGrids]; const float bottomX = (iyGrid == 0) ? 
0.0f : x0[iGrid - nxGrids]; if (iteration % 2 == 0) { x1[iGrid] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method); } else { x1[iGrid] = iterativeOperation2(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method); } } __syncthreads(); } float * iterativeGpuClassic(const float * initX, const float * rhs, const float * matrixElements, const int nxGrids, const int nyGrids, const int nIters, const int threadsPerBlock, const int method) { const int nDofs = nxGrids * nyGrids; // Allocate memory in the CPU for the solution float * x0Gpu, * x1Gpu; hipMalloc(&x0Gpu, sizeof(float) * nDofs); hipMalloc(&x1Gpu, sizeof(float) * nDofs); // Allocate CPU memory for other variables float * rhsGpu; hipMalloc(&rhsGpu, sizeof(float) * nDofs); // Allocate GPU memory hipMemcpy(x0Gpu, initX, sizeof(float) * nDofs, hipMemcpyHostToDevice); hipMemcpy(rhsGpu, rhs, sizeof(float) * nDofs, hipMemcpyHostToDevice); // Run the classic iteration for prescribed number of iterations // int threadsPerBlock = 16; const int nxBlocks = (int)ceil(nxGrids / (float)threadsPerBlock); const int nyBlocks = (int)ceil(nyGrids / (float)threadsPerBlock); dim3 grid(nxBlocks, nyBlocks); dim3 block(threadsPerBlock, threadsPerBlock); const float bottomMatrix = matrixElements[0]; const float leftMatrix = matrixElements[1]; const float centerMatrix = matrixElements[2]; const float rightMatrix = matrixElements[3]; const float topMatrix = matrixElements[4]; for (int iIter = 0; iIter < nIters; ++iIter) { // Jacobi iteration on the CPU (used to be <<<nBlocks, threadsPerBlock>>>) hipLaunchKernelGGL(( _iterativeGpuClassicIteration), dim3(grid), dim3(block), 0, 0, x1Gpu, x0Gpu, rhsGpu, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, iIter, method); float * tmp = x1Gpu; x0Gpu = x1Gpu; x1Gpu = tmp; } // Write solution from GPU to CPU variable float * solution = new float[nDofs]; hipMemcpy(solution, x0Gpu, sizeof(float) * nDofs, hipMemcpyDeviceToHost); // Free all memory hipFree(x0Gpu); hipFree(x1Gpu); hipFree(rhsGpu); return solution; } //// SWEPT METHODS HERE //// __device__ void __iterativeBlockUpdateToLeftRight(float * xLeftBlock, float * xRightBlock, const float *rhsBlock, const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, int nxGrids, int nyGrids, int iGrid, int method, int subdomainLength, bool diagonal, int maxSteps) { // Initialize shared memory and pointers to x0, x1 arrays containing Jacobi solutions extern __shared__ float sharedMemory[]; float * x0 = sharedMemory; int elemPerBlock = subdomainLength * subdomainLength; float * x1 = sharedMemory + elemPerBlock; // Define number of Jacobi steps to take, and current index and stride value int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; // Perform Jacobi iterations for (int k = 0; k < maxSteps; k++) { for (int idx = index; idx < elemPerBlock; idx += stride) { if ((idx % subdomainLength != 0) && ((idx+1) % subdomainLength != 0) && (idx > subdomainLength-1) && (idx < elemPerBlock-(subdomainLength-1))) { // Define necessary constants float centerRhs = rhsBlock[idx]; float leftX = x0[idx-1]; float centerX = x0[idx]; float rightX = x0[idx+1]; float topX = x0[idx+subdomainLength]; float bottomX = x0[idx-subdomainLength]; // Apply boundary conditions int step = idx / stride; int Idx = 
(stride % subdomainLength) + (stride/subdomainLength) * nxGrids; int IGrid = iGrid + step * Idx; if (diagonal == true) { int nDofs = nxGrids * nyGrids; if ((blockIdx.y == gridDim.y-1) && idx/subdomainLength >= subdomainLength/2) { IGrid = IGrid - nDofs; } if ((blockIdx.x == gridDim.x-1) && (idx % subdomainLength) >= (subdomainLength/2)) { IGrid = IGrid - nxGrids; } } boundaryConditions(IGrid, nxGrids, nyGrids, leftX, rightX, bottomX, topX); //__syncthreads(); // Perform update // x1[idx] = increment(centerX); // x1[idx] = jacobi(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, // leftX, centerX, rightX, topX, bottomX, centerRhs); if (k % 2 == 0) { x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, centerRhs, iGrid, method); } else { x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, centerRhs, iGrid, method); } // Synchronize //__syncthreads(); } } __syncthreads(); float * tmp; tmp = x0; x0 = x1, x1 = tmp; } //__syncthreads(); index = threadIdx.x + threadIdx.y * blockDim.x; stride = blockDim.x * blockDim.y; for (int idx = index; idx < elemPerBlock/2; idx += stride) { xLeftBlock[idx] = x0[subdomainLength * (idx % subdomainLength) + (idx/subdomainLength)]; xRightBlock[idx] = x0[subdomainLength * (idx % subdomainLength) - (idx/subdomainLength) + (subdomainLength-1)]; } } __device__ void __iterativeBlockUpdateToNorthSouth(float * xTopBlock, float * xBottomBlock, const float *rhsBlock, const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, int nxGrids, int nyGrids, int iGrid, int method, int subdomainLength, bool vertical, int maxSteps) { extern __shared__ float sharedMemory[]; float * x0 = sharedMemory; int elemPerBlock = subdomainLength * subdomainLength; float * x1 = sharedMemory + elemPerBlock; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; __syncthreads(); for (int k = 0; k < maxSteps; k++) { for (int idx = index; idx < elemPerBlock; idx += stride) { if ((idx % subdomainLength != 0) && ((idx+1) % subdomainLength != 0) && (idx > subdomainLength-1) && (idx < elemPerBlock-subdomainLength-1)) { // Define necessary constants float centerRhs = rhsBlock[idx]; float leftX = x0[idx-1]; float centerX = x0[idx]; float rightX = x0[idx+1]; float topX = x0[idx+subdomainLength]; float bottomX = x0[idx-subdomainLength]; int step = idx / stride; int Idx = (stride % subdomainLength) + (stride/subdomainLength) * nxGrids; int IGrid = iGrid + step * Idx; if (vertical == true) { int nDofs = nxGrids * nyGrids; if ((blockIdx.y == gridDim.y-1) && idx/subdomainLength >= subdomainLength/2) { IGrid = IGrid - nDofs; } } else { if ((blockIdx.x == gridDim.x-1) && (idx % subdomainLength) >= (subdomainLength/2)) { IGrid = IGrid - nxGrids; } } boundaryConditions(IGrid, nxGrids, nyGrids, leftX, rightX, bottomX, topX); //__syncthreads(); // Perform update //x1[idx] = increment(centerX); // x1[idx] = jacobi(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, // leftX, centerX, rightX, topX, bottomX, centerRhs); if (k % 2 == 0) { x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, centerRhs, iGrid, method); } else { x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, 
centerRhs, iGrid, method); } // Synchronize //__syncthreads(); } } __syncthreads(); float * tmp; tmp = x0; x0 = x1, x1 = tmp; } // Return values for xTop and xBottom here for (int idx = index; idx < elemPerBlock/2; idx += stride) { xBottomBlock[idx] = x0[idx]; xTopBlock[idx] = x0[subdomainLength * (subdomainLength-1-idx/subdomainLength) + (idx % subdomainLength)]; } } __global__ void _iterativeGpuOriginal(float * xLeftGpu, float *xRightGpu, const float * x0Gpu, const float *rhsGpu, const int nxGrids, const int nyGrids, const int method, const int subdomainLength, const int maxSteps) { const int xShift = subdomainLength * blockIdx.x; const int yShift = subdomainLength * blockIdx.y; const int blockShift = xShift + yShift * nxGrids; const float * x0Block = x0Gpu + blockShift; const float * rhsBlock = rhsGpu + blockShift; const float bottomMatrix = matrixElementsGpu[0]; const float leftMatrix = matrixElementsGpu[1]; const float centerMatrix = matrixElementsGpu[2]; const float rightMatrix = matrixElementsGpu[3]; const float topMatrix = matrixElementsGpu[4]; int numElementsPerBlock = subdomainLength * subdomainLength; int blockID = blockIdx.x + blockIdx.y * gridDim.x; int arrayShift = (numElementsPerBlock*blockID)/2; float * xLeftBlock = xLeftGpu + arrayShift; float * xRightBlock = xRightGpu + arrayShift; extern __shared__ float sharedMemory[]; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; for (int idx = index; idx < numElementsPerBlock; idx += stride) { int Idx = (idx % subdomainLength) + (idx/subdomainLength) * nxGrids; sharedMemory[idx] = x0Block[Idx]; sharedMemory[idx + numElementsPerBlock] = x0Block[Idx]; } int iGrid = blockShift + (index/subdomainLength) * nxGrids + index % subdomainLength; __iterativeBlockUpdateToLeftRight(xLeftBlock, xRightBlock, rhsBlock, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, iGrid, method, subdomainLength, false, maxSteps); } __global__ void _iterativeGpuHorizontalShift(const float * xLeftGpu, const float * xRightGpu, float * xTopGpu, float * xBottomGpu, const float * x0Gpu, const float * rhsGpu, const int nxGrids, const int nyGrids, const int method, const int subdomainLength, const int maxSteps) { int xShift = subdomainLength * blockIdx.x; int yShift = subdomainLength * blockIdx.y; int blockShift = xShift + yShift * nxGrids; int horizontalShift = subdomainLength/2; const float * rhsBlock = rhsGpu + blockShift; //+ horizontalShift; const float bottomMatrix = matrixElementsGpu[0]; const float leftMatrix = matrixElementsGpu[1]; const float centerMatrix = matrixElementsGpu[2]; const float rightMatrix = matrixElementsGpu[3]; const float topMatrix = matrixElementsGpu[4]; int numElementsPerBlock = (subdomainLength * subdomainLength)/2; int blockID = blockIdx.x + blockIdx.y * gridDim.x; int arrayShift = numElementsPerBlock*blockID; const float * xLeftBlock = xRightGpu + arrayShift; const float * xRightBlock = (blockIdx.x != gridDim.x-1) ? 
xLeftGpu + arrayShift + numElementsPerBlock : xLeftGpu + (numElementsPerBlock * blockIdx.y * gridDim.x); float * xBottomBlock = xBottomGpu + arrayShift; float * xTopBlock = xTopGpu + arrayShift; extern __shared__ float sharedMemory[]; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; for (int idx = index; idx < subdomainLength * subdomainLength; idx += stride) { if (idx % subdomainLength < subdomainLength/2) { int Idx = ((subdomainLength-1)/2-(idx % subdomainLength)) * subdomainLength + idx/subdomainLength; sharedMemory[idx] = xLeftBlock[Idx]; sharedMemory[idx + subdomainLength * subdomainLength] = xLeftBlock[Idx]; } else { int Idx = ((idx % subdomainLength) - (subdomainLength-1)/2 - 1) * subdomainLength + idx/subdomainLength; sharedMemory[idx] = xRightBlock[Idx]; sharedMemory[idx + subdomainLength * subdomainLength] = xRightBlock[Idx]; } } int iGrid = blockShift + (index/subdomainLength) * nxGrids + index % subdomainLength + horizontalShift; //__syncthreads(); __iterativeBlockUpdateToNorthSouth(xTopBlock, xBottomBlock, rhsBlock, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, iGrid, method, subdomainLength, false, maxSteps); } __global__ void _iterativeGpuVerticalandHorizontalShift(float * xLeftGpu, float * xRightGpu, const float * xTopGpu, const float * xBottomGpu, const float * x0Gpu, const float * rhsGpu, const int nxGrids, const int nyGrids, const int method, const int subdomainLength, const int maxSteps) { int xShift = subdomainLength * blockIdx.x; int yShift = subdomainLength * blockIdx.y; int blockShift = xShift + yShift * nxGrids; int horizontalShift = subdomainLength/2; int verticalShift = subdomainLength/2 * nxGrids; const float * rhsBlock = rhsGpu + blockShift; //+ verticalShift; const float bottomMatrix = matrixElementsGpu[0]; const float leftMatrix = matrixElementsGpu[1]; const float centerMatrix = matrixElementsGpu[2]; const float rightMatrix = matrixElementsGpu[3]; const float topMatrix = matrixElementsGpu[4]; int numElementsPerBlock = (subdomainLength * subdomainLength)/2; int blockID = blockIdx.x + blockIdx.y * gridDim.x; int arrayShift = numElementsPerBlock*blockID; const float * xBottomBlock = xTopGpu + arrayShift; const float * xTopBlock = (blockIdx.y != gridDim.y-1) ? 
xBottomGpu + numElementsPerBlock * gridDim.x + arrayShift : xBottomGpu + (numElementsPerBlock * blockIdx.x); float * xLeftBlock = xLeftGpu + arrayShift; float * xRightBlock = xRightGpu + arrayShift; extern __shared__ float sharedMemory[]; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; for (int idx = index; idx < subdomainLength * subdomainLength; idx += stride) { if (idx < numElementsPerBlock) { sharedMemory[idx] = xBottomBlock[(subdomainLength/2-1-idx/subdomainLength) * subdomainLength + idx % subdomainLength]; sharedMemory[idx + subdomainLength * subdomainLength] = xBottomBlock[(subdomainLength/2-1-idx/subdomainLength) * subdomainLength + idx % subdomainLength]; } else { sharedMemory[idx] = xTopBlock[idx - numElementsPerBlock]; sharedMemory[idx + subdomainLength * subdomainLength] = xTopBlock[idx - numElementsPerBlock]; } } int iGrid = blockShift + (index/subdomainLength) * nxGrids + index % subdomainLength + horizontalShift + verticalShift; __iterativeBlockUpdateToLeftRight(xLeftBlock, xRightBlock, rhsBlock, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, iGrid, method, subdomainLength, true, maxSteps); } __global__ void _iterativeGpuVerticalShift(const float * xLeftGpu, const float * xRightGpu, float * xTopGpu, float * xBottomGpu, const float * x0Gpu, const float * rhsGpu, const int nxGrids, const int nyGrids, const int method, const int subdomainLength, int maxSteps) { int xShift = subdomainLength * blockIdx.x; int yShift = subdomainLength * blockIdx.y; int blockShift = xShift + yShift * nxGrids; int verticalShift = subdomainLength/2 * nxGrids; const float * rhsBlock = rhsGpu + blockShift; //+ verticalShift; const float bottomMatrix = matrixElementsGpu[0]; const float leftMatrix = matrixElementsGpu[1]; const float centerMatrix = matrixElementsGpu[2]; const float rightMatrix = matrixElementsGpu[3]; const float topMatrix = matrixElementsGpu[4]; int numElementsPerBlock = (subdomainLength * subdomainLength)/2; int blockID = blockIdx.x + blockIdx.y * gridDim.x; int arrayShift = numElementsPerBlock*blockID; const float * xRightBlock = xLeftGpu + arrayShift; const float * xLeftBlock = (blockIdx.x != 0) ? 
xRightGpu + arrayShift - numElementsPerBlock : xRightGpu + numElementsPerBlock * ((gridDim.x-1) + blockIdx.y * gridDim.x); float * xBottomBlock = xBottomGpu + arrayShift; float * xTopBlock = xTopGpu + arrayShift; extern __shared__ float sharedMemory[]; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; for (int idx = index; idx < subdomainLength * subdomainLength; idx += stride) { if (idx % subdomainLength < subdomainLength/2) { int Idx = ((subdomainLength-1)/2-(idx % subdomainLength)) * subdomainLength + idx/subdomainLength; sharedMemory[idx] = xLeftBlock[Idx]; sharedMemory[idx + subdomainLength * subdomainLength] = xLeftBlock[Idx]; } else { int Idx = ((idx % subdomainLength) - (subdomainLength-1)/2 - 1) * subdomainLength + idx/subdomainLength; sharedMemory[idx] = xRightBlock[Idx]; sharedMemory[idx + subdomainLength * subdomainLength] = xRightBlock[Idx]; } } int iGrid = blockShift + (index/subdomainLength) * nxGrids + index % subdomainLength + verticalShift; __iterativeBlockUpdateToNorthSouth(xTopBlock, xBottomBlock, rhsBlock, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, iGrid, method, subdomainLength, true, maxSteps); } __global__ void _finalSolution(const float * xTopGpu, const float * xBottomGpu, float * x0Gpu, const int nxGrids, const int subdomainLength) { extern __shared__ float sharedMemory[]; int numElementsPerBlock = (subdomainLength * subdomainLength)/2; int blockID = blockIdx.x + blockIdx.y * gridDim.x; int arrayShift = numElementsPerBlock*blockID; const float * xTopBlock = xBottomGpu + arrayShift; const float * xBottomBlock = (blockIdx.y != 0) ? xTopGpu + (blockIdx.x + (blockIdx.y-1) * gridDim.x) * numElementsPerBlock : xTopGpu + (gridDim.x * (gridDim.y-1) + blockIdx.x) * numElementsPerBlock; int xShift = subdomainLength * blockIdx.x; int yShift = subdomainLength * blockIdx.y; int blockShift = xShift + yShift * nxGrids; float * x0Block = x0Gpu + blockShift; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; for (int idx = index; idx < numElementsPerBlock; idx += stride) { sharedMemory[idx + numElementsPerBlock] = xTopBlock[idx]; sharedMemory[(subdomainLength/2 - 1 - idx/subdomainLength) * subdomainLength + idx % subdomainLength] = xBottomBlock[idx]; } __syncthreads(); for (int idx = index; idx < 2*numElementsPerBlock; idx += stride) { int Idx = (idx % subdomainLength) + (idx/subdomainLength) * nxGrids; x0Block[Idx] = sharedMemory[idx]; } } /////////////////////////////////////////////////// float * iterativeGpuSwept(const float * initX, const float * rhs, const float * matrixElements, const int nxGrids, const int nyGrids, const int nCycles, const int maxSteps, const int threadsPerBlock, const int method, const int subdomainLength) { // Determine number of threads and blocks const int nxBlocks = (int)ceil(nxGrids / (float)subdomainLength); const int nyBlocks = (int)ceil(nyGrids / (float)subdomainLength); const int nDofs = nxGrids * nyGrids; dim3 grid(nxBlocks, nyBlocks); dim3 block(threadsPerBlock, threadsPerBlock); // Allocate memory for solution and inputs float *xLeftGpu, *xRightGpu, *xTopGpu, *xBottomGpu; const int numSharedElemPerBlock = subdomainLength * subdomainLength / 2; hipMalloc(&xLeftGpu, sizeof(float) * numSharedElemPerBlock * nxBlocks * nyBlocks); hipMalloc(&xRightGpu, sizeof(float) * numSharedElemPerBlock * nxBlocks * nyBlocks); hipMalloc(&xTopGpu, sizeof(float) * numSharedElemPerBlock * nxBlocks * nyBlocks); hipMalloc(&xBottomGpu, sizeof(float) * 
numSharedElemPerBlock * nxBlocks * nyBlocks); float * x0Gpu, * rhsGpu; hipMalloc(&x0Gpu, sizeof(float) * nDofs); hipMalloc(&rhsGpu, sizeof(float) * nDofs); /* STORING MATRIX IN GLOBAL MEMORY float * matrixElementsGpu; hipMalloc(&matrixElementsGpu, sizeof(float) * 5); hipMemcpy(matrixElementsGpu, matrixElements, sizeof(float) * 5, hipMemcpyHostToDevice); */ // STORING MATRIX IN CONSTANT MEMORY hipMemcpyToSymbol(matrixElementsGpu, matrixElements, sizeof(float) * 5); // Allocate memory in the GPU hipMemcpy(x0Gpu, initX, sizeof(float) * nDofs, hipMemcpyHostToDevice); hipMemcpy(rhsGpu, rhs, sizeof(float) * nDofs, hipMemcpyHostToDevice); const int sharedBytes = 2 * subdomainLength * subdomainLength * sizeof(float); for (int i = 0; i < nCycles; i++) { // APPLY METHOD TO ADVANCE POINTS (NO SHIFT) hipLaunchKernelGGL(( _iterativeGpuOriginal) , dim3(grid), dim3(block), sharedBytes, 0, xLeftGpu, xRightGpu, x0Gpu, rhsGpu, nxGrids, nyGrids, method, subdomainLength, maxSteps); // APPLY HORIZONTAL SHIFT hipLaunchKernelGGL(( _iterativeGpuHorizontalShift) , dim3(grid), dim3(block), sharedBytes, 0, xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, x0Gpu, rhsGpu, nxGrids, nyGrids, method, subdomainLength, maxSteps); // APPLY VERTICAL SHIFT (ALONG WITH PREVIOUS HORIZONTAL SHIFT) hipLaunchKernelGGL(( _iterativeGpuVerticalandHorizontalShift) , dim3(grid), dim3(block), sharedBytes, 0, xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, x0Gpu, rhsGpu, nxGrids, nyGrids, method, subdomainLength, maxSteps); // APPLY VERTICAL SHIFT hipLaunchKernelGGL(( _iterativeGpuVerticalShift) , dim3(grid), dim3(block), sharedBytes, 0, xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, x0Gpu, rhsGpu, nxGrids, nyGrids, method, subdomainLength, maxSteps); // APPLY FINAL STEP hipLaunchKernelGGL(( _finalSolution) , dim3(grid), dim3(block), sharedBytes, 0, xTopGpu, xBottomGpu, x0Gpu, nxGrids, subdomainLength); } float * solution = new float[nDofs]; hipMemcpy(solution, x0Gpu, sizeof(float) * nDofs, hipMemcpyDeviceToHost); hipFree(x0Gpu); hipFree(xLeftGpu); hipFree(xRightGpu); hipFree(xTopGpu); hipFree(xBottomGpu); hipFree(rhsGpu); // hipFree(matrixElementsGpu); return solution; } int main(int argc, char *argv[]) { // Ask user for inputs const int nxGrids = atoi(argv[1]); const int nyGrids = atoi(argv[1]); const int subdomainLength = atoi(argv[2]); const int threadsPerBlock = atoi(argv[3]); const int nCycles = atoi(argv[4]); const int maxSteps = atoi(argv[5]); const int nIters = atoi(argv[6]); method_type method = JACOBI; const int nDofs = nxGrids * nyGrids; // Declare arrays and population with values for Poisson equation float * initX = new float[nDofs]; float * rhs = new float[nDofs]; const float dx = 1.0f / (nxGrids + 1); const float dy = 1.0f / (nyGrids + 1); for (int iGrid = 0; iGrid < nDofs; ++iGrid) { initX[iGrid] = (float)iGrid; rhs[iGrid] = 1.0f; } float * matrixElements = new float[5]; matrixElements[0] = -1.0f / (dy * dy); matrixElements[1] = -1.0f / (dx * dx); matrixElements[2] = 2.0f / (dx * dx) + 2.0f / (dy * dy); matrixElements[3] = -1.0f / (dx * dx); matrixElements[4] = -1.0f / (dy * dy); // Amount of shared memory to be requested const int sharedMem = 2 * subdomainLength * subdomainLength * sizeof(float); // Run the CPU Implementation and measure the time required clock_t cpuStartTime = clock(); float * solutionCpu = iterativeCpu(initX, rhs, matrixElements, nxGrids, nyGrids, nIters, method); clock_t cpuEndTime = clock(); float cpuTime = (cpuEndTime - cpuStartTime) / (float) CLOCKS_PER_SEC; // Run the Classic GPU Implementation and measure the time 
required hipEvent_t startClassic, stopClassic; float timeClassic; hipEventCreate( &startClassic ); hipEventCreate( &stopClassic ); hipEventRecord(startClassic, 0); float * solutionGpuClassic = iterativeGpuClassic(initX, rhs, matrixElements, nxGrids, nyGrids, nIters, threadsPerBlock, method); hipEventRecord(stopClassic, 0); hipEventSynchronize(stopClassic); hipEventElapsedTime(&timeClassic, startClassic, stopClassic); // Run the Swept GPU Implementation and measure the time required hipEvent_t startSwept, stopSwept; float timeSwept; hipEventCreate( &startSwept ); hipEventCreate( &stopSwept ); hipEventRecord( startSwept, 0); float * solutionGpuSwept = iterativeGpuSwept(initX, rhs, matrixElements, nxGrids, nyGrids, nCycles, maxSteps, threadsPerBlock, method, subdomainLength); hipEventRecord(stopSwept, 0); hipEventSynchronize(stopSwept); hipEventElapsedTime(&timeSwept, startSwept, stopSwept); // Print parameters of the problem to screen printf("===============INFORMATION============================\n"); printf("Size of the linear system (Nx, Ny): %d (%d, %d) \n", nDofs, nxGrids, nyGrids); printf("Size of each subdomain handled by a block and threads per block: %d and %d\n", subdomainLength, threadsPerBlock); printf("Method used: %d\n", method); printf("Number of Iterations performed: %d\n", nIters); printf("Number of Swept Cycles performed: %d\n", nCycles); printf("Number of Iterations performed internally at each Swept Stage: %d\n", maxSteps); printf("Amount of shared memory to be requested: %d B\n", sharedMem); // Print out results to the screen, notify if any GPU Classic or Swept values differ significantly for (int iGrid = 0; iGrid < nDofs; ++iGrid) { printf("%d %f %f %f \n",iGrid, solutionCpu[iGrid], solutionGpuClassic[iGrid], solutionGpuSwept[iGrid]); //assert(solutionGpuClassic[iGrid] == solutionGpuSwept[iGrid]); // if (abs(solutionGpuClassic[iGrid] - solutionGpuSwept[iGrid]) > 1e-2) { // printf("For grid point %d, Classic and Swept give %f and %f respectively\n", iGrid, solutionGpuClassic[iGrid], solutionGpuSwept[iGrid]); // } } // Print out time for cpu, classic gpu, and swept gpu approaches // float cpuTimePerIteration = (cpuTime / nIters) * 1e3; // float classicTimePerIteration = timeClassic / nIters; // float sweptTimePerIteration = timeSwept / nIters; // float timeMultiplier = classicTimePerIteration / sweptTimePerIteration; /* printf("Time needed for the CPU (per iteration): %f ms\n", cpuTimePerIteration); printf("Time needed for the Classic GPU (per iteration) is %f ms\n", classicTimePerIteration); printf("Time needed for the Swept GPU (per iteration): %f ms\n", sweptTimePerIteration); */ printf("TIMING INFORMATION:\n"); printf("Total Time needed for the CPU: %f ms\n", cpuTime * 1e3); printf("Total Time needed for the Classic GPU is %f ms\n", timeClassic); printf("Total Time needed for the Swept GPU: %f ms\n", timeSwept); printf("Swept takes %f the time Classic takes\n", timeSwept / timeClassic); // Compute the residual of the resulting solution (||b-Ax||) float residualCPU = Residual(solutionGpuClassic, rhs, matrixElements, nxGrids, nyGrids); float residualClassicGPU = Residual(solutionGpuClassic, rhs, matrixElements, nxGrids, nyGrids); float residualSweptGPU = Residual(solutionGpuSwept, rhs, matrixElements, nxGrids, nyGrids); printf("Swept takes %f the time Classic takes\n", timeSwept / timeClassic); printf("RESIDUAL INFORMATION:\n"); printf("Residual of the CPU solution is %f\n", residualCPU); printf("Residual of the Classic GPU solution is %f\n", residualClassicGPU); 
printf("Residual of the Swept GPU solution is %f\n", residualSweptGPU); printf("The residual of Swept is %f times that of Classic\n", residualSweptGPU / residualClassicGPU); // Save residual to a file /* std::ofstream residuals; residuals.open("residual-gs.txt",std::ios_base::app); residuals << nGrids << "\t" << threadsPerBlock << "\t" << nIters << "\t" << residualSwept << "\n"; residuals.close(); */ // Save Results to a file "N tpb Iterations CPUTime/perstep ClassicTime/perstep SweptTime/perStep ClassicTime/SweptTime" std::ofstream timings; timings.open("time.txt",std::ios_base::app); // timings << nxGrids << "\t" << nyGrids << "\t" << threadsPerBlock << "\t" << nIters << "\t" << cpuTimePerIteration << "\t" << classicTimePerIteration << "\t" << sweptTimePerIteration << "\t" << timeMultiplier << "\n"; timings.close(); // Free memory hipEventDestroy(startClassic); hipEventDestroy(startSwept); delete[] initX; delete[] rhs; delete[] matrixElements; delete[] solutionCpu; delete[] solutionGpuClassic; delete[] solutionGpuSwept; }
d14a89d005473282ea2a9e1e2c2589b479c701f9.cu
#include<utility> #include<stdio.h> #include<assert.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <ostream> #include <iostream> #include <cstdio> #include <cstdlib> #include <cmath> #include <fstream> #include <omp.h> #include <time.h> #include <string.h> #include <utility> __constant__ float matrixElementsGpu[5]; enum method_type { JACOBI, GS, SOR }; __device__ float jacobi(const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, const float leftX, const float centerX, const float rightX, const float topX, const float bottomX, const float centerRhs) { float result = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix; return result; } template <typename method_type> __host__ __device__ float iterativeOperation(const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, float leftX, float centerX, float rightX, float topX, float bottomX, const float centerRhs, int gridPoint, method_type method) { float gridValue = centerX; switch(method) { case JACOBI: return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix; case GS: if (gridPoint % 2 == 1) { return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix; } case SOR: float relaxation = 1.9939; if (gridPoint % 2 == 1) { return gridValue = relaxation*((centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix) + (1.0-relaxation)*centerX; } } return gridValue; } template <typename method_type> __host__ __device__ float iterativeOperation2(const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, float leftX, float centerX, float rightX, float topX, float bottomX, const float centerRhs, int gridPoint, method_type method) { float gridValue = centerX; switch(method) { case JACOBI: return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix; case GS: if (gridPoint % 2 == 0) { return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix; } case SOR: float relaxation = 1.9939; if (gridPoint % 2 == 0) { return gridValue = relaxation*((centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix) + (1.0-relaxation)*centerX; } } return gridValue; } __host__ __device__ void boundaryConditions(int IGrid, int nxGrids, int nyGrids, float &leftX, float &rightX, float&bottomX, float &topX) { // Left if (IGrid % nxGrids == 0) { leftX = 0.0; } // Right if (((IGrid+1) % nxGrids) == 0) { rightX = 0.0; } // Bottom if (IGrid < nxGrids) { bottomX = 0.0; } // Top if (IGrid >= (nxGrids * nyGrids - nxGrids)) { topX = 0.0; } return; } float normFromRow(float leftMatrix, float centerMatrix, float rightMatrix, float topMatrix, float bottomMatrix, float leftX, float centerX, float rightX, float topX, float bottomX, float centerRhs) { return centerRhs - (leftMatrix*leftX + centerMatrix*centerX + rightMatrix*rightX + topMatrix*topX + bottomMatrix*bottomX); } float Residual(const float * solution, const float * rhs, const float * matrixElements, int nxGrids, int nyGrids) { int nDofs = 
nxGrids * nyGrids; float residual = 0.0; const float bottomMatrix = matrixElements[0]; const float leftMatrix = matrixElements[1]; const float centerMatrix = matrixElements[2]; const float rightMatrix = matrixElements[3]; const float topMatrix = matrixElements[4]; for (int iGrid = 0; iGrid < nDofs; iGrid++) { float leftX = solution[iGrid-1]; float centerX = solution[iGrid]; float rightX = solution[iGrid+1]; float bottomX = solution[iGrid-nxGrids]; float topX; if (iGrid + nxGrids < nDofs) { topX = solution[iGrid+nxGrids]; } boundaryConditions(iGrid, nxGrids, nyGrids, leftX, rightX, bottomX, topX); float residualContributionFromRow = normFromRow(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, rhs[iGrid]); residual = residual + residualContributionFromRow * residualContributionFromRow; } residual = sqrt(residual); return residual; } float * iterativeCpu(const float * initX, const float * rhs, const float * matrixElements, const int nxGrids, const int nyGrids, const int nIters, const int method) { int nDofs = nxGrids * nyGrids; float * x0 = new float[nDofs]; float * x1 = new float[nDofs]; memcpy(x0, initX, sizeof(float) * nDofs); memcpy(x1, initX, sizeof(float)* nDofs); const float bottomMatrix = matrixElements[0]; const float leftMatrix = matrixElements[1]; const float centerMatrix = matrixElements[2]; const float rightMatrix = matrixElements[3]; const float topMatrix = matrixElements[4]; for (int iIter = 0; iIter < nIters; ++ iIter) { for (int iGrid = 0; iGrid < nDofs; ++iGrid) { const float leftX = ((iGrid % nxGrids) == 0) ? 0.0f : x0[iGrid - 1]; const float centerX = x0[iGrid]; const float rightX = (((iGrid + 1) % nxGrids) == 0) ? 0.0f : x0[iGrid + 1]; const float bottomX = (iGrid < nxGrids) ? 0.0f : x0[iGrid - nxGrids]; const float topX = (iGrid < nDofs - nxGrids) ? x0[iGrid + nxGrids] : 0.0f; if (iIter % 2 == 0) { x1[iGrid] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method); } else { x1[iGrid] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method); } } float * tmp = x0; x0 = x1; x1 = tmp; } delete[] x1; return x0; } __global__ void _iterativeGpuClassicIteration(float * x1, const float * x0, const float * rhs, const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, const int nxGrids, const int nyGrids, const int iteration, const int method) { const int ixGrid = blockIdx.x * blockDim.x + threadIdx.x; // Col const int iyGrid = blockIdx.y * blockDim.y + threadIdx.y; // Row const int iGrid = iyGrid * (nxGrids) + ixGrid; const int nDofs = nxGrids * nyGrids; if (iGrid < nDofs) { const float leftX = (ixGrid == 0) ? 0.0f : x0[iGrid - 1] ; const float centerX = x0[iGrid]; const float rightX = (ixGrid == nxGrids - 1) ? 0.0f : x0[iGrid + 1]; const float topX = (iyGrid == nyGrids - 1) ? 0.0f : x0[iGrid + nxGrids]; const float bottomX = (iyGrid == 0) ? 
0.0f : x0[iGrid - nxGrids]; if (iteration % 2 == 0) { x1[iGrid] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method); } else { x1[iGrid] = iterativeOperation2(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method); } } __syncthreads(); } float * iterativeGpuClassic(const float * initX, const float * rhs, const float * matrixElements, const int nxGrids, const int nyGrids, const int nIters, const int threadsPerBlock, const int method) { const int nDofs = nxGrids * nyGrids; // Allocate memory in the CPU for the solution float * x0Gpu, * x1Gpu; cudaMalloc(&x0Gpu, sizeof(float) * nDofs); cudaMalloc(&x1Gpu, sizeof(float) * nDofs); // Allocate CPU memory for other variables float * rhsGpu; cudaMalloc(&rhsGpu, sizeof(float) * nDofs); // Allocate GPU memory cudaMemcpy(x0Gpu, initX, sizeof(float) * nDofs, cudaMemcpyHostToDevice); cudaMemcpy(rhsGpu, rhs, sizeof(float) * nDofs, cudaMemcpyHostToDevice); // Run the classic iteration for prescribed number of iterations // int threadsPerBlock = 16; const int nxBlocks = (int)ceil(nxGrids / (float)threadsPerBlock); const int nyBlocks = (int)ceil(nyGrids / (float)threadsPerBlock); dim3 grid(nxBlocks, nyBlocks); dim3 block(threadsPerBlock, threadsPerBlock); const float bottomMatrix = matrixElements[0]; const float leftMatrix = matrixElements[1]; const float centerMatrix = matrixElements[2]; const float rightMatrix = matrixElements[3]; const float topMatrix = matrixElements[4]; for (int iIter = 0; iIter < nIters; ++iIter) { // Jacobi iteration on the CPU (used to be <<<nBlocks, threadsPerBlock>>>) _iterativeGpuClassicIteration<<<grid, block>>>( x1Gpu, x0Gpu, rhsGpu, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, iIter, method); float * tmp = x1Gpu; x0Gpu = x1Gpu; x1Gpu = tmp; } // Write solution from GPU to CPU variable float * solution = new float[nDofs]; cudaMemcpy(solution, x0Gpu, sizeof(float) * nDofs, cudaMemcpyDeviceToHost); // Free all memory cudaFree(x0Gpu); cudaFree(x1Gpu); cudaFree(rhsGpu); return solution; } //// SWEPT METHODS HERE //// __device__ void __iterativeBlockUpdateToLeftRight(float * xLeftBlock, float * xRightBlock, const float *rhsBlock, const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, int nxGrids, int nyGrids, int iGrid, int method, int subdomainLength, bool diagonal, int maxSteps) { // Initialize shared memory and pointers to x0, x1 arrays containing Jacobi solutions extern __shared__ float sharedMemory[]; float * x0 = sharedMemory; int elemPerBlock = subdomainLength * subdomainLength; float * x1 = sharedMemory + elemPerBlock; // Define number of Jacobi steps to take, and current index and stride value int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; // Perform Jacobi iterations for (int k = 0; k < maxSteps; k++) { for (int idx = index; idx < elemPerBlock; idx += stride) { if ((idx % subdomainLength != 0) && ((idx+1) % subdomainLength != 0) && (idx > subdomainLength-1) && (idx < elemPerBlock-(subdomainLength-1))) { // Define necessary constants float centerRhs = rhsBlock[idx]; float leftX = x0[idx-1]; float centerX = x0[idx]; float rightX = x0[idx+1]; float topX = x0[idx+subdomainLength]; float bottomX = x0[idx-subdomainLength]; // Apply boundary conditions int step = idx / stride; int Idx = (stride % subdomainLength) 
+ (stride/subdomainLength) * nxGrids; int IGrid = iGrid + step * Idx; if (diagonal == true) { int nDofs = nxGrids * nyGrids; if ((blockIdx.y == gridDim.y-1) && idx/subdomainLength >= subdomainLength/2) { IGrid = IGrid - nDofs; } if ((blockIdx.x == gridDim.x-1) && (idx % subdomainLength) >= (subdomainLength/2)) { IGrid = IGrid - nxGrids; } } boundaryConditions(IGrid, nxGrids, nyGrids, leftX, rightX, bottomX, topX); //__syncthreads(); // Perform update // x1[idx] = increment(centerX); // x1[idx] = jacobi(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, // leftX, centerX, rightX, topX, bottomX, centerRhs); if (k % 2 == 0) { x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, centerRhs, iGrid, method); } else { x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, centerRhs, iGrid, method); } // Synchronize //__syncthreads(); } } __syncthreads(); float * tmp; tmp = x0; x0 = x1, x1 = tmp; } //__syncthreads(); index = threadIdx.x + threadIdx.y * blockDim.x; stride = blockDim.x * blockDim.y; for (int idx = index; idx < elemPerBlock/2; idx += stride) { xLeftBlock[idx] = x0[subdomainLength * (idx % subdomainLength) + (idx/subdomainLength)]; xRightBlock[idx] = x0[subdomainLength * (idx % subdomainLength) - (idx/subdomainLength) + (subdomainLength-1)]; } } __device__ void __iterativeBlockUpdateToNorthSouth(float * xTopBlock, float * xBottomBlock, const float *rhsBlock, const float leftMatrix, const float centerMatrix, const float rightMatrix, const float topMatrix, const float bottomMatrix, int nxGrids, int nyGrids, int iGrid, int method, int subdomainLength, bool vertical, int maxSteps) { extern __shared__ float sharedMemory[]; float * x0 = sharedMemory; int elemPerBlock = subdomainLength * subdomainLength; float * x1 = sharedMemory + elemPerBlock; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; __syncthreads(); for (int k = 0; k < maxSteps; k++) { for (int idx = index; idx < elemPerBlock; idx += stride) { if ((idx % subdomainLength != 0) && ((idx+1) % subdomainLength != 0) && (idx > subdomainLength-1) && (idx < elemPerBlock-subdomainLength-1)) { // Define necessary constants float centerRhs = rhsBlock[idx]; float leftX = x0[idx-1]; float centerX = x0[idx]; float rightX = x0[idx+1]; float topX = x0[idx+subdomainLength]; float bottomX = x0[idx-subdomainLength]; int step = idx / stride; int Idx = (stride % subdomainLength) + (stride/subdomainLength) * nxGrids; int IGrid = iGrid + step * Idx; if (vertical == true) { int nDofs = nxGrids * nyGrids; if ((blockIdx.y == gridDim.y-1) && idx/subdomainLength >= subdomainLength/2) { IGrid = IGrid - nDofs; } } else { if ((blockIdx.x == gridDim.x-1) && (idx % subdomainLength) >= (subdomainLength/2)) { IGrid = IGrid - nxGrids; } } boundaryConditions(IGrid, nxGrids, nyGrids, leftX, rightX, bottomX, topX); //__syncthreads(); // Perform update //x1[idx] = increment(centerX); // x1[idx] = jacobi(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, // leftX, centerX, rightX, topX, bottomX, centerRhs); if (k % 2 == 0) { x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, centerRhs, iGrid, method); } else { x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, leftX, centerX, rightX, topX, bottomX, centerRhs, iGrid, method); } // 
Synchronize //__syncthreads(); } } __syncthreads(); float * tmp; tmp = x0; x0 = x1, x1 = tmp; } // Return values for xTop and xBottom here for (int idx = index; idx < elemPerBlock/2; idx += stride) { xBottomBlock[idx] = x0[idx]; xTopBlock[idx] = x0[subdomainLength * (subdomainLength-1-idx/subdomainLength) + (idx % subdomainLength)]; } } __global__ void _iterativeGpuOriginal(float * xLeftGpu, float *xRightGpu, const float * x0Gpu, const float *rhsGpu, const int nxGrids, const int nyGrids, const int method, const int subdomainLength, const int maxSteps) { const int xShift = subdomainLength * blockIdx.x; const int yShift = subdomainLength * blockIdx.y; const int blockShift = xShift + yShift * nxGrids; const float * x0Block = x0Gpu + blockShift; const float * rhsBlock = rhsGpu + blockShift; const float bottomMatrix = matrixElementsGpu[0]; const float leftMatrix = matrixElementsGpu[1]; const float centerMatrix = matrixElementsGpu[2]; const float rightMatrix = matrixElementsGpu[3]; const float topMatrix = matrixElementsGpu[4]; int numElementsPerBlock = subdomainLength * subdomainLength; int blockID = blockIdx.x + blockIdx.y * gridDim.x; int arrayShift = (numElementsPerBlock*blockID)/2; float * xLeftBlock = xLeftGpu + arrayShift; float * xRightBlock = xRightGpu + arrayShift; extern __shared__ float sharedMemory[]; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; for (int idx = index; idx < numElementsPerBlock; idx += stride) { int Idx = (idx % subdomainLength) + (idx/subdomainLength) * nxGrids; sharedMemory[idx] = x0Block[Idx]; sharedMemory[idx + numElementsPerBlock] = x0Block[Idx]; } int iGrid = blockShift + (index/subdomainLength) * nxGrids + index % subdomainLength; __iterativeBlockUpdateToLeftRight(xLeftBlock, xRightBlock, rhsBlock, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, iGrid, method, subdomainLength, false, maxSteps); } __global__ void _iterativeGpuHorizontalShift(const float * xLeftGpu, const float * xRightGpu, float * xTopGpu, float * xBottomGpu, const float * x0Gpu, const float * rhsGpu, const int nxGrids, const int nyGrids, const int method, const int subdomainLength, const int maxSteps) { int xShift = subdomainLength * blockIdx.x; int yShift = subdomainLength * blockIdx.y; int blockShift = xShift + yShift * nxGrids; int horizontalShift = subdomainLength/2; const float * rhsBlock = rhsGpu + blockShift; //+ horizontalShift; const float bottomMatrix = matrixElementsGpu[0]; const float leftMatrix = matrixElementsGpu[1]; const float centerMatrix = matrixElementsGpu[2]; const float rightMatrix = matrixElementsGpu[3]; const float topMatrix = matrixElementsGpu[4]; int numElementsPerBlock = (subdomainLength * subdomainLength)/2; int blockID = blockIdx.x + blockIdx.y * gridDim.x; int arrayShift = numElementsPerBlock*blockID; const float * xLeftBlock = xRightGpu + arrayShift; const float * xRightBlock = (blockIdx.x != gridDim.x-1) ? 
xLeftGpu + arrayShift + numElementsPerBlock : xLeftGpu + (numElementsPerBlock * blockIdx.y * gridDim.x); float * xBottomBlock = xBottomGpu + arrayShift; float * xTopBlock = xTopGpu + arrayShift; extern __shared__ float sharedMemory[]; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; for (int idx = index; idx < subdomainLength * subdomainLength; idx += stride) { if (idx % subdomainLength < subdomainLength/2) { int Idx = ((subdomainLength-1)/2-(idx % subdomainLength)) * subdomainLength + idx/subdomainLength; sharedMemory[idx] = xLeftBlock[Idx]; sharedMemory[idx + subdomainLength * subdomainLength] = xLeftBlock[Idx]; } else { int Idx = ((idx % subdomainLength) - (subdomainLength-1)/2 - 1) * subdomainLength + idx/subdomainLength; sharedMemory[idx] = xRightBlock[Idx]; sharedMemory[idx + subdomainLength * subdomainLength] = xRightBlock[Idx]; } } int iGrid = blockShift + (index/subdomainLength) * nxGrids + index % subdomainLength + horizontalShift; //__syncthreads(); __iterativeBlockUpdateToNorthSouth(xTopBlock, xBottomBlock, rhsBlock, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, iGrid, method, subdomainLength, false, maxSteps); } __global__ void _iterativeGpuVerticalandHorizontalShift(float * xLeftGpu, float * xRightGpu, const float * xTopGpu, const float * xBottomGpu, const float * x0Gpu, const float * rhsGpu, const int nxGrids, const int nyGrids, const int method, const int subdomainLength, const int maxSteps) { int xShift = subdomainLength * blockIdx.x; int yShift = subdomainLength * blockIdx.y; int blockShift = xShift + yShift * nxGrids; int horizontalShift = subdomainLength/2; int verticalShift = subdomainLength/2 * nxGrids; const float * rhsBlock = rhsGpu + blockShift; //+ verticalShift; const float bottomMatrix = matrixElementsGpu[0]; const float leftMatrix = matrixElementsGpu[1]; const float centerMatrix = matrixElementsGpu[2]; const float rightMatrix = matrixElementsGpu[3]; const float topMatrix = matrixElementsGpu[4]; int numElementsPerBlock = (subdomainLength * subdomainLength)/2; int blockID = blockIdx.x + blockIdx.y * gridDim.x; int arrayShift = numElementsPerBlock*blockID; const float * xBottomBlock = xTopGpu + arrayShift; const float * xTopBlock = (blockIdx.y != gridDim.y-1) ? 
xBottomGpu + numElementsPerBlock * gridDim.x + arrayShift : xBottomGpu + (numElementsPerBlock * blockIdx.x); float * xLeftBlock = xLeftGpu + arrayShift; float * xRightBlock = xRightGpu + arrayShift; extern __shared__ float sharedMemory[]; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; for (int idx = index; idx < subdomainLength * subdomainLength; idx += stride) { if (idx < numElementsPerBlock) { sharedMemory[idx] = xBottomBlock[(subdomainLength/2-1-idx/subdomainLength) * subdomainLength + idx % subdomainLength]; sharedMemory[idx + subdomainLength * subdomainLength] = xBottomBlock[(subdomainLength/2-1-idx/subdomainLength) * subdomainLength + idx % subdomainLength]; } else { sharedMemory[idx] = xTopBlock[idx - numElementsPerBlock]; sharedMemory[idx + subdomainLength * subdomainLength] = xTopBlock[idx - numElementsPerBlock]; } } int iGrid = blockShift + (index/subdomainLength) * nxGrids + index % subdomainLength + horizontalShift + verticalShift; __iterativeBlockUpdateToLeftRight(xLeftBlock, xRightBlock, rhsBlock, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, iGrid, method, subdomainLength, true, maxSteps); } __global__ void _iterativeGpuVerticalShift(const float * xLeftGpu, const float * xRightGpu, float * xTopGpu, float * xBottomGpu, const float * x0Gpu, const float * rhsGpu, const int nxGrids, const int nyGrids, const int method, const int subdomainLength, int maxSteps) { int xShift = subdomainLength * blockIdx.x; int yShift = subdomainLength * blockIdx.y; int blockShift = xShift + yShift * nxGrids; int verticalShift = subdomainLength/2 * nxGrids; const float * rhsBlock = rhsGpu + blockShift; //+ verticalShift; const float bottomMatrix = matrixElementsGpu[0]; const float leftMatrix = matrixElementsGpu[1]; const float centerMatrix = matrixElementsGpu[2]; const float rightMatrix = matrixElementsGpu[3]; const float topMatrix = matrixElementsGpu[4]; int numElementsPerBlock = (subdomainLength * subdomainLength)/2; int blockID = blockIdx.x + blockIdx.y * gridDim.x; int arrayShift = numElementsPerBlock*blockID; const float * xRightBlock = xLeftGpu + arrayShift; const float * xLeftBlock = (blockIdx.x != 0) ? 
xRightGpu + arrayShift - numElementsPerBlock : xRightGpu + numElementsPerBlock * ((gridDim.x-1) + blockIdx.y * gridDim.x); float * xBottomBlock = xBottomGpu + arrayShift; float * xTopBlock = xTopGpu + arrayShift; extern __shared__ float sharedMemory[]; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; for (int idx = index; idx < subdomainLength * subdomainLength; idx += stride) { if (idx % subdomainLength < subdomainLength/2) { int Idx = ((subdomainLength-1)/2-(idx % subdomainLength)) * subdomainLength + idx/subdomainLength; sharedMemory[idx] = xLeftBlock[Idx]; sharedMemory[idx + subdomainLength * subdomainLength] = xLeftBlock[Idx]; } else { int Idx = ((idx % subdomainLength) - (subdomainLength-1)/2 - 1) * subdomainLength + idx/subdomainLength; sharedMemory[idx] = xRightBlock[Idx]; sharedMemory[idx + subdomainLength * subdomainLength] = xRightBlock[Idx]; } } int iGrid = blockShift + (index/subdomainLength) * nxGrids + index % subdomainLength + verticalShift; __iterativeBlockUpdateToNorthSouth(xTopBlock, xBottomBlock, rhsBlock, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, iGrid, method, subdomainLength, true, maxSteps); } __global__ void _finalSolution(const float * xTopGpu, const float * xBottomGpu, float * x0Gpu, const int nxGrids, const int subdomainLength) { extern __shared__ float sharedMemory[]; int numElementsPerBlock = (subdomainLength * subdomainLength)/2; int blockID = blockIdx.x + blockIdx.y * gridDim.x; int arrayShift = numElementsPerBlock*blockID; const float * xTopBlock = xBottomGpu + arrayShift; const float * xBottomBlock = (blockIdx.y != 0) ? xTopGpu + (blockIdx.x + (blockIdx.y-1) * gridDim.x) * numElementsPerBlock : xTopGpu + (gridDim.x * (gridDim.y-1) + blockIdx.x) * numElementsPerBlock; int xShift = subdomainLength * blockIdx.x; int yShift = subdomainLength * blockIdx.y; int blockShift = xShift + yShift * nxGrids; float * x0Block = x0Gpu + blockShift; int index = threadIdx.x + threadIdx.y * blockDim.x; int stride = blockDim.x * blockDim.y; for (int idx = index; idx < numElementsPerBlock; idx += stride) { sharedMemory[idx + numElementsPerBlock] = xTopBlock[idx]; sharedMemory[(subdomainLength/2 - 1 - idx/subdomainLength) * subdomainLength + idx % subdomainLength] = xBottomBlock[idx]; } __syncthreads(); for (int idx = index; idx < 2*numElementsPerBlock; idx += stride) { int Idx = (idx % subdomainLength) + (idx/subdomainLength) * nxGrids; x0Block[Idx] = sharedMemory[idx]; } } /////////////////////////////////////////////////// float * iterativeGpuSwept(const float * initX, const float * rhs, const float * matrixElements, const int nxGrids, const int nyGrids, const int nCycles, const int maxSteps, const int threadsPerBlock, const int method, const int subdomainLength) { // Determine number of threads and blocks const int nxBlocks = (int)ceil(nxGrids / (float)subdomainLength); const int nyBlocks = (int)ceil(nyGrids / (float)subdomainLength); const int nDofs = nxGrids * nyGrids; dim3 grid(nxBlocks, nyBlocks); dim3 block(threadsPerBlock, threadsPerBlock); // Allocate memory for solution and inputs float *xLeftGpu, *xRightGpu, *xTopGpu, *xBottomGpu; const int numSharedElemPerBlock = subdomainLength * subdomainLength / 2; cudaMalloc(&xLeftGpu, sizeof(float) * numSharedElemPerBlock * nxBlocks * nyBlocks); cudaMalloc(&xRightGpu, sizeof(float) * numSharedElemPerBlock * nxBlocks * nyBlocks); cudaMalloc(&xTopGpu, sizeof(float) * numSharedElemPerBlock * nxBlocks * nyBlocks); cudaMalloc(&xBottomGpu, sizeof(float) 
* numSharedElemPerBlock * nxBlocks * nyBlocks); float * x0Gpu, * rhsGpu; cudaMalloc(&x0Gpu, sizeof(float) * nDofs); cudaMalloc(&rhsGpu, sizeof(float) * nDofs); /* STORING MATRIX IN GLOBAL MEMORY float * matrixElementsGpu; cudaMalloc(&matrixElementsGpu, sizeof(float) * 5); cudaMemcpy(matrixElementsGpu, matrixElements, sizeof(float) * 5, cudaMemcpyHostToDevice); */ // STORING MATRIX IN CONSTANT MEMORY cudaMemcpyToSymbol(matrixElementsGpu, matrixElements, sizeof(float) * 5); // Allocate memory in the GPU cudaMemcpy(x0Gpu, initX, sizeof(float) * nDofs, cudaMemcpyHostToDevice); cudaMemcpy(rhsGpu, rhs, sizeof(float) * nDofs, cudaMemcpyHostToDevice); const int sharedBytes = 2 * subdomainLength * subdomainLength * sizeof(float); for (int i = 0; i < nCycles; i++) { // APPLY METHOD TO ADVANCE POINTS (NO SHIFT) _iterativeGpuOriginal <<<grid, block, sharedBytes>>> (xLeftGpu, xRightGpu, x0Gpu, rhsGpu, nxGrids, nyGrids, method, subdomainLength, maxSteps); // APPLY HORIZONTAL SHIFT _iterativeGpuHorizontalShift <<<grid, block, sharedBytes>>> (xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, x0Gpu, rhsGpu, nxGrids, nyGrids, method, subdomainLength, maxSteps); // APPLY VERTICAL SHIFT (ALONG WITH PREVIOUS HORIZONTAL SHIFT) _iterativeGpuVerticalandHorizontalShift <<<grid, block, sharedBytes>>> (xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, x0Gpu, rhsGpu, nxGrids, nyGrids, method, subdomainLength, maxSteps); // APPLY VERTICAL SHIFT _iterativeGpuVerticalShift <<<grid, block, sharedBytes>>> (xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, x0Gpu, rhsGpu, nxGrids, nyGrids, method, subdomainLength, maxSteps); // APPLY FINAL STEP _finalSolution <<<grid, block, sharedBytes>>> (xTopGpu, xBottomGpu, x0Gpu, nxGrids, subdomainLength); } float * solution = new float[nDofs]; cudaMemcpy(solution, x0Gpu, sizeof(float) * nDofs, cudaMemcpyDeviceToHost); cudaFree(x0Gpu); cudaFree(xLeftGpu); cudaFree(xRightGpu); cudaFree(xTopGpu); cudaFree(xBottomGpu); cudaFree(rhsGpu); // cudaFree(matrixElementsGpu); return solution; } int main(int argc, char *argv[]) { // Ask user for inputs const int nxGrids = atoi(argv[1]); const int nyGrids = atoi(argv[1]); const int subdomainLength = atoi(argv[2]); const int threadsPerBlock = atoi(argv[3]); const int nCycles = atoi(argv[4]); const int maxSteps = atoi(argv[5]); const int nIters = atoi(argv[6]); method_type method = JACOBI; const int nDofs = nxGrids * nyGrids; // Declare arrays and population with values for Poisson equation float * initX = new float[nDofs]; float * rhs = new float[nDofs]; const float dx = 1.0f / (nxGrids + 1); const float dy = 1.0f / (nyGrids + 1); for (int iGrid = 0; iGrid < nDofs; ++iGrid) { initX[iGrid] = (float)iGrid; rhs[iGrid] = 1.0f; } float * matrixElements = new float[5]; matrixElements[0] = -1.0f / (dy * dy); matrixElements[1] = -1.0f / (dx * dx); matrixElements[2] = 2.0f / (dx * dx) + 2.0f / (dy * dy); matrixElements[3] = -1.0f / (dx * dx); matrixElements[4] = -1.0f / (dy * dy); // Amount of shared memory to be requested const int sharedMem = 2 * subdomainLength * subdomainLength * sizeof(float); // Run the CPU Implementation and measure the time required clock_t cpuStartTime = clock(); float * solutionCpu = iterativeCpu(initX, rhs, matrixElements, nxGrids, nyGrids, nIters, method); clock_t cpuEndTime = clock(); float cpuTime = (cpuEndTime - cpuStartTime) / (float) CLOCKS_PER_SEC; // Run the Classic GPU Implementation and measure the time required cudaEvent_t startClassic, stopClassic; float timeClassic; cudaEventCreate( &startClassic ); cudaEventCreate( &stopClassic ); 
cudaEventRecord(startClassic, 0); float * solutionGpuClassic = iterativeGpuClassic(initX, rhs, matrixElements, nxGrids, nyGrids, nIters, threadsPerBlock, method); cudaEventRecord(stopClassic, 0); cudaEventSynchronize(stopClassic); cudaEventElapsedTime(&timeClassic, startClassic, stopClassic); // Run the Swept GPU Implementation and measure the time required cudaEvent_t startSwept, stopSwept; float timeSwept; cudaEventCreate( &startSwept ); cudaEventCreate( &stopSwept ); cudaEventRecord( startSwept, 0); float * solutionGpuSwept = iterativeGpuSwept(initX, rhs, matrixElements, nxGrids, nyGrids, nCycles, maxSteps, threadsPerBlock, method, subdomainLength); cudaEventRecord(stopSwept, 0); cudaEventSynchronize(stopSwept); cudaEventElapsedTime(&timeSwept, startSwept, stopSwept); // Print parameters of the problem to screen printf("===============INFORMATION============================\n"); printf("Size of the linear system (Nx, Ny): %d (%d, %d) \n", nDofs, nxGrids, nyGrids); printf("Size of each subdomain handled by a block and threads per block: %d and %d\n", subdomainLength, threadsPerBlock); printf("Method used: %d\n", method); printf("Number of Iterations performed: %d\n", nIters); printf("Number of Swept Cycles performed: %d\n", nCycles); printf("Number of Iterations performed internally at each Swept Stage: %d\n", maxSteps); printf("Amount of shared memory to be requested: %d B\n", sharedMem); // Print out results to the screen, notify if any GPU Classic or Swept values differ significantly for (int iGrid = 0; iGrid < nDofs; ++iGrid) { printf("%d %f %f %f \n",iGrid, solutionCpu[iGrid], solutionGpuClassic[iGrid], solutionGpuSwept[iGrid]); //assert(solutionGpuClassic[iGrid] == solutionGpuSwept[iGrid]); // if (abs(solutionGpuClassic[iGrid] - solutionGpuSwept[iGrid]) > 1e-2) { // printf("For grid point %d, Classic and Swept give %f and %f respectively\n", iGrid, solutionGpuClassic[iGrid], solutionGpuSwept[iGrid]); // } } // Print out time for cpu, classic gpu, and swept gpu approaches // float cpuTimePerIteration = (cpuTime / nIters) * 1e3; // float classicTimePerIteration = timeClassic / nIters; // float sweptTimePerIteration = timeSwept / nIters; // float timeMultiplier = classicTimePerIteration / sweptTimePerIteration; /* printf("Time needed for the CPU (per iteration): %f ms\n", cpuTimePerIteration); printf("Time needed for the Classic GPU (per iteration) is %f ms\n", classicTimePerIteration); printf("Time needed for the Swept GPU (per iteration): %f ms\n", sweptTimePerIteration); */ printf("TIMING INFORMATION:\n"); printf("Total Time needed for the CPU: %f ms\n", cpuTime * 1e3); printf("Total Time needed for the Classic GPU is %f ms\n", timeClassic); printf("Total Time needed for the Swept GPU: %f ms\n", timeSwept); printf("Swept takes %f the time Classic takes\n", timeSwept / timeClassic); // Compute the residual of the resulting solution (||b-Ax||) float residualCPU = Residual(solutionCpu, rhs, matrixElements, nxGrids, nyGrids); float residualClassicGPU = Residual(solutionGpuClassic, rhs, matrixElements, nxGrids, nyGrids); float residualSweptGPU = Residual(solutionGpuSwept, rhs, matrixElements, nxGrids, nyGrids); printf("RESIDUAL INFORMATION:\n"); printf("Residual of the CPU solution is %f\n", residualCPU); printf("Residual of the Classic GPU solution is %f\n", residualClassicGPU); printf("Residual of the Swept GPU solution is %f\n", residualSweptGPU); printf("The residual of Swept is %f times that of 
Classic\n", residualSweptGPU / residualClassicGPU); // Save residual to a file /* std::ofstream residuals; residuals.open("residual-gs.txt",std::ios_base::app); residuals << nGrids << "\t" << threadsPerBlock << "\t" << nIters << "\t" << residualSwept << "\n"; residuals.close(); */ // Save Results to a file "N tpb Iterations CPUTime/perstep ClassicTime/perstep SweptTime/perStep ClassicTime/SweptTime" std::ofstream timings; timings.open("time.txt",std::ios_base::app); // timings << nxGrids << "\t" << nyGrids << "\t" << threadsPerBlock << "\t" << nIters << "\t" << cpuTimePerIteration << "\t" << classicTimePerIteration << "\t" << sweptTimePerIteration << "\t" << timeMultiplier << "\n"; timings.close(); // Free memory cudaEventDestroy(startClassic); cudaEventDestroy(startSwept); delete[] initX; delete[] rhs; delete[] matrixElements; delete[] solutionCpu; delete[] solutionGpuClassic; delete[] solutionGpuSwept; }
296cc86bd0b8c95d8f81b734b4f0b1ea0a9f4aa6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/crop_layer.hpp" namespace caffe { // Copy (one line per thread) from one array to another, with arbitrary // strides in the last two dimensions. template <typename Dtype> __global__ void copy_kernel(const int n, const int height, const int width, const int src_outer_stride, const int src_inner_stride, const int dest_outer_stride, const int dest_inner_stride, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, n) { int src_start = index / height * src_outer_stride + index % height * src_inner_stride; int dest_start = index / height * dest_outer_stride + index % height * dest_inner_stride; for (int i = 0; i < width; ++i) { dest[dest_start + i] = src[src_start + i]; } } } template <typename Dtype> void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int lines = top[0]->count() / top[0]->width(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, lines, top[0]->height(), top[0]->width(), bottom[0]->height() * bottom[0]->width(), bottom[0]->width(), top[0]->height() * top[0]->width(), top[0]->width(), bottom_data + bottom[0]->offset(0, 0, crop_h_, crop_w_), top_data); } template <typename Dtype> void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int lines = top[0]->count() / top[0]->width(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, lines, top[0]->height(), top[0]->width(), top[0]->height() * top[0]->width(), top[0]->width(), bottom[0]->height() * bottom[0]->width(), bottom[0]->width(), top_diff, bottom_diff + bottom[0]->offset(0, 0, crop_h_, crop_w_)); } } INSTANTIATE_LAYER_GPU_FUNCS(CropLayer); } // namespace caffe
296cc86bd0b8c95d8f81b734b4f0b1ea0a9f4aa6.cu
#include <vector> #include "caffe/layers/crop_layer.hpp" namespace caffe { // Copy (one line per thread) from one array to another, with arbitrary // strides in the last two dimensions. template <typename Dtype> __global__ void copy_kernel(const int n, const int height, const int width, const int src_outer_stride, const int src_inner_stride, const int dest_outer_stride, const int dest_inner_stride, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, n) { int src_start = index / height * src_outer_stride + index % height * src_inner_stride; int dest_start = index / height * dest_outer_stride + index % height * dest_inner_stride; for (int i = 0; i < width; ++i) { dest[dest_start + i] = src[src_start + i]; } } } template <typename Dtype> void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int lines = top[0]->count() / top[0]->width(); // NOLINT_NEXT_LINE(whitespace/operators) copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>( lines, top[0]->height(), top[0]->width(), bottom[0]->height() * bottom[0]->width(), bottom[0]->width(), top[0]->height() * top[0]->width(), top[0]->width(), bottom_data + bottom[0]->offset(0, 0, crop_h_, crop_w_), top_data); } template <typename Dtype> void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int lines = top[0]->count() / top[0]->width(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>( lines, top[0]->height(), top[0]->width(), top[0]->height() * top[0]->width(), top[0]->width(), bottom[0]->height() * bottom[0]->width(), bottom[0]->width(), top_diff, bottom_diff + bottom[0]->offset(0, 0, crop_h_, crop_w_)); } } INSTANTIATE_LAYER_GPU_FUNCS(CropLayer); } // namespace caffe
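// The index arithmetic in copy_kernel above treats the blob as `lines` contiguous rows of
// length `width` (lines = count / width). A hypothetical plain-C++ equivalent of the forward
// crop copy, useful as a reference for the stride parameters, is sketched below; the names
// are illustrative and not part of the Caffe API.
void crop_copy_cpu(const float* src, float* dest, int lines, int height, int width,
                   int src_outer_stride, int src_inner_stride,
                   int dest_outer_stride, int dest_inner_stride) {
  for (int index = 0; index < lines; ++index) {
    // outer stride advances once per (n, c) slice, inner stride once per row inside a slice
    int src_start  = index / height * src_outer_stride  + index % height * src_inner_stride;
    int dest_start = index / height * dest_outer_stride + index % height * dest_inner_stride;
    for (int i = 0; i < width; ++i)
      dest[dest_start + i] = src[src_start + i];
  }
}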
6855a4ac21eda59ec272ab9b0da70f069ea5c23f.hip
// !!! This is a file automatically generated by hipify!!! #include <device_launch_parameters.h> #include <iostream> #include <cstdio> #include <cstdlib> #include <hip/hip_runtime.h> struct vertice { int d; int first; int indeg; int outdeg; int lastchangeit; }; struct edge { int s; int head; int tail; int next; }; int maxlength = 11000000; __global__ void findedge(struct vertice* v, struct edge* e, int* qv, int* qe,int* numv,int* nume) { int index = threadIdx.x+1+blockIdx.x*blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index;i <= (*numv);i = i + stride) { int u = qv[i]; for (int j = v[u].first;j != 0;j = e[j].next) { if (v[e[j].tail].indeg == 1) { v[e[j].tail].d = min(v[e[j].tail].d, v[e[j].head].d + e[j].s); } if (v[e[j].tail].outdeg == 0) { continue; } atomicExch(&(qe[atomicAdd(nume, 1) + 1]), j); } } } __global__ void release(struct vertice* v, struct edge* e, int* qv, int* qe, int* numv,int* nume,int *it) { int index = threadIdx.x + 1 + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index;i <= (*nume);i = i + stride) { //printf("%d ", i); int ee = qe[i]; //printf("%d ", ee); if (v[e[ee].tail].d > v[e[ee].head].d + e[ee].s) { //atomicExch(&(qv[atomicAdd(numv, 1) + 1]), e[ee].tail); atomicMin(&(v[e[ee].tail].d), v[e[ee].head].d + e[ee].s); v[e[ee].tail].lastchangeit = *it; /*if (*it != v[e[ee].tail].lastchangeit) { (*numv)++; qv[*numv] = e[ee].tail; } */ } } } __global__ void findvertice(struct vertice* v, struct edge* e, int* qv, int* qe, int* numv, int* nume, int* it,int n) { int index = threadIdx.x + 1 + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index;i <= n;i = i + stride) { if (v[i].lastchangeit == *it) { atomicExch(&(qv[atomicAdd(numv, 1) + 1]), i); } } } int main() { int n, m; freopen("hbf.txt", "r", stdin); scanf("%d %d", &n, &m); struct vertice* v; struct edge* e; int* qv; int* qe; int* numv; int* nume; int* it; hipMallocManaged((void**)&v, (n + 1) * sizeof(struct vertice)); hipMallocManaged((void**)&e, (m + 1) * sizeof(struct edge)); hipMallocManaged((void**)&qv, (n+1) * sizeof(int)); hipMallocManaged((void**)&qe, (m+1) * sizeof(int)); hipMallocManaged((void**)&numv, sizeof(int)); hipMallocManaged((void**)&nume, sizeof(int)); hipMallocManaged((void**)&it, sizeof(int)); for (int i = 1;i <= n;i++) { v[i].d = maxlength; v[i].first = 0; v[i].indeg = 0; v[i].outdeg = 0; v[i].lastchangeit = 0; } v[1].d = 0; for (int i = 1;i <= m;i++) { e[i].head = 0; e[i].tail = 0; e[i].next = 0; e[i].s = 0; } *numv = 0; *nume = 0; //qv[1] = 1; for (int i = 1;i <= m;i++) { int p, q, l; scanf("%d %d %d", &p, &q, &l); if (p != q) { v[q].indeg++; v[p].outdeg++; if (p == 1) { v[q].d= min(v[q].d, l); (*numv)++; //printf("%d ", p); //printf("%d ", *numv); //printf("%d ", qv[*numv]); //printf("%d -> ", q); qv[*numv] = q; //printf("%d %d %d\n", *numv, qv[*numv], q); } e[i].next = v[p].first; v[p].first = i; e[i].head = p; e[i].tail = q; e[i].s = l; } else { i--; m--; } } freopen("CON", "r", stdin); for (*it = 1;(*it) <= n-2;(*it)++) { dim3 blockSize(256); dim3 gridSize1(((*numv) + blockSize.x - 1) / blockSize.x); findedge << <gridSize1, blockSize >> > (v, e, qv, qe,numv,nume); hipDeviceSynchronize(); /*printf("\ne: "); for (int j = 1;j <= (*nume);j++) { printf("%d %d %d %d\n", qe[j],e[qe[j]].head, e[qe[j]].tail, e[qe[j]].s); } printf("v: ");*/ *numv = 0; dim3 gridSize2(((*nume) + blockSize.x - 1) / blockSize.x); release << <gridSize2, blockSize >> > (v, e, qv, qe, numv,nume,it); hipDeviceSynchronize(); *nume = 0; /*for (int j = 1;j 
<= (*numv);j++) { printf("%d ", qv[j]); }*/ dim3 gridSize3((n + blockSize.x - 1)/blockSize.x); findvertice << <gridSize3, blockSize >> > (v, e, qv, qe, numv, nume, it,n); hipDeviceSynchronize(); /*printf("\nd: "); for (int j = 1;j <= n;j++) { printf("%d ", v[j].d); }*/ if (*numv == 0) break; //printf("\n"); } for (int i = 1;i <= m;i++) { if (v[e[i].tail].outdeg == 0) { v[e[i].tail].d = min(v[e[i].tail].d, v[e[i].head].d + e[i].s); } } freopen("E:\\大三下\\实验室\\bf_no_cuda\\bf_no_cuda\\hbf_result.txt", "w", stdout); for (int j = 1;j <= n;j++) { printf("%d ", v[j].d); } hipFree(v); hipFree(e); hipFree(qv); hipFree(qe); hipFree(numv); hipFree(nume); fclose(stdin); fclose(stdout); }
6855a4ac21eda59ec272ab9b0da70f069ea5c23f.cu
#include <device_launch_parameters.h> #include <iostream> #include <cstdio> #include <cstdlib> #include <cuda_runtime.h> struct vertice { int d; int first; int indeg; int outdeg; int lastchangeit; }; struct edge { int s; int head; int tail; int next; }; int maxlength = 11000000; __global__ void findedge(struct vertice* v, struct edge* e, int* qv, int* qe,int* numv,int* nume) { int index = threadIdx.x+1+blockIdx.x*blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index;i <= (*numv);i = i + stride) { int u = qv[i]; for (int j = v[u].first;j != 0;j = e[j].next) { if (v[e[j].tail].indeg == 1) { v[e[j].tail].d = min(v[e[j].tail].d, v[e[j].head].d + e[j].s); } if (v[e[j].tail].outdeg == 0) { continue; } atomicExch(&(qe[atomicAdd(nume, 1) + 1]), j); } } } __global__ void release(struct vertice* v, struct edge* e, int* qv, int* qe, int* numv,int* nume,int *it) { int index = threadIdx.x + 1 + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index;i <= (*nume);i = i + stride) { //printf("%d ", i); int ee = qe[i]; //printf("%d ", ee); if (v[e[ee].tail].d > v[e[ee].head].d + e[ee].s) { //atomicExch(&(qv[atomicAdd(numv, 1) + 1]), e[ee].tail); atomicMin(&(v[e[ee].tail].d), v[e[ee].head].d + e[ee].s); v[e[ee].tail].lastchangeit = *it; /*if (*it != v[e[ee].tail].lastchangeit) { (*numv)++; qv[*numv] = e[ee].tail; } */ } } } __global__ void findvertice(struct vertice* v, struct edge* e, int* qv, int* qe, int* numv, int* nume, int* it,int n) { int index = threadIdx.x + 1 + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index;i <= n;i = i + stride) { if (v[i].lastchangeit == *it) { atomicExch(&(qv[atomicAdd(numv, 1) + 1]), i); } } } int main() { int n, m; freopen("hbf.txt", "r", stdin); scanf("%d %d", &n, &m); struct vertice* v; struct edge* e; int* qv; int* qe; int* numv; int* nume; int* it; cudaMallocManaged((void**)&v, (n + 1) * sizeof(struct vertice)); cudaMallocManaged((void**)&e, (m + 1) * sizeof(struct edge)); cudaMallocManaged((void**)&qv, (n+1) * sizeof(int)); cudaMallocManaged((void**)&qe, (m+1) * sizeof(int)); cudaMallocManaged((void**)&numv, sizeof(int)); cudaMallocManaged((void**)&nume, sizeof(int)); cudaMallocManaged((void**)&it, sizeof(int)); for (int i = 1;i <= n;i++) { v[i].d = maxlength; v[i].first = 0; v[i].indeg = 0; v[i].outdeg = 0; v[i].lastchangeit = 0; } v[1].d = 0; for (int i = 1;i <= m;i++) { e[i].head = 0; e[i].tail = 0; e[i].next = 0; e[i].s = 0; } *numv = 0; *nume = 0; //qv[1] = 1; for (int i = 1;i <= m;i++) { int p, q, l; scanf("%d %d %d", &p, &q, &l); if (p != q) { v[q].indeg++; v[p].outdeg++; if (p == 1) { v[q].d= min(v[q].d, l); (*numv)++; //printf("%d ", p); //printf("%d ", *numv); //printf("%d ", qv[*numv]); //printf("%d -> ", q); qv[*numv] = q; //printf("%d %d %d\n", *numv, qv[*numv], q); } e[i].next = v[p].first; v[p].first = i; e[i].head = p; e[i].tail = q; e[i].s = l; } else { i--; m--; } } freopen("CON", "r", stdin); for (*it = 1;(*it) <= n-2;(*it)++) { dim3 blockSize(256); dim3 gridSize1(((*numv) + blockSize.x - 1) / blockSize.x); findedge << <gridSize1, blockSize >> > (v, e, qv, qe,numv,nume); cudaDeviceSynchronize(); /*printf("\ne: "); for (int j = 1;j <= (*nume);j++) { printf("%d %d %d %d\n", qe[j],e[qe[j]].head, e[qe[j]].tail, e[qe[j]].s); } printf("v: ");*/ *numv = 0; dim3 gridSize2(((*nume) + blockSize.x - 1) / blockSize.x); release << <gridSize2, blockSize >> > (v, e, qv, qe, numv,nume,it); cudaDeviceSynchronize(); *nume = 0; /*for (int j = 1;j <= (*numv);j++) { printf("%d ", qv[j]); }*/ dim3 
gridSize3((n + blockSize.x - 1)/blockSize.x); findvertice << <gridSize3, blockSize >> > (v, e, qv, qe, numv, nume, it,n); cudaDeviceSynchronize(); /*printf("\nd: "); for (int j = 1;j <= n;j++) { printf("%d ", v[j].d); }*/ if (*numv == 0) break; //printf("\n"); } for (int i = 1;i <= m;i++) { if (v[e[i].tail].outdeg == 0) { v[e[i].tail].d = min(v[e[i].tail].d, v[e[i].head].d + e[i].s); } } freopen("E:\\大三下\\实验室\\bf_no_cuda\\bf_no_cuda\\hbf_result.txt", "w", stdout); for (int j = 1;j <= n;j++) { printf("%d ", v[j].d); } cudaFree(v); cudaFree(e); cudaFree(qv); cudaFree(qe); cudaFree(numv); cudaFree(nume); fclose(stdin); fclose(stdout); }
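// A plain CPU Bellman-Ford over the same 1-based edge arrays can serve as a correctness
// reference for the frontier-based GPU version above. This is a sketch under that
// assumption, not part of the original program; `maxlength` is the "infinity" sentinel
// used in main, and vertex 1 is the source, as in the GPU code.
void bellman_ford_cpu(const struct edge * e, int n, int m, int * dist) {
    for (int i = 1; i <= n; i++) dist[i] = maxlength;
    dist[1] = 0;
    for (int pass = 1; pass <= n - 1; pass++) {
        bool changed = false;
        for (int j = 1; j <= m; j++) {
            if (dist[e[j].head] != maxlength &&
                dist[e[j].head] + e[j].s < dist[e[j].tail]) {
                dist[e[j].tail] = dist[e[j].head] + e[j].s;  // relax edge head -> tail
                changed = true;
            }
        }
        if (!changed) break;  // no update in a full pass: distances are final
    }
}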
dc5d74f8dcbaf9476665395068f0dd8a83461d7b.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <stdio.h> #include <iostream> #include "cudaHeader.h" __global__ void calculateHistByDevice(int* input, int input_size, int* histogram) { //CREATE HIST FOR EACH BLOCK __shared__ int private_hist[SIZE]; //CREATE INDEX FOR EACH THREAD IN EACH BLOCK int index = threadIdx.x + blockIdx.x * blockDim.x ; //INITIATE THE RESULTS private_hist[index] = 0; //COMPUTE HIST FOR EACH BLOCK if(index < input_size) atomicAdd(&private_hist[input[index]], 1); //MERGE ALL PRIVATE HISTS INTO OUTPUT __syncthreads(); histogram[index] = private_hist[index]; } void calculateHistByCuda(int* input, int size_of_input, int* result) { int num_blocks = size_of_input / NUM_THREADS_PER_BLOCK; if(size_of_input % NUM_THREADS_PER_BLOCK != 0) { num_blocks++; } //ALLOCATE DATA TO CUDA MEMORY int* cuda_input, *cuda_hist; int size_for_cuda_input = sizeof(int) * size_of_input; int size_for_cuda_res = sizeof(int) * SIZE; hipMalloc((void**)&cuda_input, size_for_cuda_input); hipMalloc((void**)&cuda_hist, size_for_cuda_res); //COPY INPUT INTO DEVICE hipMemcpy(cuda_input, input, size_for_cuda_input, hipMemcpyHostToDevice); hipMemcpy(cuda_hist, result, size_for_cuda_res, hipMemcpyHostToDevice); //LUNCH KERNEL hipLaunchKernelGGL(( calculateHistByDevice), dim3(num_blocks), dim3(NUM_THREADS_PER_BLOCK), 0, 0, cuda_input, size_of_input, cuda_hist); //COPY RESULT BACK TO HOST hipMemcpy(result, cuda_hist, size_for_cuda_res, hipMemcpyDeviceToHost); //FREE hipFree(cuda_input); hipFree(cuda_hist); }
dc5d74f8dcbaf9476665395068f0dd8a83461d7b.cu
#include <cuda_runtime.h> #include <helper_cuda.h> #include <stdio.h> #include <iostream> #include "cudaHeader.h" __global__ void calculateHistByDevice(int* input, int input_size, int* histogram) { //CREATE HIST FOR EACH BLOCK __shared__ int private_hist[SIZE]; //CREATE INDEX FOR EACH THREAD IN EACH BLOCK int index = threadIdx.x + blockIdx.x * blockDim.x ; //INITIATE THE RESULTS private_hist[index] = 0; //COMPUTE HIST FOR EACH BLOCK if(index < input_size) atomicAdd(&private_hist[input[index]], 1); //MERGE ALL PRIVATE HISTS INTO OUTPUT __syncthreads(); histogram[index] = private_hist[index]; } void calculateHistByCuda(int* input, int size_of_input, int* result) { int num_blocks = size_of_input / NUM_THREADS_PER_BLOCK; if(size_of_input % NUM_THREADS_PER_BLOCK != 0) { num_blocks++; } //ALLOCATE DATA TO CUDA MEMORY int* cuda_input, *cuda_hist; int size_for_cuda_input = sizeof(int) * size_of_input; int size_for_cuda_res = sizeof(int) * SIZE; cudaMalloc((void**)&cuda_input, size_for_cuda_input); cudaMalloc((void**)&cuda_hist, size_for_cuda_res); //COPY INPUT INTO DEVICE cudaMemcpy(cuda_input, input, size_for_cuda_input, cudaMemcpyHostToDevice); cudaMemcpy(cuda_hist, result, size_for_cuda_res, cudaMemcpyHostToDevice); //LUNCH KERNEL calculateHistByDevice<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(cuda_input, size_of_input, cuda_hist); //COPY RESULT BACK TO HOST cudaMemcpy(result, cuda_hist, size_for_cuda_res, cudaMemcpyDeviceToHost); //FREE cudaFree(cuda_input); cudaFree(cuda_hist); }
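// Note: calculateHistByDevice above indexes private_hist with the global thread index, so
// the shared-memory initialization and the final `histogram[index] = private_hist[index]`
// merge are only valid when the whole input is handled by a single block of exactly SIZE
// threads. A more general privatized histogram, sketched here assuming SIZE bins,
// 0 <= input[i] < SIZE, and a zero-initialized global histogram, would typically look like:
__global__ void calculateHistGeneral(const int* input, int input_size, int* histogram) {
    __shared__ int private_hist[SIZE];
    for (int b = threadIdx.x; b < SIZE; b += blockDim.x)
        private_hist[b] = 0;                        // each block zeroes its own copy
    __syncthreads();
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < input_size;
         i += blockDim.x * gridDim.x)
        atomicAdd(&private_hist[input[i]], 1);      // bin within the block
    __syncthreads();
    for (int b = threadIdx.x; b < SIZE; b += blockDim.x)
        atomicAdd(&histogram[b], private_hist[b]);  // merge block counts into the result
}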
3b8da1f3316881e880de28ec763867b99a827056.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> __global__ void k0 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; o[t] = coshf(x)/sinhf(x) - 1.f/x; } __global__ void k1 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; o[t] = 1.f / tanhf(x) - 1.f/x; } /* Copyright (c) 2018-2021, Norbert Juffa All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ __global__ void k2 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; float s, r; s = x * x; r = 7.70960469e-8f; r = fmaf (r, s, -1.65101926e-6f); r = fmaf (r, s, 2.03457112e-5f); r = fmaf (r, s, -2.10521728e-4f); r = fmaf (r, s, 2.11580913e-3f); r = fmaf (r, s, -2.22220998e-2f); r = fmaf (r, s, 8.33333284e-2f); r = fmaf (r, x, 0.25f * x); o[t] = r; } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage %s <n> <repeat>\n", argv[0]); return 1; } const int n = atoi(argv[1]); const int repeat = atoi(argv[2]); const size_t size = sizeof(float) * n; float *a, *o, *o0, *o1, *o2; a = (float*) malloc (size); o = (float*) malloc (size); // the range [-1.8, -0.00001) for (int i = 0; i < n; i++) { a[i] = -1.8f + i * (1.79999f / n); } o0 = (float*) malloc (size); o1 = (float*) malloc (size); o2 = (float*) malloc (size); float *d_a, *d_o0, *d_o1, *d_o2; hipMalloc((void**)&d_a, size); hipMalloc((void**)&d_o0, size); hipMalloc((void**)&d_o1, size); hipMalloc((void**)&d_o2, size); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( k0), dim3(n/256), dim3(256), 0, 0, d_a, d_o0); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k0: %f (s)\n", (time * 1e-9f) / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( k1), dim3(n/256), dim3(256), 0, 0, d_a, d_o1); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k1: %f (s)\n", (time * 1e-9f) / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( k2), dim3(n/256), dim3(256), 0, 0, d_a, d_o2); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k2: %f (s)\n", (time * 1e-9f) / repeat); hipMemcpy(o0, d_o0, size, hipMemcpyDeviceToHost); hipMemcpy(o1, d_o1, size, hipMemcpyDeviceToHost); hipMemcpy(o2, d_o2, size, hipMemcpyDeviceToHost); // https://en.wikipedia.org/wiki/Brillouin_and_Langevin_functions for (int i = 0; i < n; i++) { float x = a[i]; float x2 = x * x; float x4 = x2 * x2; float x6 = x4 * x2; o[i] = x * (1.f/3.f - 1.f/45.f * x2 + 2.f/945.f * x4 - 1.f/4725.f * x6); } float e[3] = {0,0,0}; for (int i = 0; i < n; i++) { e[0] += (o[i] - o0[i]) * (o[i] - o0[i]); e[1] += (o[i] - o1[i]) * (o[i] - o1[i]); e[2] += (o[i] - o2[i]) * (o[i] - o2[i]); } printf("\nError statistics for the kernels:\n"); for (int i = 0; i < 3; i++) { printf("%f ", sqrt(e[i])); } printf("\n"); free(a); free(o); free(o0); free(o1); free(o2); hipFree(d_a); hipFree(d_o0); hipFree(d_o1); hipFree(d_o2); return 0; }
3b8da1f3316881e880de28ec763867b99a827056.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> __global__ void k0 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; o[t] = coshf(x)/sinhf(x) - 1.f/x; } __global__ void k1 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; o[t] = 1.f / tanhf(x) - 1.f/x; } /* Copyright (c) 2018-2021, Norbert Juffa All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ __global__ void k2 (const float *__restrict__ a, float *__restrict__ o) { int t = blockIdx.x * blockDim.x + threadIdx.x; float x = a[t]; float s, r; s = x * x; r = 7.70960469e-8f; r = fmaf (r, s, -1.65101926e-6f); r = fmaf (r, s, 2.03457112e-5f); r = fmaf (r, s, -2.10521728e-4f); r = fmaf (r, s, 2.11580913e-3f); r = fmaf (r, s, -2.22220998e-2f); r = fmaf (r, s, 8.33333284e-2f); r = fmaf (r, x, 0.25f * x); o[t] = r; } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage %s <n> <repeat>\n", argv[0]); return 1; } const int n = atoi(argv[1]); const int repeat = atoi(argv[2]); const size_t size = sizeof(float) * n; float *a, *o, *o0, *o1, *o2; a = (float*) malloc (size); o = (float*) malloc (size); // the range [-1.8, -0.00001) for (int i = 0; i < n; i++) { a[i] = -1.8f + i * (1.79999f / n); } o0 = (float*) malloc (size); o1 = (float*) malloc (size); o2 = (float*) malloc (size); float *d_a, *d_o0, *d_o1, *d_o2; hipMalloc((void**)&d_a, size); hipMalloc((void**)&d_o0, size); hipMalloc((void**)&d_o1, size); hipMalloc((void**)&d_o2, size); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { k0<<<n/256, 256>>>(d_a, d_o0); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k0: %f (s)\n", (time * 1e-9f) / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { k1<<<n/256, 256>>>(d_a, d_o1); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k1: %f (s)\n", (time * 1e-9f) / repeat); 
start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { k2<<<n/256, 256>>>(d_a, d_o2); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of k2: %f (s)\n", (time * 1e-9f) / repeat); hipMemcpy(o0, d_o0, size, hipMemcpyDeviceToHost); hipMemcpy(o1, d_o1, size, hipMemcpyDeviceToHost); hipMemcpy(o2, d_o2, size, hipMemcpyDeviceToHost); // https://en.wikipedia.org/wiki/Brillouin_and_Langevin_functions for (int i = 0; i < n; i++) { float x = a[i]; float x2 = x * x; float x4 = x2 * x2; float x6 = x4 * x2; o[i] = x * (1.f/3.f - 1.f/45.f * x2 + 2.f/945.f * x4 - 1.f/4725.f * x6); } float e[3] = {0,0,0}; for (int i = 0; i < n; i++) { e[0] += (o[i] - o0[i]) * (o[i] - o0[i]); e[1] += (o[i] - o1[i]) * (o[i] - o1[i]); e[2] += (o[i] - o2[i]) * (o[i] - o2[i]); } printf("\nError statistics for the kernels:\n"); for (int i = 0; i < 3; i++) { printf("%f ", sqrt(e[i])); } printf("\n"); free(a); free(o); free(o0); free(o1); free(o2); hipFree(d_a); hipFree(d_o0); hipFree(d_o1); hipFree(d_o2); return 0; }
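// For context: all three kernels above evaluate the Langevin function
//   L(x) = coth(x) - 1/x,
// whose series expansion (used by the host reference in main) is
//   L(x) = x/3 - x^3/45 + 2*x^5/945 - x^7/4725 + ...
// k0 and k1 compute the closed form with coshf/sinhf and tanhf respectively, while k2
// evaluates a fused-multiply-add polynomial in s = x^2; its constant terms reproduce the
// leading series coefficients (0.25 + 8.33333284e-2 = 1/3, -2.22220998e-2 = -1/45, ...).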
7ba4809056416e73c8afd444b24cc5de0a2d9550.hip
// !!! This is a file automatically generated by hipify!!! #include "benchmark.h" #include <cuMat/Core> #include <iostream> #include <cstdlib> #include <thrust/inner_product.h> #include <thrust/execution_policy.h> void benchmark_Thrust( const std::vector<std::string>& parameterNames, const Json::Array& parameters, const std::vector<std::string>& returnNames, Json::Array& returnValues) { //number of runs for time measures const int runs = 10; const int subruns = 10; //test if the config is valid assert(parameterNames.size() == 1); assert(parameterNames[0] == "Vector-Size"); assert(returnNames.size() == 1); assert(returnNames[0] == "Time"); cuMat::SimpleRandom rand; int numConfigs = parameters.Size(); for (int config = 0; config < numConfigs; ++config) { //Input int vectorSize = parameters[config][0].AsInt32(); double totalTime = 0; std::cout << " VectorSize: " << vectorSize << std::flush; //Create matrices cuMat::VectorXf a(vectorSize); rand.fillUniform(a, 0, 1); cuMat::VectorXf b(vectorSize); rand.fillUniform(b, 0, 1); cuMat::Scalarf resultDevice; //Run it multiple times volatile float res; for (int run = 0; run < runs; ++run) { //Main logic hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < subruns; ++i) { res = thrust::inner_product(thrust::device, a.data(), a.data()+vectorSize, b.data(), float(0)); } hipDeviceSynchronize(); auto finish = std::chrono::steady_clock::now(); double elapsed = std::chrono::duration_cast< std::chrono::duration<double>>(finish - start).count() * 1000 / subruns; totalTime += elapsed; } //Result Json::Array result; double finalTime = totalTime / runs; result.PushBack(finalTime); returnValues.PushBack(result); std::cout << " -> " << finalTime << "ms" << std::endl; } }
7ba4809056416e73c8afd444b24cc5de0a2d9550.cu
#include "benchmark.h" #include <cuMat/Core> #include <iostream> #include <cstdlib> #include <thrust/inner_product.h> #include <thrust/execution_policy.h> void benchmark_Thrust( const std::vector<std::string>& parameterNames, const Json::Array& parameters, const std::vector<std::string>& returnNames, Json::Array& returnValues) { //number of runs for time measures const int runs = 10; const int subruns = 10; //test if the config is valid assert(parameterNames.size() == 1); assert(parameterNames[0] == "Vector-Size"); assert(returnNames.size() == 1); assert(returnNames[0] == "Time"); cuMat::SimpleRandom rand; int numConfigs = parameters.Size(); for (int config = 0; config < numConfigs; ++config) { //Input int vectorSize = parameters[config][0].AsInt32(); double totalTime = 0; std::cout << " VectorSize: " << vectorSize << std::flush; //Create matrices cuMat::VectorXf a(vectorSize); rand.fillUniform(a, 0, 1); cuMat::VectorXf b(vectorSize); rand.fillUniform(b, 0, 1); cuMat::Scalarf resultDevice; //Run it multiple times volatile float res; for (int run = 0; run < runs; ++run) { //Main logic cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < subruns; ++i) { res = thrust::inner_product(thrust::device, a.data(), a.data()+vectorSize, b.data(), float(0)); } cudaDeviceSynchronize(); auto finish = std::chrono::steady_clock::now(); double elapsed = std::chrono::duration_cast< std::chrono::duration<double>>(finish - start).count() * 1000 / subruns; totalTime += elapsed; } //Result Json::Array result; double finalTime = totalTime / runs; result.PushBack(finalTime); returnValues.PushBack(result); std::cout << " -> " << finalTime << "ms" << std::endl; } }
f62e992a1798e7416c83c36df6826e47617d0fe7.hip
// !!! This is a file automatically generated by hipify!!! #include "cupoch/io/class_io/pointcloud_io.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/utility/helper.h" using namespace cupoch; using namespace cupoch::io; void HostPointCloud::FromDevice(const geometry::PointCloud& pointcloud) { points_.resize(pointcloud.points_.size()); normals_.resize(pointcloud.normals_.size()); colors_.resize(pointcloud.colors_.size()); utility::CopyFromDeviceMultiStream(pointcloud.points_, points_); utility::CopyFromDeviceMultiStream(pointcloud.normals_, normals_); utility::CopyFromDeviceMultiStream(pointcloud.colors_, colors_); hipDeviceSynchronize(); } void HostPointCloud::ToDevice(geometry::PointCloud& pointcloud) const { pointcloud.points_.resize(points_.size()); pointcloud.normals_.resize(normals_.size()); pointcloud.colors_.resize(colors_.size()); utility::CopyToDeviceMultiStream(points_, pointcloud.points_); utility::CopyToDeviceMultiStream(normals_, pointcloud.normals_); utility::CopyToDeviceMultiStream(colors_, pointcloud.colors_); hipDeviceSynchronize(); } void HostPointCloud::Clear() { points_.clear(); normals_.clear(); colors_.clear(); }
f62e992a1798e7416c83c36df6826e47617d0fe7.cu
#include "cupoch/io/class_io/pointcloud_io.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/utility/helper.h" using namespace cupoch; using namespace cupoch::io; void HostPointCloud::FromDevice(const geometry::PointCloud& pointcloud) { points_.resize(pointcloud.points_.size()); normals_.resize(pointcloud.normals_.size()); colors_.resize(pointcloud.colors_.size()); utility::CopyFromDeviceMultiStream(pointcloud.points_, points_); utility::CopyFromDeviceMultiStream(pointcloud.normals_, normals_); utility::CopyFromDeviceMultiStream(pointcloud.colors_, colors_); cudaDeviceSynchronize(); } void HostPointCloud::ToDevice(geometry::PointCloud& pointcloud) const { pointcloud.points_.resize(points_.size()); pointcloud.normals_.resize(normals_.size()); pointcloud.colors_.resize(colors_.size()); utility::CopyToDeviceMultiStream(points_, pointcloud.points_); utility::CopyToDeviceMultiStream(normals_, pointcloud.normals_); utility::CopyToDeviceMultiStream(colors_, pointcloud.colors_); cudaDeviceSynchronize(); } void HostPointCloud::Clear() { points_.clear(); normals_.clear(); colors_.clear(); }
ab160a55b509045aead17bb1dc15706b28bb1c09.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <omp.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_cooperative_groups.h> #ifdef USE_MPI #include <mpi.h> #endif #include "../utils/common.h" static size_t N = 1000; void init(int *p, size_t size) { for (size_t i = 0; i < size; ++i) { p[i] = i; } } void output(int *p, size_t size) { for (size_t i = 0; i < size; ++i) { printf("index %zu: %d\n", i, p[i]); } } __global__ void vecAdd(int *l, int *r, int *p, size_t N) { size_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < N) { p[idx] = l[idx] + r[idx]; } cooperative_groups::this_grid().sync(); } int main(int argc, char *argv[]) { #ifdef USE_MPI int numtasks, rank; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &numtasks); MPI_Comm_rank(MPI_COMM_WORLD, &rank); printf("MPI task %d/%d\n", rank, numtasks); #endif // Init device int device_id = 0; if (argc > 1) { device_id = atoi(argv[1]); } cuda_init_device(device_id); #pragma omp parallel { int l[N], r[N], p[N]; int *dl, *dr, *dp; init(l, N); init(r, N); RUNTIME_API_CALL(hipMalloc(&dl, N * sizeof(int))); RUNTIME_API_CALL(hipMalloc(&dr, N * sizeof(int))); RUNTIME_API_CALL(hipMalloc(&dp, N * sizeof(int))); RUNTIME_API_CALL(hipMemcpy(dl, l, N * sizeof(int), hipMemcpyHostToDevice)); RUNTIME_API_CALL(hipMemcpy(dr, r, N * sizeof(int), hipMemcpyHostToDevice)); dim3 threads(256, 1, 1); dim3 blocks((N - 1) / 256 + 1, 1, 1); void *args[] = {&dl, &dr, &dp, &N}; GPU_TEST_FOR((hipLaunchCooperativeKernel((void*)vecAdd, blocks, threads, args))); RUNTIME_API_CALL(hipMemcpy(p, dp, N * sizeof(int), hipMemcpyDeviceToHost)); RUNTIME_API_CALL(hipFree(dl)); RUNTIME_API_CALL(hipFree(dr)); RUNTIME_API_CALL(hipFree(dp)); #pragma omp critical { printf("Thread %d\n", omp_get_thread_num()); output(p, N); } } hipDeviceSynchronize(); #ifdef USE_MPI MPI_Finalize(); #endif return 0; }
ab160a55b509045aead17bb1dc15706b28bb1c09.cu
#include <cstdio> #include <omp.h> #include <cuda.h> #include <cuda_runtime.h> #include <cooperative_groups.h> #ifdef USE_MPI #include <mpi.h> #endif #include "../utils/common.h" static size_t N = 1000; void init(int *p, size_t size) { for (size_t i = 0; i < size; ++i) { p[i] = i; } } void output(int *p, size_t size) { for (size_t i = 0; i < size; ++i) { printf("index %zu: %d\n", i, p[i]); } } __global__ void vecAdd(int *l, int *r, int *p, size_t N) { size_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < N) { p[idx] = l[idx] + r[idx]; } cooperative_groups::this_grid().sync(); } int main(int argc, char *argv[]) { #ifdef USE_MPI int numtasks, rank; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &numtasks); MPI_Comm_rank(MPI_COMM_WORLD, &rank); printf("MPI task %d/%d\n", rank, numtasks); #endif // Init device int device_id = 0; if (argc > 1) { device_id = atoi(argv[1]); } cuda_init_device(device_id); #pragma omp parallel { int l[N], r[N], p[N]; int *dl, *dr, *dp; init(l, N); init(r, N); RUNTIME_API_CALL(cudaMalloc(&dl, N * sizeof(int))); RUNTIME_API_CALL(cudaMalloc(&dr, N * sizeof(int))); RUNTIME_API_CALL(cudaMalloc(&dp, N * sizeof(int))); RUNTIME_API_CALL(cudaMemcpy(dl, l, N * sizeof(int), cudaMemcpyHostToDevice)); RUNTIME_API_CALL(cudaMemcpy(dr, r, N * sizeof(int), cudaMemcpyHostToDevice)); dim3 threads(256, 1, 1); dim3 blocks((N - 1) / 256 + 1, 1, 1); void *args[] = {&dl, &dr, &dp, &N}; GPU_TEST_FOR((cudaLaunchCooperativeKernel((void*)vecAdd, blocks, threads, args))); RUNTIME_API_CALL(cudaMemcpy(p, dp, N * sizeof(int), cudaMemcpyDeviceToHost)); RUNTIME_API_CALL(cudaFree(dl)); RUNTIME_API_CALL(cudaFree(dr)); RUNTIME_API_CALL(cudaFree(dp)); #pragma omp critical { printf("Thread %d\n", omp_get_thread_num()); output(p, N); } } cudaDeviceSynchronize(); #ifdef USE_MPI MPI_Finalize(); #endif return 0; }
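// cudaLaunchCooperativeKernel (wrapped by GPU_TEST_FOR above, hipLaunchCooperativeKernel in
// the HIP version) only succeeds on devices that support cooperative launch and when the
// whole grid can be resident at once. A typical pre-flight check, sketched here
// independently of the helpers in ../utils/common.h:
#include <cuda_runtime.h>
#include <cstdio>
bool canLaunchCooperatively(int device_id) {
    int supported = 0;
    cudaDeviceGetAttribute(&supported, cudaDevAttrCooperativeLaunch, device_id);
    if (!supported)
        printf("Device %d does not support cooperative kernel launch\n", device_id);
    return supported != 0;
}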
9c2ab1f949e298994e6ecc002f3768d63a290cd1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "timerc.h" #include <math.h> __host__ void merge(int *a, int left_start, int right_end) { // a is source array int left1, right1, left2, right2, k, i, j; int size = 1; int len_arr = right_end - left_start + 1; int* b = (int *) malloc(sizeof(int) * len_arr); // b is temp array while (size < len_arr) { k = left_start; left1 = left_start; while (left1 + size < len_arr) { right1 = left1 + size - 1; left2 = right1 + 1; right2 = left2 + size - 1; if (right2 >= len_arr) { right2 = len_arr - 1; } // start merging the list i = left1; j = left2; while (i <= right1 && j <= right2) { if (a[i] < a[j]) { b[k] = a[i]; i++; k++; //printf("a[i] = %d\n", a[i]); } else { b[k] = a[j]; j++; k++; //printf("a[i] = %d\n", a[i]); } } // if either left or right still has remaining while (i <= right1) { b[k] = a[i]; i++; k++; } while (j <= right2) { b[k] = a[j]; j++; k++; } //merge and sort other pairs left1 = right2 + 1; } // if there is any pair left that is unmerged i = left1; while (k < len_arr) { b[k] = a[i]; i++; k++; } for (i = 0; i < len_arr; i++) { a[i] = b[i]; } size = size * 2; } free(b); } __device__ __host__ void merge_gpu(int *a, int *b, int left_start, int right_end, int lev) { // a is source array, b is temp array int left1, right1, left2, right2, k, i, j; //int len_arr = right_end - left_start + 1; int size = pow(2, lev); k = left_start; left1 = left_start; right1 = left1 + size - 1; left2 = right1 + 1; right2 = left2 + size - 1; // start merging the list i = left1; j = left2; while (i <= right1 && j <= right2) { if (a[i] < a[j]) { b[k] = a[i]; i++; k++; } else { b[k] = a[j]; j++; k++; } } // if either left or right still has remaining while (i <= right1) { b[k] = a[i]; i++; k++; } while (j <= right2) { b[k] = a[j]; j++; k++; } } __global__ void gpu_mergesort_serial_merge(int *a, int *b, int level) { int thread_i = threadIdx.x + blockIdx.x * blockDim.x; int left_start = pow(2,level) * 2 * thread_i; int right_end = pow(2,level) * ( 2 * thread_i + 1 ) + pow(2,level) - 1; merge_gpu(a, b, left_start, right_end, level); } void test_merge(){ int a[8] ={4,2,4,7,9,1,3,8}; int* b = (int *) malloc(sizeof(int) * 8); merge_gpu(a, b, 0, 1, 0); merge_gpu(a, b, 2, 3, 0); merge_gpu(a, b, 4, 5, 0); merge_gpu(a, b, 6, 7, 0); printf("result should be {2,4,4,7,1,9,3,8}\n"); printf("test result: \n"); for (int i = 0; i < 8; i++){ printf("%d ", b[i]); } printf("\n"); } void test_pow_2(){ printf("%lf\n", pow(2,3)); } void test_log_2(){ printf("%lf\n", log2(2)); } int main() { /* test functions test_log_2(); test_pow_2(); test_merge(); */ int n = pow(2,13); int num_threads_per_block = 1; int* h_arr = (int *) malloc(sizeof(int)*n); //generate an array with numbers for (int i = 0; i < n; i++) { h_arr[i] = (n-1) - i; } // call gpu_mergesort and generate gpu_result int * d_arr; int * d_temp; int * gpu_result = (int *) malloc( n * sizeof(int) ); hipMalloc( (void**) &d_arr, n * sizeof(int) ); hipMemcpy( d_arr, h_arr, n * sizeof(int), hipMemcpyHostToDevice ); hipMalloc( (void**) &d_temp, n * sizeof(int) ); float gpu_time; gstart(); int flag; for (int lev = 0; lev < log2(n); lev++) { // lev means level /* if ( ( n / num_threads_per_block / pow(2, lev+1) ) == 1) { num_threads_per_block = num_threads_per_block / 2; } */ hipLaunchKernelGGL(( gpu_mergesort_serial_merge), dim3(n / num_threads_per_block / pow(2, lev+1)) , dim3(num_threads_per_block) , 0, 0, d_arr, d_temp, lev); flag = 0; lev++; hipLaunchKernelGGL(( 
gpu_mergesort_serial_merge), dim3(n / num_threads_per_block / pow(2, lev+1)) , dim3(num_threads_per_block) , 0, 0, d_temp, d_arr, lev); flag = 1; } gend(&gpu_time); printf("GPU time = %f\n",gpu_time); if (flag == 0) { // finish on d_temp hipMemcpy(gpu_result, d_temp, ( n * sizeof(int) ), hipMemcpyDeviceToHost); } else { // finish on d_arr hipMemcpy(gpu_result, d_arr, ( n * sizeof(int) ), hipMemcpyDeviceToHost); } // ---------------------------------------------------------------------------------------- // merge sort on CPU float cpu_time; cstart(); merge(h_arr, 0, n-1); cend(&cpu_time); printf("CPU time = %f\n",cpu_time); // debug /* printf("gpu result: \n"); for (int i = 0; i < n; i++) { printf("%d ", gpu_result[i]); } printf("\n"); // debug printf("cpu result: \n"); for (int i = 0; i < n; i++) { printf("%d ", h_arr[i]); } printf("\n"); */ // compare cpu_result with gpu_result int error = 0; for (int i = 0; i < n; i++) { if (gpu_result[i] != h_arr[i]) { printf("ERROR\n"); error = 1; break; } } if (error == 0) printf("gpu operation has the same result as the cpu operation\n"); else printf("GPU and CPU results are different"); hipFree(d_temp); hipFree(d_arr); free(h_arr); }
9c2ab1f949e298994e6ecc002f3768d63a290cd1.cu
#include "timerc.h" #include <math.h> __host__ void merge(int *a, int left_start, int right_end) { // a is source array int left1, right1, left2, right2, k, i, j; int size = 1; int len_arr = right_end - left_start + 1; int* b = (int *) malloc(sizeof(int) * len_arr); // b is temp array while (size < len_arr) { k = left_start; left1 = left_start; while (left1 + size < len_arr) { right1 = left1 + size - 1; left2 = right1 + 1; right2 = left2 + size - 1; if (right2 >= len_arr) { right2 = len_arr - 1; } // start merging the list i = left1; j = left2; while (i <= right1 && j <= right2) { if (a[i] < a[j]) { b[k] = a[i]; i++; k++; //printf("a[i] = %d\n", a[i]); } else { b[k] = a[j]; j++; k++; //printf("a[i] = %d\n", a[i]); } } // if either left or right still has remaining while (i <= right1) { b[k] = a[i]; i++; k++; } while (j <= right2) { b[k] = a[j]; j++; k++; } //merge and sort other pairs left1 = right2 + 1; } // if there is any pair left that is unmerged i = left1; while (k < len_arr) { b[k] = a[i]; i++; k++; } for (i = 0; i < len_arr; i++) { a[i] = b[i]; } size = size * 2; } free(b); } __device__ __host__ void merge_gpu(int *a, int *b, int left_start, int right_end, int lev) { // a is source array, b is temp array int left1, right1, left2, right2, k, i, j; //int len_arr = right_end - left_start + 1; int size = pow(2, lev); k = left_start; left1 = left_start; right1 = left1 + size - 1; left2 = right1 + 1; right2 = left2 + size - 1; // start merging the list i = left1; j = left2; while (i <= right1 && j <= right2) { if (a[i] < a[j]) { b[k] = a[i]; i++; k++; } else { b[k] = a[j]; j++; k++; } } // if either left or right still has remaining while (i <= right1) { b[k] = a[i]; i++; k++; } while (j <= right2) { b[k] = a[j]; j++; k++; } } __global__ void gpu_mergesort_serial_merge(int *a, int *b, int level) { int thread_i = threadIdx.x + blockIdx.x * blockDim.x; int left_start = pow(2,level) * 2 * thread_i; int right_end = pow(2,level) * ( 2 * thread_i + 1 ) + pow(2,level) - 1; merge_gpu(a, b, left_start, right_end, level); } void test_merge(){ int a[8] ={4,2,4,7,9,1,3,8}; int* b = (int *) malloc(sizeof(int) * 8); merge_gpu(a, b, 0, 1, 0); merge_gpu(a, b, 2, 3, 0); merge_gpu(a, b, 4, 5, 0); merge_gpu(a, b, 6, 7, 0); printf("result should be {2,4,4,7,1,9,3,8}\n"); printf("test result: \n"); for (int i = 0; i < 8; i++){ printf("%d ", b[i]); } printf("\n"); } void test_pow_2(){ printf("%lf\n", pow(2,3)); } void test_log_2(){ printf("%lf\n", log2(2)); } int main() { /* test functions test_log_2(); test_pow_2(); test_merge(); */ int n = pow(2,13); int num_threads_per_block = 1; int* h_arr = (int *) malloc(sizeof(int)*n); //generate an array with numbers for (int i = 0; i < n; i++) { h_arr[i] = (n-1) - i; } // call gpu_mergesort and generate gpu_result int * d_arr; int * d_temp; int * gpu_result = (int *) malloc( n * sizeof(int) ); cudaMalloc( (void**) &d_arr, n * sizeof(int) ); cudaMemcpy( d_arr, h_arr, n * sizeof(int), cudaMemcpyHostToDevice ); cudaMalloc( (void**) &d_temp, n * sizeof(int) ); float gpu_time; gstart(); int flag; for (int lev = 0; lev < log2(n); lev++) { // lev means level /* if ( ( n / num_threads_per_block / pow(2, lev+1) ) == 1) { num_threads_per_block = num_threads_per_block / 2; } */ gpu_mergesort_serial_merge<<< n / num_threads_per_block / pow(2, lev+1) , num_threads_per_block >>>(d_arr, d_temp, lev); flag = 0; lev++; gpu_mergesort_serial_merge<<< n / num_threads_per_block / pow(2, lev+1) , num_threads_per_block >>>(d_temp, d_arr, lev); flag = 1; } gend(&gpu_time); printf("GPU time 
= %f\n",gpu_time); if (flag == 0) { // finish on d_temp cudaMemcpy(gpu_result, d_temp, ( n * sizeof(int) ), cudaMemcpyDeviceToHost); } else { // finish on d_arr cudaMemcpy(gpu_result, d_arr, ( n * sizeof(int) ), cudaMemcpyDeviceToHost); } // ---------------------------------------------------------------------------------------- // merge sort on CPU float cpu_time; cstart(); merge(h_arr, 0, n-1); cend(&cpu_time); printf("CPU time = %f\n",cpu_time); // debug /* printf("gpu result: \n"); for (int i = 0; i < n; i++) { printf("%d ", gpu_result[i]); } printf("\n"); // debug printf("cpu result: \n"); for (int i = 0; i < n; i++) { printf("%d ", h_arr[i]); } printf("\n"); */ // compare cpu_result with gpu_result int error = 0; for (int i = 0; i < n; i++) { if (gpu_result[i] != h_arr[i]) { printf("ERROR\n"); error = 1; break; } } if (error == 0) printf("gpu operation has the same result as the cpu operation\n"); else printf("GPU and CPU results are different"); cudaFree(d_temp); cudaFree(d_arr); free(h_arr); }
b3e2ed052cab89945fa5a25c1f7e73623a85d316.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright [2016] <Contributors> * \file Correation.cu * \brief correlation1D operator * \author Xu Dong */ #include "./correlation1D-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #define ROUND_OFF 50000 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 #define correlation1D_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { // == correlation1D Kernel template <typename Dtype> __global__ void Correlate1DData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { extern __shared__ char patch_data_char[]; Dtype *patch_data = (Dtype *)patch_data_char; // First (upper left) position of kernel upper-left corner in current center position of neighborhood in image 1 int x1 = blockIdx.x*stride1 + max_displacement; int y1 = blockIdx.y*stride1; int item = blockIdx.z; int ch_off = threadIdx.x; // Load 3D patch into shared shared memory for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; int idxPatchData = ji_off + ch; patch_data[idxPatchData] = bottom0[idx1]; } } } __syncthreads(); __shared__ Dtype sum[WARPS_PER_BLOCK*THREADS_PER_WARP]; // Compute for(int top_channel = 0; top_channel < topchannels; top_channel++) { sum[ch_off] = 0; int s2o = (top_channel % neighborhood_grid_width + x_shift) * stride2; for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int x2 = x1 + s2o; int idxPatchData = ji_off + ch; int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x2+i) * bottomchannels + ch; sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2]; } } } __syncthreads(); if(ch_off == 0) { Dtype total_sum = 0; for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) { total_sum += sum[idx]; } const int sumelems = kernel_size*kernel_size*bottomchannels; const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x; top[index + item*topcount] = total_sum / (float)sumelems; } } // Aggregate } // == correlation1D Backward Pass Kernel (For data1) template <typename Dtype> __global__ void Correlate1DDataBackward0(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) 
{ CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { // Get bottom1 data: int s2o = stride2 * o; int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + (l+s2o)) * bottomchannels + n; Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot1tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot0index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void Correlate1DDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { //int l = index % bottomwidth + pad_size; //w-pos //int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos //int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { int s2o = stride2 * o; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - 0 - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - 0 - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - 0) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + (l-s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot0tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom1diff[bot1index + item*bottomcount] = sum / (float)sumelems; } } // == Forward // == Dimension rearrangement Kernel template <typename Dtype> __global__ void blob_rearrange_kernel2_corr1D(const Dtype* in, Dtype* out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight) { // change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel] int xy = blockIdx.x*blockDim.x + threadIdx.x; if(xy>=widthheight) return; int ch = blockIdx.y; int n = blockIdx.z; float value=in[(n*channels+ch)*widthheight+xy]; __syncthreads(); int xpad = (xy % width + padding); int ypad = (xy / width + 0); int xypad = ypad * (width+2*padding) + xpad; out[(n*pwidthheight+xypad)*channels + ch] = value; } template <typename Dtype> void Forward_gpu( const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, hipStream_t stream, hipStream_t stream_tmp1, hipStream_t stream_tmp2) { const Dtype *bottom_data1 = data1.dptr_; const Dtype *bottom_data2 = data2.dptr_; Dtype *rbot1 = tmp1.dptr_; Dtype *rbot2 = tmp2.dptr_; Dtype *top = out.dptr_; const int bnum = data1.size(0); const int bchannels = data1.size(1); const int bheight = data1.size(2); const int bwidth = data1.size(3); const int bwidthheight = bwidth * bheight; const int topcount = top_width_ * top_height_ * top_channels_; dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); int threads_per_block = 16; dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, 
bnum); const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight); hipLaunchKernelGGL(( blob_rearrange_kernel2_corr1D<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp1, bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); hipLaunchKernelGGL(( blob_rearrange_kernel2_corr1D<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp2, bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); const int num = bnum; const int channels = bchannels; const int height = bheight; const int width = bwidth + 2 * pad_size_; const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels; int x_shift = - neighborhood_grid_radius_; if(single_side == -1) { // to the left x_shift = - neighborhood_grid_radius_; } else if(single_side == 1) { // to the right x_shift = 0; } // correlation1DLayer int topThreadCount = topcount; dim3 totalBlocksCorr(top_width_, top_height_, num); hipLaunchKernelGGL(( Correlate1DData<Dtype>), dim3(totalBlocksCorr), dim3(threadsPerBlock), shared_memory_per_block * sizeof(Dtype), stream, topThreadCount, num, top_width_, top_height_, top_channels_, topcount, max_displacement_, x_shift, neighborhood_grid_width_, kernel_radius_, kernel_size_, stride1_, stride2_, width, height, channels, rbot1, rbot2, top); correlation1D_CUDA_CHECK(hipPeekAtLastError()); } template <typename Dtype> void Backward_gpu( const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, hipStream_t stream0, hipStream_t stream1, int num, int channels, int height, int width) { // Get top diff, compute bottom diff const Dtype* top_diff = out_grad.dptr_; Dtype* bottom0_diff = in_grad1.dptr_; Dtype* bottom1_diff = in_grad2.dptr_; const Dtype* rbot1 = tmp1.dptr_; const Dtype* rbot2 = tmp2.dptr_; const int paddedheight = height ; const int paddedwidth = width + 2 * pad_size_; const int bottomcount = channels * height * width; int botThreadCount = bottomcount; const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; // correlation1DLayerBackward int x_shift = - neighborhood_grid_radius_; if (single_side == -1) { // to the left x_shift = -neighborhood_grid_radius_; } else if(single_side == 1) { // to the right x_shift = 0; } // == Run kernel Backward 0 dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest const int buffer_size_backw0 = \ (static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_)\ / static_cast<float>(stride1_))) + 1) * top_channels_; // == Run kernel Backward 0 for (int n = 0; n < num; n++) { hipLaunchKernelGGL(( Correlate1DDataBackward0<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream0, botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, x_shift, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot2, top_diff); correlation1D_CUDA_CHECK(hipPeekAtLastError()); } // == Run kernel Backward 1 for (int n = 0; n < num; n++) { hipLaunchKernelGGL(( Correlate1DDataBackward1<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, 
stream1, botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, x_shift, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1, bottom1_diff, top_diff); correlation1D_CUDA_CHECK(hipPeekAtLastError()); } } } // namespace cuda template<typename Dtype> inline void Correlation1DForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_ ) { hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_); hipStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_); cuda::Forward_gpu(out, data1, data2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, single_side, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream, stream_tmp1, stream_tmp2); } template<typename Dtype> inline void Correlation1DBackward(const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, int num, int channels, int height, int width ) { hipStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_); hipStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_); cuda::Backward_gpu(out_grad, in_grad1, in_grad2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, single_side, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream0, stream1, num, channels, height, width); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(Correlation1DParam param) { return new Correlation1DOp<gpu>(param); } } // namespace op } // namespace mxnet
b3e2ed052cab89945fa5a25c1f7e73623a85d316.cu
/*! * Copyright [2016] <Contributors> * \file Correation.cu * \brief correlation1D operator * \author Xu Dong */ #include "./correlation1D-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #define ROUND_OFF 50000 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 #define correlation1D_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { // == correlation1D Kernel template <typename Dtype> __global__ void Correlate1DData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { extern __shared__ char patch_data_char[]; Dtype *patch_data = (Dtype *)patch_data_char; // First (upper left) position of kernel upper-left corner in current center position of neighborhood in image 1 int x1 = blockIdx.x*stride1 + max_displacement; int y1 = blockIdx.y*stride1; int item = blockIdx.z; int ch_off = threadIdx.x; // Load 3D patch into shared shared memory for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; int idxPatchData = ji_off + ch; patch_data[idxPatchData] = bottom0[idx1]; } } } __syncthreads(); __shared__ Dtype sum[WARPS_PER_BLOCK*THREADS_PER_WARP]; // Compute for(int top_channel = 0; top_channel < topchannels; top_channel++) { sum[ch_off] = 0; int s2o = (top_channel % neighborhood_grid_width + x_shift) * stride2; for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int x2 = x1 + s2o; int idxPatchData = ji_off + ch; int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x2+i) * bottomchannels + ch; sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2]; } } } __syncthreads(); if(ch_off == 0) { Dtype total_sum = 0; for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) { total_sum += sum[idx]; } const int sumelems = kernel_size*kernel_size*bottomchannels; const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x; top[index + item*topcount] = total_sum / (float)sumelems; } } // Aggregate } // == correlation1D Backward Pass Kernel (For data1) template <typename Dtype> __global__ void Correlate1DDataBackward0(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; //channels int 
l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { // Get bottom1 data: int s2o = stride2 * o; int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + (l+s2o)) * bottomchannels + n; Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot1tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot0index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void Correlate1DDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { //int l = index % bottomwidth + pad_size; //w-pos //int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos //int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { int s2o = stride2 * o; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - 0 - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - 0 - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - 0) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + (l-s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot0tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom1diff[bot1index + item*bottomcount] = sum / (float)sumelems; } } // == Forward // == Dimension rearrangement Kernel template <typename Dtype> __global__ void blob_rearrange_kernel2_corr1D(const Dtype* in, Dtype* out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight) { // change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel] int xy = blockIdx.x*blockDim.x + threadIdx.x; if(xy>=widthheight) return; int ch = blockIdx.y; int n = blockIdx.z; float value=in[(n*channels+ch)*widthheight+xy]; __syncthreads(); int xpad = (xy % width + padding); int ypad = (xy / width + 0); int xypad = ypad * (width+2*padding) + xpad; out[(n*pwidthheight+xypad)*channels + ch] = value; } template <typename Dtype> void Forward_gpu( const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, cudaStream_t stream, cudaStream_t stream_tmp1, cudaStream_t stream_tmp2) { const Dtype *bottom_data1 = data1.dptr_; const Dtype *bottom_data2 = data2.dptr_; Dtype *rbot1 = tmp1.dptr_; Dtype *rbot2 = tmp2.dptr_; Dtype *top = out.dptr_; const int bnum = data1.size(0); const int bchannels = data1.size(1); const int bheight = data1.size(2); const int bwidth = data1.size(3); const int bwidthheight = bwidth * bheight; const int topcount = top_width_ * top_height_ * top_channels_; dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); int threads_per_block = 16; dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, 
bchannels, bnum); const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight); blob_rearrange_kernel2_corr1D<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp1>>> (bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); blob_rearrange_kernel2_corr1D<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp2>>> (bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); const int num = bnum; const int channels = bchannels; const int height = bheight; const int width = bwidth + 2 * pad_size_; const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels; int x_shift = - neighborhood_grid_radius_; if(single_side == -1) { // to the left x_shift = - neighborhood_grid_radius_; } else if(single_side == 1) { // to the right x_shift = 0; } // correlation1DLayer int topThreadCount = topcount; dim3 totalBlocksCorr(top_width_, top_height_, num); Correlate1DData<Dtype><<<totalBlocksCorr, threadsPerBlock, shared_memory_per_block * sizeof(Dtype), stream>>>( topThreadCount, num, top_width_, top_height_, top_channels_, topcount, max_displacement_, x_shift, neighborhood_grid_width_, kernel_radius_, kernel_size_, stride1_, stride2_, width, height, channels, rbot1, rbot2, top); correlation1D_CUDA_CHECK(cudaPeekAtLastError()); } template <typename Dtype> void Backward_gpu( const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, cudaStream_t stream0, cudaStream_t stream1, int num, int channels, int height, int width) { // Get top diff, compute bottom diff const Dtype* top_diff = out_grad.dptr_; Dtype* bottom0_diff = in_grad1.dptr_; Dtype* bottom1_diff = in_grad2.dptr_; const Dtype* rbot1 = tmp1.dptr_; const Dtype* rbot2 = tmp2.dptr_; const int paddedheight = height ; const int paddedwidth = width + 2 * pad_size_; const int bottomcount = channels * height * width; int botThreadCount = bottomcount; const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; // correlation1DLayerBackward int x_shift = - neighborhood_grid_radius_; if (single_side == -1) { // to the left x_shift = -neighborhood_grid_radius_; } else if(single_side == 1) { // to the right x_shift = 0; } // == Run kernel Backward 0 dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest const int buffer_size_backw0 = \ (static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_)\ / static_cast<float>(stride1_))) + 1) * top_channels_; // == Run kernel Backward 0 for (int n = 0; n < num; n++) { Correlate1DDataBackward0<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream0>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, x_shift, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot2, top_diff); correlation1D_CUDA_CHECK(cudaPeekAtLastError()); } // == Run kernel Backward 1 for (int n = 0; n < num; n++) { Correlate1DDataBackward1<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream1>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, x_shift, neighborhood_grid_width_, 
kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1, bottom1_diff, top_diff); correlation1D_CUDA_CHECK(cudaPeekAtLastError()); } } } // namespace cuda template<typename Dtype> inline void Correlation1DForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_ ) { cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); cudaStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_); cudaStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_); cuda::Forward_gpu(out, data1, data2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, single_side, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream, stream_tmp1, stream_tmp2); } template<typename Dtype> inline void Correlation1DBackward(const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, int single_side, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, int num, int channels, int height, int width ) { cudaStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_); cudaStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_); cuda::Backward_gpu(out_grad, in_grad1, in_grad2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, single_side, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream0, stream1, num, channels, height, width); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(Correlation1DParam param) { return new Correlation1DOp<gpu>(param); } } // namespace op } // namespace mxnet
9331a9b0e6791e1dbee9ef1748c027b4a736a5a8.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2018 XGBoost contributors */ #include "common.h" namespace xgboost { namespace common { int AllVisibleGPUs() { int n_visgpus = 0; try { // When compiled with CUDA but running on CPU only device, // hipGetDeviceCount will fail. dh::safe_cuda(hipGetDeviceCount(&n_visgpus)); } catch(const dmlc::Error &except) { return 0; } return n_visgpus; } } // namespace common } // namespace xgboost
9331a9b0e6791e1dbee9ef1748c027b4a736a5a8.cu
/*! * Copyright 2018 XGBoost contributors */ #include "common.h" namespace xgboost { namespace common { int AllVisibleGPUs() { int n_visgpus = 0; try { // When compiled with CUDA but running on CPU only device, // cudaGetDeviceCount will fail. dh::safe_cuda(cudaGetDeviceCount(&n_visgpus)); } catch(const dmlc::Error &except) { return 0; } return n_visgpus; } } // namespace common } // namespace xgboost
9b1e362617c8ab4e8fc20c787e22143a2a2e3eb1.hip
// !!! This is a file automatically generated by hipify!!! /** * @brief Breadth-first Search Top-Down test program * @file */ #include "Static/BreadthFirstSearch/TopDown2.cuh" #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <hip/hip_runtime_api.h> //--profile-from-start off int main(int argc, char* argv[]) { using namespace timer; using namespace hornets_nest; graph::GraphStd<vid_t, eoff_t> graph; CommandLineParam cmd(graph, argc, argv); //graph.print(); HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetGraph hornet_graph(hornet_init); //hornet_graph.print(); BfsTopDown2 bfs_top_down(hornet_graph); bfs_top_down.set_parameters(graph.max_out_degree_id()); Timer<DEVICE> TM; hipProfilerStart(); TM.start(); bfs_top_down.run(); TM.stop(); hipProfilerStop(); TM.print("TopDown2"); auto is_correct = bfs_top_down.validate(); std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n"); return !is_correct; }
9b1e362617c8ab4e8fc20c787e22143a2a2e3eb1.cu
/** * @brief Breadth-first Search Top-Down test program * @file */ #include "Static/BreadthFirstSearch/TopDown2.cuh" #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <cuda_profiler_api.h> //--profile-from-start off int main(int argc, char* argv[]) { using namespace timer; using namespace hornets_nest; graph::GraphStd<vid_t, eoff_t> graph; CommandLineParam cmd(graph, argc, argv); //graph.print(); HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetGraph hornet_graph(hornet_init); //hornet_graph.print(); BfsTopDown2 bfs_top_down(hornet_graph); bfs_top_down.set_parameters(graph.max_out_degree_id()); Timer<DEVICE> TM; cudaProfilerStart(); TM.start(); bfs_top_down.run(); TM.stop(); cudaProfilerStop(); TM.print("TopDown2"); auto is_correct = bfs_top_down.validate(); std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n"); return !is_correct; }
e0be5b8860360d010b4be2237dc5771a8fd08c5c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <collectives/all_reduce_comm.hpp> #include <collectives/ib_comm.hpp> #include <utils.hpp> namespace HugeCTR { std::shared_ptr<AllReduceInPlaceComm> AllReduceInPlaceComm::create_nccl( size_t num_process, bool use_mixed_precision, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources) { if (use_mixed_precision) { return std::make_shared<NCCLARInplaceComm<__half>>(num_process, gpu_resources); } else { return std::make_shared<NCCLARInplaceComm<float>>(num_process, gpu_resources); } } #ifdef ENABLE_MPI std::shared_ptr<AllReduceInPlaceComm> AllReduceInPlaceComm::create_oneshot( size_t num_process, bool use_mixed_precision, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources, IbComm* ib_comm) { if (num_process == 1) { CK_THROW_(Error_t::WrongInput, "Oneshot algo is not defined for single node"); } if (use_mixed_precision) { return std::make_shared<OneshotMultiARInplaceComm<__half>>(ib_comm, num_process, gpu_resources); } else { return std::make_shared<OneshotMultiARInplaceComm<float>>(ib_comm, num_process, gpu_resources); } } std::shared_ptr<AllReduceInPlaceComm> AllReduceInPlaceComm::create( size_t num_process, AllReduceAlgo algo, bool use_mixed_precision, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources, IbComm* ib_comm) { return (algo == AllReduceAlgo::ONESHOT) ? 
create_oneshot(num_process, use_mixed_precision, gpu_resources, ib_comm) : create_nccl(num_process, use_mixed_precision, gpu_resources); } #else std::shared_ptr<AllReduceInPlaceComm> AllReduceInPlaceComm::create( size_t num_process, AllReduceAlgo algo, bool use_mixed_precision, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources) { if (algo == AllReduceAlgo::ONESHOT) { CK_THROW_(Error_t::WrongInput, "Oneshot algo can't be used without MPI"); } return create_nccl(num_process, use_mixed_precision, gpu_resources); } #endif #ifdef ENABLE_MPI template <typename T> OneshotMultiARInplaceComm<T>::OneshotMultiARInplaceComm( IbComm* ib_comm, size_t num_procs, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources): ib_comm_(ib_comm), num_procs_(num_procs), gpu_resources_(gpu_resources), num_gpus_(gpu_resources.size()) { } template <typename T> AllReduceInPlaceComm::Handle OneshotMultiARInplaceComm<T>::register_coll() { ar_ctx_.emplace_back(std::make_unique<ARContext>()); Handle handle = (Handle)(ar_ctx_.size() - 1); auto& ar_ctx_g = ar_ctx_[handle]; ar_ctx_g->ctx_.resize(num_gpus_); ar_ctx_[handle]->ib_comm_handle_ = ib_comm_->register_ar_coll(); return handle; } template<typename T> void OneshotMultiARInplaceComm<T>::set_coll_buf(Handle coll, void* ar_ptr, size_t ar_size, size_t g) { auto& ctx = ar_ctx_[coll]; auto& ctx_g = ctx->ctx_[g]; ctx_g.ar_ptr_ = ar_ptr; if ((ctx->ar_size_ != 0) && (ctx->ar_size_ != ar_size)) { CK_THROW_(Error_t::WrongInput, "AR size mismatch"); } ctx->ar_size_ = ar_size; ib_comm_->set_ar_coll_buf<T>(ctx->ib_comm_handle_, ar_ptr, ar_size, g); // MESSAGE_("Oneshot AR size: " + std::to_string(ar_size)); } template<typename T> void OneshotMultiARInplaceComm<T>::update_size(Handle coll, const size_t ar_size) { auto& ctx = ar_ctx_[coll]; ctx->ar_size_ = ar_size; ib_comm_->update_size(ctx->ib_comm_handle_, ar_size); // MESSAGE_("Oneshot AR size updated to: " + std::to_string(ar_size)); } template <typename T> void OneshotMultiARInplaceComm<T>::register_coll_buf(Handle coll) { auto& ctx = ar_ctx_[coll]; ib_comm_->register_ar_coll_buf(ctx->ib_comm_handle_); } template <typename T> void OneshotMultiARInplaceComm<T>::all_reduce(AllReduceInPlaceComm::Handle coll, hipStream_t stream, size_t g) { auto& ctx = ar_ctx_[coll]; ib_comm_->all_reduce<T>(ctx->ib_comm_handle_, stream, g); } template class OneshotMultiARInplaceComm<__half>; template class OneshotMultiARInplaceComm<float>; #endif template <typename T> NCCLARInplaceComm<T>::NCCLARInplaceComm( size_t num_procs, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources): num_procs_(num_procs), gpu_resources_(gpu_resources), num_gpus_(gpu_resources.size()) { } template <typename T> AllReduceInPlaceComm::Handle NCCLARInplaceComm<T>::register_coll() { ar_ctx_.emplace_back(std::make_unique<ARContext>()); Handle handle = (Handle)(ar_ctx_.size() - 1); auto& ar_ctx_g = ar_ctx_[handle]; ar_ctx_g->ctx_.resize(num_gpus_); return handle; } template<typename T> void NCCLARInplaceComm<T>::set_coll_buf(Handle coll, void* ar_ptr, size_t ar_size, size_t g) { auto& ctx = ar_ctx_[coll]; auto& ctx_g = ctx->ctx_[g]; ctx_g.ar_ptr_ = ar_ptr; if ((ctx->ar_size_ != 0) && (ctx->ar_size_ != ar_size)) { CK_THROW_(Error_t::WrongInput, "AR size mismatch"); } ctx->ar_size_ = ar_size; // MESSAGE_("NCCL AR size: " + std::to_string(ar_size)); } template<typename T> void NCCLARInplaceComm<T>::update_size(Handle coll, const size_t ar_size) { auto& ctx = ar_ctx_[coll]; ctx->ar_size_ = ar_size; // MESSAGE_("NCCL AR size updated to: " + 
std::to_string(ar_size)); } template<typename T> void NCCLARInplaceComm<T>::register_coll_buf(Handle coll) { } template <typename T> void NCCLARInplaceComm<T>::all_reduce(AllReduceInPlaceComm::Handle coll, hipStream_t stream, size_t g) { auto& ctx = ar_ctx_[coll]; auto& ctx_g = ctx->ctx_[g]; CK_NCCL_THROW_(ncclAllReduce( (const void*) ctx_g.ar_ptr_, ctx_g.ar_ptr_, ctx->ar_size_ / sizeof(T), NcclDataType<T>::getType(), ncclSum, gpu_resources_[g]->get_nccl(), stream)); } template class NCCLARInplaceComm<__half>; template class NCCLARInplaceComm<float>; }
e0be5b8860360d010b4be2237dc5771a8fd08c5c.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <collectives/all_reduce_comm.hpp> #include <collectives/ib_comm.hpp> #include <utils.hpp> namespace HugeCTR { std::shared_ptr<AllReduceInPlaceComm> AllReduceInPlaceComm::create_nccl( size_t num_process, bool use_mixed_precision, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources) { if (use_mixed_precision) { return std::make_shared<NCCLARInplaceComm<__half>>(num_process, gpu_resources); } else { return std::make_shared<NCCLARInplaceComm<float>>(num_process, gpu_resources); } } #ifdef ENABLE_MPI std::shared_ptr<AllReduceInPlaceComm> AllReduceInPlaceComm::create_oneshot( size_t num_process, bool use_mixed_precision, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources, IbComm* ib_comm) { if (num_process == 1) { CK_THROW_(Error_t::WrongInput, "Oneshot algo is not defined for single node"); } if (use_mixed_precision) { return std::make_shared<OneshotMultiARInplaceComm<__half>>(ib_comm, num_process, gpu_resources); } else { return std::make_shared<OneshotMultiARInplaceComm<float>>(ib_comm, num_process, gpu_resources); } } std::shared_ptr<AllReduceInPlaceComm> AllReduceInPlaceComm::create( size_t num_process, AllReduceAlgo algo, bool use_mixed_precision, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources, IbComm* ib_comm) { return (algo == AllReduceAlgo::ONESHOT) ? 
create_oneshot(num_process, use_mixed_precision, gpu_resources, ib_comm) : create_nccl(num_process, use_mixed_precision, gpu_resources); } #else std::shared_ptr<AllReduceInPlaceComm> AllReduceInPlaceComm::create( size_t num_process, AllReduceAlgo algo, bool use_mixed_precision, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources) { if (algo == AllReduceAlgo::ONESHOT) { CK_THROW_(Error_t::WrongInput, "Oneshot algo can't be used without MPI"); } return create_nccl(num_process, use_mixed_precision, gpu_resources); } #endif #ifdef ENABLE_MPI template <typename T> OneshotMultiARInplaceComm<T>::OneshotMultiARInplaceComm( IbComm* ib_comm, size_t num_procs, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources): ib_comm_(ib_comm), num_procs_(num_procs), gpu_resources_(gpu_resources), num_gpus_(gpu_resources.size()) { } template <typename T> AllReduceInPlaceComm::Handle OneshotMultiARInplaceComm<T>::register_coll() { ar_ctx_.emplace_back(std::make_unique<ARContext>()); Handle handle = (Handle)(ar_ctx_.size() - 1); auto& ar_ctx_g = ar_ctx_[handle]; ar_ctx_g->ctx_.resize(num_gpus_); ar_ctx_[handle]->ib_comm_handle_ = ib_comm_->register_ar_coll(); return handle; } template<typename T> void OneshotMultiARInplaceComm<T>::set_coll_buf(Handle coll, void* ar_ptr, size_t ar_size, size_t g) { auto& ctx = ar_ctx_[coll]; auto& ctx_g = ctx->ctx_[g]; ctx_g.ar_ptr_ = ar_ptr; if ((ctx->ar_size_ != 0) && (ctx->ar_size_ != ar_size)) { CK_THROW_(Error_t::WrongInput, "AR size mismatch"); } ctx->ar_size_ = ar_size; ib_comm_->set_ar_coll_buf<T>(ctx->ib_comm_handle_, ar_ptr, ar_size, g); // MESSAGE_("Oneshot AR size: " + std::to_string(ar_size)); } template<typename T> void OneshotMultiARInplaceComm<T>::update_size(Handle coll, const size_t ar_size) { auto& ctx = ar_ctx_[coll]; ctx->ar_size_ = ar_size; ib_comm_->update_size(ctx->ib_comm_handle_, ar_size); // MESSAGE_("Oneshot AR size updated to: " + std::to_string(ar_size)); } template <typename T> void OneshotMultiARInplaceComm<T>::register_coll_buf(Handle coll) { auto& ctx = ar_ctx_[coll]; ib_comm_->register_ar_coll_buf(ctx->ib_comm_handle_); } template <typename T> void OneshotMultiARInplaceComm<T>::all_reduce(AllReduceInPlaceComm::Handle coll, cudaStream_t stream, size_t g) { auto& ctx = ar_ctx_[coll]; ib_comm_->all_reduce<T>(ctx->ib_comm_handle_, stream, g); } template class OneshotMultiARInplaceComm<__half>; template class OneshotMultiARInplaceComm<float>; #endif template <typename T> NCCLARInplaceComm<T>::NCCLARInplaceComm( size_t num_procs, const std::vector<std::shared_ptr<GPUResource>>& gpu_resources): num_procs_(num_procs), gpu_resources_(gpu_resources), num_gpus_(gpu_resources.size()) { } template <typename T> AllReduceInPlaceComm::Handle NCCLARInplaceComm<T>::register_coll() { ar_ctx_.emplace_back(std::make_unique<ARContext>()); Handle handle = (Handle)(ar_ctx_.size() - 1); auto& ar_ctx_g = ar_ctx_[handle]; ar_ctx_g->ctx_.resize(num_gpus_); return handle; } template<typename T> void NCCLARInplaceComm<T>::set_coll_buf(Handle coll, void* ar_ptr, size_t ar_size, size_t g) { auto& ctx = ar_ctx_[coll]; auto& ctx_g = ctx->ctx_[g]; ctx_g.ar_ptr_ = ar_ptr; if ((ctx->ar_size_ != 0) && (ctx->ar_size_ != ar_size)) { CK_THROW_(Error_t::WrongInput, "AR size mismatch"); } ctx->ar_size_ = ar_size; // MESSAGE_("NCCL AR size: " + std::to_string(ar_size)); } template<typename T> void NCCLARInplaceComm<T>::update_size(Handle coll, const size_t ar_size) { auto& ctx = ar_ctx_[coll]; ctx->ar_size_ = ar_size; // MESSAGE_("NCCL AR size updated to: " + 
std::to_string(ar_size)); } template<typename T> void NCCLARInplaceComm<T>::register_coll_buf(Handle coll) { } template <typename T> void NCCLARInplaceComm<T>::all_reduce(AllReduceInPlaceComm::Handle coll, cudaStream_t stream, size_t g) { auto& ctx = ar_ctx_[coll]; auto& ctx_g = ctx->ctx_[g]; CK_NCCL_THROW_(ncclAllReduce( (const void*) ctx_g.ar_ptr_, ctx_g.ar_ptr_, ctx->ar_size_ / sizeof(T), NcclDataType<T>::getType(), ncclSum, gpu_resources_[g]->get_nccl(), stream)); } template class NCCLARInplaceComm<__half>; template class NCCLARInplaceComm<float>; }
974ac70c08ae1b0c196df0b3c10e790d1dfc772b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda.h> #include<iostream> #include<stdio.h> #include "omp.h" #include <unistd.h> #include <sys/time.h> #include "jacobi_kernels.h" #include "main_fcn.h" #include <stdlib.h> using namespace std; int main() { //define booleans needed for logic ctrl_flags CF; //define interface between helper and main i.e.: what is returned //double out_val =0.0; int i; double out[Ni]; double Amat[numElems]; int numBlocks, numThreads; gen_A_mat(Amat); static double inp1[Ni]; gen_b_vec(inp1); help_input_from_main test_input; test_input.initS(inp1, Amat); cout <<"Running CUDA init" << endl; double *x_now_d, *x_next_d, *A_d, *b_d; int k; //pointer of helper function return double* h_data; double* monitor_data; //asdfasdf // Allocate memory on the device hipMalloc((void **) &x_next_d, Ni*sizeof(double)); hipMalloc((void **) &A_d, numElems*sizeof(double)); hipMalloc((void **) &x_now_d, Ni*sizeof(double)); hipMalloc((void **) &b_d, Ni*sizeof(double)); hipMalloc((void**)&monitor_data, sizeof(double)*Ni); hipHostMalloc((void**)&h_data, sizeof(double)*Ni); test_input.x_next_d = x_next_d; test_input.A_d = A_d; test_input.b_d = b_d; test_input.x_now_d = x_now_d; test_input.nTiles = Ni/tileSize + (Ni%tileSize == 0?0:1); help_input_from_main* help_input = &test_input; hipStream_t stream1; hipStreamCreateWithFlags(&stream1, hipStreamNonBlocking); numBlocks = 1; numThreads = els_to_read; if(els_to_read > 1024){ //for now just assume numElems is multiple of 1024 or less numThreads = 1024; numBlocks = els_to_read/numThreads; } // Optimized kernel //int gridHeight = Nj/tileSize + (Nj%tileSize == 0?0:1); // int gridWidth = Ni/tileSize + (Ni%tileSize == 0?0:1); //dim3 dGrid(gridHeight, gridWidth), dBlock(tileSize, tileSize); #pragma omp parallel num_threads(3) shared(CF, help_input, out, monitor_data, x_now_d) { if(omp_get_thread_num() == 0){ cout <<"WHATDDUP IM LAUNCHING THAT MAIN" << endl; //code for master threads CF.main_done_cmd = main_fcn(CF, out, help_input); } if(omp_get_thread_num() == 1){ while(CF.main_done_cmd == 0){ if(CF.help_running_cmd == 1 && CF.request_val_cmd == 1){ cout <<"Launching Monitor Kernel" << endl; //hipStreamSynchronize(stream1); hipLaunchKernelGGL(( monitorKernel), dim3(numBlocks), dim3(numThreads),0, stream1, monitor_data, x_now_d); cout <<"Launching Async Mem Cpy" << endl; hipMemcpyAsync(h_data, monitor_data, Ni*sizeof(double), hipMemcpyDeviceToHost, stream1); hipStreamSynchronize(stream1); CF.request_val_cmd = 0; for(i = 0; i < Ni; i++){ out[i] = h_data[i]; if(i < 3) cout << "output from monitor: " << h_data[i] << endl; } CF.req_delivered_cmd = 1; } } } if(omp_get_thread_num() == 2){ while(CF.main_done_cmd == 0){ if(CF.call_help_cmd == 1 && CF.help_running_cmd == 0){ cout <<"Launching Helper Function" << endl; //*help_rdy = help_fcn(*help_input, out); CF.help_running_cmd = 1; CF.call_help_cmd = 0; CF.kernel_rdy_cmd = 0; CF.help_rdy_cmd = help_fcn(*help_input, out, &CF.kernel_rdy_cmd); //dataKernel<<<nTiles, tileSize >>>(dArray, 1000); } } } } hipFree(x_next_d); hipFree(A_d); hipFree(x_now_d); hipFree(b_d); hipFree(monitor_data); hipFree(h_data); return 0; }
974ac70c08ae1b0c196df0b3c10e790d1dfc772b.cu
#include<cuda.h> #include<iostream> #include<stdio.h> #include "omp.h" #include <unistd.h> #include <sys/time.h> #include "jacobi_kernels.h" #include "main_fcn.h" #include <stdlib.h> using namespace std; int main() { //define booleans needed for logic ctrl_flags CF; //define interface between helper and main i.e.: what is returned //double out_val =0.0; int i; double out[Ni]; double Amat[numElems]; int numBlocks, numThreads; gen_A_mat(Amat); static double inp1[Ni]; gen_b_vec(inp1); help_input_from_main test_input; test_input.initS(inp1, Amat); cout <<"Running CUDA init" << endl; double *x_now_d, *x_next_d, *A_d, *b_d; int k; //pointer of helper function return double* h_data; double* monitor_data; //asdfasdf // Allocate memory on the device cudaMalloc((void **) &x_next_d, Ni*sizeof(double)); cudaMalloc((void **) &A_d, numElems*sizeof(double)); cudaMalloc((void **) &x_now_d, Ni*sizeof(double)); cudaMalloc((void **) &b_d, Ni*sizeof(double)); cudaMalloc((void**)&monitor_data, sizeof(double)*Ni); cudaMallocHost((void**)&h_data, sizeof(double)*Ni); test_input.x_next_d = x_next_d; test_input.A_d = A_d; test_input.b_d = b_d; test_input.x_now_d = x_now_d; test_input.nTiles = Ni/tileSize + (Ni%tileSize == 0?0:1); help_input_from_main* help_input = &test_input; cudaStream_t stream1; cudaStreamCreateWithFlags(&stream1, cudaStreamNonBlocking); numBlocks = 1; numThreads = els_to_read; if(els_to_read > 1024){ //for now just assume numElems is multiple of 1024 or less numThreads = 1024; numBlocks = els_to_read/numThreads; } // Optimized kernel //int gridHeight = Nj/tileSize + (Nj%tileSize == 0?0:1); // int gridWidth = Ni/tileSize + (Ni%tileSize == 0?0:1); //dim3 dGrid(gridHeight, gridWidth), dBlock(tileSize, tileSize); #pragma omp parallel num_threads(3) shared(CF, help_input, out, monitor_data, x_now_d) { if(omp_get_thread_num() == 0){ cout <<"WHATDDUP IM LAUNCHING THAT MAIN" << endl; //code for master threads CF.main_done_cmd = main_fcn(CF, out, help_input); } if(omp_get_thread_num() == 1){ while(CF.main_done_cmd == 0){ if(CF.help_running_cmd == 1 && CF.request_val_cmd == 1){ cout <<"Launching Monitor Kernel" << endl; //cudaStreamSynchronize(stream1); monitorKernel<<<numBlocks, numThreads,0, stream1>>>(monitor_data, x_now_d); cout <<"Launching Async Mem Cpy" << endl; cudaMemcpyAsync(h_data, monitor_data, Ni*sizeof(double), cudaMemcpyDeviceToHost, stream1); cudaStreamSynchronize(stream1); CF.request_val_cmd = 0; for(i = 0; i < Ni; i++){ out[i] = h_data[i]; if(i < 3) cout << "output from monitor: " << h_data[i] << endl; } CF.req_delivered_cmd = 1; } } } if(omp_get_thread_num() == 2){ while(CF.main_done_cmd == 0){ if(CF.call_help_cmd == 1 && CF.help_running_cmd == 0){ cout <<"Launching Helper Function" << endl; //*help_rdy = help_fcn(*help_input, out); CF.help_running_cmd = 1; CF.call_help_cmd = 0; CF.kernel_rdy_cmd = 0; CF.help_rdy_cmd = help_fcn(*help_input, out, &CF.kernel_rdy_cmd); //dataKernel<<<nTiles, tileSize >>>(dArray, 1000); } } } } cudaFree(x_next_d); cudaFree(A_d); cudaFree(x_now_d); cudaFree(b_d); cudaFree(monitor_data); cudaFree(h_data); return 0; }
0276c6799a92e3e26b17acc77be8186bfd52be49.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // --------------- // CS3211 CUDA Lab // --------------- __device__ __host__ int distance(int x1, int y1, int x2, int y2) { return (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2); } /* The CPU version of Voronoi Diagram computation */ void cpuVoronoiDiagram(Point2D *points, int *output, int noPoints, int width, int height) { int id = 0; for (int j = 0; j < height; j++) for (int i = 0; i < width; i++) { int best = 0; int minDist = distance(i, j, points[0].x, points[0].y); for (int k = 1; k < noPoints; k++) { int myDist = distance(i, j, points[k].x, points[k].y); if (myDist < minDist) { minDist = myDist; best = k; } } output[id++] = best; } } /* GPU Version of Voronoi diagram computation * Naive implementation */ __global__ void kernelNaiveVoronoiDiagram(Point2D *points, int *result, int noPoints, int width) { int threadX = (blockIdx.x * blockDim.x) + threadIdx.x; int threadY = (blockIdx.y * blockDim.y) + threadIdx.y; int best = 0; int minDist = distance(threadX, threadY, points[0].x, points[0].y); for (int i = 1; i < noPoints; i++) { int myDist = distance(threadX, threadY, points[i].x, points[i].y); if (myDist < minDist) { minDist = myDist; best = i; } } result[(threadY * width) + threadX] = best; } /* GPU Version of Voronoi diagram computation * Using shared memory * We assume for simplicity that noPoints is divisible by TILE */ __global__ void kernelSharedVoronoiDiagram(Point2D *points, int *result, int noPoints, int width) { int threadX = (blockIdx.x * blockDim.x) + threadIdx.x; int threadY = (blockIdx.y * blockDim.y) + threadIdx.y; int best = 0; int minDist = distance(threadX, threadY, points[0].x, points[0].y); __shared__ Point2D interPoints[TILE]; for (int m = 0; m < noPoints; m += TILE) { int interPointIdx = (threadIdx.y * blockDim.x) + threadIdx.x; int pointIdx = m + interPointIdx; interPoints[interPointIdx] = points[pointIdx]; __syncthreads(); for (int k = 0; k < TILE; k++) { int myDist = distance(threadX, threadY, interPoints[k].x, interPoints[k].y); if (myDist < minDist) { minDist = myDist; best = m + k; } } __syncthreads(); } result[(threadY * width) + threadX] = best; }
0276c6799a92e3e26b17acc77be8186bfd52be49.cu
// --------------- // CS3211 CUDA Lab // --------------- __device__ __host__ int distance(int x1, int y1, int x2, int y2) { return (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2); } /* The CPU version of Voronoi Diagram computation */ void cpuVoronoiDiagram(Point2D *points, int *output, int noPoints, int width, int height) { int id = 0; for (int j = 0; j < height; j++) for (int i = 0; i < width; i++) { int best = 0; int minDist = distance(i, j, points[0].x, points[0].y); for (int k = 1; k < noPoints; k++) { int myDist = distance(i, j, points[k].x, points[k].y); if (myDist < minDist) { minDist = myDist; best = k; } } output[id++] = best; } } /* GPU Version of Voronoi diagram computation * Naive implementation */ __global__ void kernelNaiveVoronoiDiagram(Point2D *points, int *result, int noPoints, int width) { int threadX = (blockIdx.x * blockDim.x) + threadIdx.x; int threadY = (blockIdx.y * blockDim.y) + threadIdx.y; int best = 0; int minDist = distance(threadX, threadY, points[0].x, points[0].y); for (int i = 1; i < noPoints; i++) { int myDist = distance(threadX, threadY, points[i].x, points[i].y); if (myDist < minDist) { minDist = myDist; best = i; } } result[(threadY * width) + threadX] = best; } /* GPU Version of Voronoi diagram computation * Using shared memory * We assume for simplicity that noPoints is divisible by TILE */ __global__ void kernelSharedVoronoiDiagram(Point2D *points, int *result, int noPoints, int width) { int threadX = (blockIdx.x * blockDim.x) + threadIdx.x; int threadY = (blockIdx.y * blockDim.y) + threadIdx.y; int best = 0; int minDist = distance(threadX, threadY, points[0].x, points[0].y); __shared__ Point2D interPoints[TILE]; for (int m = 0; m < noPoints; m += TILE) { int interPointIdx = (threadIdx.y * blockDim.x) + threadIdx.x; int pointIdx = m + interPointIdx; interPoints[interPointIdx] = points[pointIdx]; __syncthreads(); for (int k = 0; k < TILE; k++) { int myDist = distance(threadX, threadY, interPoints[k].x, interPoints[k].y); if (myDist < minDist) { minDist = myDist; best = m + k; } } __syncthreads(); } result[(threadY * width) + threadX] = best; }
ccbbae35ac6f5633850eddbb55042a5590a61f6a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "segment_coo_cuda.h" #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include "reducer.cuh" #include "utils.cuh" #define THREADS 256 #define BLOCKS(TB, N) (TB * N + THREADS - 1) / THREADS #define FULL_MASK 0xffffffff template <typename scalar_t, ReductionType REDUCE, bool HAS_VAL> __global__ void segment_coo_kernel(const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, size_t E, size_t N) { // Each thread processes exactly one entry. Within a warp, we perform a // parallel reduction across equal indices, and write the intermediate // result via atomics. int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int lane_idx = row_idx & (32 - 1); int D = index_info.sizes[index_info.dims - 1]; if (row_idx < E) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( row_idx, index_info); int64_t idx = index_info.data[offset], next_idx; int out_idx = (row_idx / D) * N + idx; scalar_t val = HAS_VAL ? src_data[row_idx] : (scalar_t)1, tmp; #pragma unroll for (int i = 1; i < 32; i *= 2) { // Parallel reduction inside a single warp. tmp = __shfl_up_sync(FULL_MASK, val, i); next_idx = __shfl_up_sync(FULL_MASK, idx, i); if (lane_idx >= i && row_idx / D == (row_idx - i) / D) { assert(idx >= next_idx); if (idx == next_idx) Reducer<scalar_t, REDUCE>::update(&val, tmp); } } next_idx = __shfl_down_sync(FULL_MASK, idx, 1); if (lane_idx == 32 - 1 || row_idx / D != (row_idx + 1) / D || idx != next_idx) Reducer<scalar_t, REDUCE>::atomic_write(out_data + out_idx, val); } } template <typename scalar_t> __global__ void segment_coo_arg_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, int64_t *arg_out_data, size_t E, size_t N) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int D = index_info.sizes[index_info.dims - 1]; if (row_idx < E) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( row_idx, index_info); int64_t idx = index_info.data[offset]; int out_idx = (row_idx / D) * N + idx; scalar_t val = __ldg(out_data + out_idx); if (src_data[row_idx] == val) arg_out_data[out_idx] = row_idx % D; } } template <typename scalar_t, ReductionType REDUCE, int TB> __global__ void segment_coo_broadcast_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, size_t E, size_t K, size_t N) { // Each thread processes a single column and `TB` index entries. Coalesced // read and write is performed in column-major order. The intermediate // results are written via atomics. 
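/* Index bookkeeping for the tiled reduction below, as read from the kernel body:
 * D is the length of the innermost index dimension, E_1 = E / D is the number of
 * outer rows, and E_2 pads D up to a multiple of TB so that one thread's chunk of
 * TB consecutive entries never straddles two outer rows. Within its chunk the thread
 * relies on the indices being sorted in ascending order (hence the assert): runs of
 * equal indices are reduced in registers and a single atomic update per run is issued
 * to the output, instead of one atomic per element. */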
int D = index_info.sizes[index_info.dims - 1]; int E_1 = E / D; int E_2 = D + TB - (D % TB); int row_idx = blockIdx.x * blockDim.y + threadIdx.y; int col_idx = blockIdx.y * blockDim.x + threadIdx.x; int dim_start = (row_idx * TB) / E_2; int row_start = (row_idx * TB) % E_2; if (dim_start < E_1 && col_idx < K) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( dim_start * D + row_start, index_info); int idx1 = __ldg(index_info.data + offset), idx2; scalar_t val = src_data[K * (dim_start * D + row_start) + col_idx]; #pragma unroll for (int i = 1; i < TB; i++) { if (row_start + i >= D) break; idx2 = __ldg(index_info.data + offset + i * index_info.strides[index_info.dims - 1]); assert(idx1 <= idx2); if (idx1 == idx2) { Reducer<scalar_t, REDUCE>::update( &val, src_data[K * (dim_start * D + row_start + i) + col_idx]); } else { Reducer<scalar_t, REDUCE>::atomic_write( out_data + (dim_start * N + idx1) * K + col_idx, val); val = src_data[K * (dim_start * D + row_start + i) + col_idx]; } idx1 = idx2; } Reducer<scalar_t, REDUCE>::atomic_write( out_data + (dim_start * N + idx1) * K + col_idx, val); } } template <typename scalar_t> __global__ void segment_coo_arg_broadcast_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, int64_t *arg_out_data, size_t E, size_t K, size_t N) { int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / K; int col_idx = thread_idx % K; int D = index_info.sizes[index_info.dims - 1]; if (row_idx < E && col_idx < K) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( row_idx, index_info); int idx = __ldg(index_info.data + offset); int out_idx = ((row_idx / D) * N + idx) * K + col_idx; scalar_t val = __ldg(out_data + out_idx); if (src_data[thread_idx] == val) arg_out_data[out_idx] = row_idx % D; } } std::tuple<torch::Tensor, torch::optional<torch::Tensor>> segment_coo_cuda(torch::Tensor src, torch::Tensor index, torch::optional<torch::Tensor> optional_out, torch::optional<int64_t> dim_size, std::string reduce) { CHECK_CUDA(src); CHECK_CUDA(index); if (optional_out.has_value()) CHECK_CUDA(optional_out.value()); hipSetDevice(src.get_device()); CHECK_INPUT(src.dim() >= index.dim()); auto sizes = index.sizes().vec(); for (int i = 0; i < index.dim(); i++) { sizes[i] = src.size(i); } index = index.expand(sizes); auto dim = index.dim() - 1; src = src.contiguous(); torch::Tensor out; if (optional_out.has_value()) { out = optional_out.value().contiguous(); for (int i = 0; i < out.dim(); i++) if (i != dim) CHECK_INPUT(src.size(i) == out.size(i)); } else { sizes = src.sizes().vec(); if (dim_size.has_value()) sizes[dim] = dim_size.value(); else if (index.numel() == 0) sizes[dim] = 0; else { auto tmp = index.select(dim, index.size(dim) - 1); tmp = tmp.numel() > 1 ? 
tmp.max() : tmp; auto d_size = tmp.data_ptr<int64_t>(); auto h_size = (int64_t *)malloc(sizeof(int64_t)); hipMemcpy(h_size, d_size, sizeof(int64_t), hipMemcpyDeviceToHost); sizes[dim] = 1 + *h_size; } out = torch::zeros(sizes, src.options()); } torch::optional<torch::Tensor> arg_out = torch::nullopt; int64_t *arg_out_data = nullptr; if (reduce2REDUCE.at(reduce) == MIN || reduce2REDUCE.at(reduce) == MAX) { arg_out = torch::full_like(out, src.size(dim), index.options()); arg_out_data = arg_out.value().data_ptr<int64_t>(); } else if (reduce2REDUCE.at(reduce) == MEAN) { auto sizes = index.sizes().vec(); sizes[dim] = out.size(dim); arg_out = torch::zeros(sizes, out.options()); } if (index.numel() == 0) return std::make_tuple(out, arg_out); auto E = index.numel(); auto E_2 = index.size(dim); auto E_1 = index.numel() / E_2; auto K = src.numel() / E; auto N = out.size(dim); auto avg_len = (float)E_2 / (float)N; auto index_info = at::cuda::detail::getTensorInfo<int64_t, int>(index); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_ALL_TYPES(src.scalar_type(), "segment_coo_kernel", [&] { auto src_data = src.data_ptr<scalar_t>(); auto out_data = out.data_ptr<scalar_t>(); AT_DISPATCH_REDUCTION_TYPES(reduce, [&] { if (!optional_out.has_value()) out.fill_(Reducer<scalar_t, REDUCE>::init()); if (K == 1) hipLaunchKernelGGL(( segment_coo_kernel<scalar_t, REDUCE, true>) , dim3(BLOCKS(1, E)), dim3(THREADS), 0, stream, src_data, index_info, out_data, E, N); else if (avg_len <= 8) hipLaunchKernelGGL(( segment_coo_broadcast_kernel<scalar_t, REDUCE, 4>) , dim3(dim3((E_1 * ((E_2 + 3) / 4) + 7) / 8, (K + 31) / 32)), dim3(dim3(32, 8)), 0, stream, src_data, index_info, out_data, E, K, N); else if (avg_len <= 16) hipLaunchKernelGGL(( segment_coo_broadcast_kernel<scalar_t, REDUCE, 8>) , dim3(dim3((E_1 * ((E_2 + 7) / 8) + 7) / 8, (K + 31) / 32)), dim3(dim3(32, 8)), 0, stream, src_data, index_info, out_data, E, K, N); else if (avg_len <= 32) hipLaunchKernelGGL(( segment_coo_broadcast_kernel<scalar_t, REDUCE, 16>) , dim3(dim3((E_1 * ((E_2 + 15) / 16) + 7) / 8, (K + 31) / 32)), dim3(dim3(32, 8)), 0, stream, src_data, index_info, out_data, E, K, N); else hipLaunchKernelGGL(( segment_coo_broadcast_kernel<scalar_t, REDUCE, 32>) , dim3(dim3((E_1 * ((E_2 + 31) / 32) + 7) / 8, (K + 31) / 32)), dim3(dim3(32, 8)), 0, stream, src_data, index_info, out_data, E, K, N); if (!optional_out.has_value() && (REDUCE == MIN || REDUCE == MAX)) out.masked_fill_(out == Reducer<scalar_t, REDUCE>::init(), (scalar_t)0); if (REDUCE == MIN || REDUCE == MAX) { if (K == 1) hipLaunchKernelGGL(( segment_coo_arg_kernel<scalar_t>) , dim3(BLOCKS(1, E)), dim3(THREADS), 0, stream, src_data, index_info, out_data, arg_out_data, E, N); else hipLaunchKernelGGL(( segment_coo_arg_broadcast_kernel<scalar_t>) , dim3(BLOCKS(1, E * K)), dim3(THREADS), 0, stream, src_data, index_info, out_data, arg_out_data, E, K, N); } if (REDUCE == MEAN) { auto count_data = arg_out.value().data_ptr<scalar_t>(); hipLaunchKernelGGL(( segment_coo_kernel<scalar_t, SUM, false>) , dim3(BLOCKS(1, E)), dim3(THREADS), 0, stream, nullptr, index_info, count_data, E, N); arg_out.value().clamp_(1); auto count = arg_out.value(); for (int i = dim + 1; i < out.dim(); i++) count = count.unsqueeze(-1); out.div_(count); } }); }); return std::make_tuple(out, arg_out); } template <typename scalar_t> __global__ void gather_coo_kernel(const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, size_t E, size_t N) { int row_idx = 
blockIdx.x * blockDim.x + threadIdx.x; if (row_idx < E) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( row_idx, index_info); int row = index_info.data[offset]; offset = (row_idx / index_info.sizes[index_info.dims - 1]) * N; scalar_t val = __ldg(src_data + offset + row); out_data[row_idx] = val; } } template <typename scalar_t> __global__ void gather_coo_broadcast_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, size_t E, size_t K, size_t N) { int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / K; int col_idx = thread_idx % K; if (thread_idx < E * K) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( row_idx, index_info); int row = index_info.data[offset]; offset = (row_idx / index_info.sizes[index_info.dims - 1]) * N * K; scalar_t val = __ldg(src_data + offset + K * row + col_idx); out_data[thread_idx] = val; } } torch::Tensor gather_coo_cuda(torch::Tensor src, torch::Tensor index, torch::optional<torch::Tensor> optional_out) { CHECK_CUDA(src); CHECK_CUDA(index); if (optional_out.has_value()) CHECK_CUDA(optional_out.value()); hipSetDevice(src.get_device()); CHECK_INPUT(src.dim() >= index.dim()); auto sizes = index.sizes().vec(); for (auto i = 0; i < index.dim() - 1; i++) sizes[i] = src.size(i); index = index.expand(sizes); auto dim = index.dim() - 1; src = src.contiguous(); torch::Tensor out; if (optional_out.has_value()) { out = optional_out.value().contiguous(); for (auto i = 0; i < src.dim(); i++) if (i != dim) CHECK_INPUT(src.size(i) == out.size(i)); CHECK_INPUT(index.size(dim) == out.size(dim)); } else { auto sizes = src.sizes().vec(); sizes[dim] = index.size(dim); out = torch::empty(sizes, src.options()); } if (index.numel() == 0) return out; auto E = index.numel(); auto K = out.numel() / E; auto N = src.size(dim); auto index_info = at::cuda::detail::getTensorInfo<int64_t, int>(index); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_ALL_TYPES(src.scalar_type(), "gather_coo_kernel", [&] { auto src_data = src.data_ptr<scalar_t>(); auto out_data = out.data_ptr<scalar_t>(); if (K == 1) hipLaunchKernelGGL(( gather_coo_kernel<scalar_t>), dim3(BLOCKS(1, E)), dim3(THREADS), 0, stream, src_data, index_info, out_data, E, N); else hipLaunchKernelGGL(( gather_coo_broadcast_kernel<scalar_t>) , dim3(BLOCKS(1, E * K)), dim3(THREADS), 0, stream, src_data, index_info, out_data, E, K, N); }); return out; }
ccbbae35ac6f5633850eddbb55042a5590a61f6a.cu
#include "segment_coo_cuda.h" #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include "reducer.cuh" #include "utils.cuh" #define THREADS 256 #define BLOCKS(TB, N) (TB * N + THREADS - 1) / THREADS #define FULL_MASK 0xffffffff template <typename scalar_t, ReductionType REDUCE, bool HAS_VAL> __global__ void segment_coo_kernel(const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, size_t E, size_t N) { // Each thread processes exactly one entry. Within a warp, we perform a // parallel reduction across equal indices, and write the intermediate // result via atomics. int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int lane_idx = row_idx & (32 - 1); int D = index_info.sizes[index_info.dims - 1]; if (row_idx < E) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( row_idx, index_info); int64_t idx = index_info.data[offset], next_idx; int out_idx = (row_idx / D) * N + idx; scalar_t val = HAS_VAL ? src_data[row_idx] : (scalar_t)1, tmp; #pragma unroll for (int i = 1; i < 32; i *= 2) { // Parallel reduction inside a single warp. tmp = __shfl_up_sync(FULL_MASK, val, i); next_idx = __shfl_up_sync(FULL_MASK, idx, i); if (lane_idx >= i && row_idx / D == (row_idx - i) / D) { assert(idx >= next_idx); if (idx == next_idx) Reducer<scalar_t, REDUCE>::update(&val, tmp); } } next_idx = __shfl_down_sync(FULL_MASK, idx, 1); if (lane_idx == 32 - 1 || row_idx / D != (row_idx + 1) / D || idx != next_idx) Reducer<scalar_t, REDUCE>::atomic_write(out_data + out_idx, val); } } template <typename scalar_t> __global__ void segment_coo_arg_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, int64_t *arg_out_data, size_t E, size_t N) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int D = index_info.sizes[index_info.dims - 1]; if (row_idx < E) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( row_idx, index_info); int64_t idx = index_info.data[offset]; int out_idx = (row_idx / D) * N + idx; scalar_t val = __ldg(out_data + out_idx); if (src_data[row_idx] == val) arg_out_data[out_idx] = row_idx % D; } } template <typename scalar_t, ReductionType REDUCE, int TB> __global__ void segment_coo_broadcast_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, size_t E, size_t K, size_t N) { // Each thread processes a single column and `TB` index entries. Coalesced // read and write is performed in column-major order. The intermediate // results are written via atomics. 
int D = index_info.sizes[index_info.dims - 1]; int E_1 = E / D; int E_2 = D + TB - (D % TB); int row_idx = blockIdx.x * blockDim.y + threadIdx.y; int col_idx = blockIdx.y * blockDim.x + threadIdx.x; int dim_start = (row_idx * TB) / E_2; int row_start = (row_idx * TB) % E_2; if (dim_start < E_1 && col_idx < K) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( dim_start * D + row_start, index_info); int idx1 = __ldg(index_info.data + offset), idx2; scalar_t val = src_data[K * (dim_start * D + row_start) + col_idx]; #pragma unroll for (int i = 1; i < TB; i++) { if (row_start + i >= D) break; idx2 = __ldg(index_info.data + offset + i * index_info.strides[index_info.dims - 1]); assert(idx1 <= idx2); if (idx1 == idx2) { Reducer<scalar_t, REDUCE>::update( &val, src_data[K * (dim_start * D + row_start + i) + col_idx]); } else { Reducer<scalar_t, REDUCE>::atomic_write( out_data + (dim_start * N + idx1) * K + col_idx, val); val = src_data[K * (dim_start * D + row_start + i) + col_idx]; } idx1 = idx2; } Reducer<scalar_t, REDUCE>::atomic_write( out_data + (dim_start * N + idx1) * K + col_idx, val); } } template <typename scalar_t> __global__ void segment_coo_arg_broadcast_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, int64_t *arg_out_data, size_t E, size_t K, size_t N) { int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / K; int col_idx = thread_idx % K; int D = index_info.sizes[index_info.dims - 1]; if (row_idx < E && col_idx < K) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( row_idx, index_info); int idx = __ldg(index_info.data + offset); int out_idx = ((row_idx / D) * N + idx) * K + col_idx; scalar_t val = __ldg(out_data + out_idx); if (src_data[thread_idx] == val) arg_out_data[out_idx] = row_idx % D; } } std::tuple<torch::Tensor, torch::optional<torch::Tensor>> segment_coo_cuda(torch::Tensor src, torch::Tensor index, torch::optional<torch::Tensor> optional_out, torch::optional<int64_t> dim_size, std::string reduce) { CHECK_CUDA(src); CHECK_CUDA(index); if (optional_out.has_value()) CHECK_CUDA(optional_out.value()); cudaSetDevice(src.get_device()); CHECK_INPUT(src.dim() >= index.dim()); auto sizes = index.sizes().vec(); for (int i = 0; i < index.dim(); i++) { sizes[i] = src.size(i); } index = index.expand(sizes); auto dim = index.dim() - 1; src = src.contiguous(); torch::Tensor out; if (optional_out.has_value()) { out = optional_out.value().contiguous(); for (int i = 0; i < out.dim(); i++) if (i != dim) CHECK_INPUT(src.size(i) == out.size(i)); } else { sizes = src.sizes().vec(); if (dim_size.has_value()) sizes[dim] = dim_size.value(); else if (index.numel() == 0) sizes[dim] = 0; else { auto tmp = index.select(dim, index.size(dim) - 1); tmp = tmp.numel() > 1 ? 
tmp.max() : tmp; auto d_size = tmp.data_ptr<int64_t>(); auto h_size = (int64_t *)malloc(sizeof(int64_t)); cudaMemcpy(h_size, d_size, sizeof(int64_t), cudaMemcpyDeviceToHost); sizes[dim] = 1 + *h_size; } out = torch::zeros(sizes, src.options()); } torch::optional<torch::Tensor> arg_out = torch::nullopt; int64_t *arg_out_data = nullptr; if (reduce2REDUCE.at(reduce) == MIN || reduce2REDUCE.at(reduce) == MAX) { arg_out = torch::full_like(out, src.size(dim), index.options()); arg_out_data = arg_out.value().data_ptr<int64_t>(); } else if (reduce2REDUCE.at(reduce) == MEAN) { auto sizes = index.sizes().vec(); sizes[dim] = out.size(dim); arg_out = torch::zeros(sizes, out.options()); } if (index.numel() == 0) return std::make_tuple(out, arg_out); auto E = index.numel(); auto E_2 = index.size(dim); auto E_1 = index.numel() / E_2; auto K = src.numel() / E; auto N = out.size(dim); auto avg_len = (float)E_2 / (float)N; auto index_info = at::cuda::detail::getTensorInfo<int64_t, int>(index); auto stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES(src.scalar_type(), "segment_coo_kernel", [&] { auto src_data = src.data_ptr<scalar_t>(); auto out_data = out.data_ptr<scalar_t>(); AT_DISPATCH_REDUCTION_TYPES(reduce, [&] { if (!optional_out.has_value()) out.fill_(Reducer<scalar_t, REDUCE>::init()); if (K == 1) segment_coo_kernel<scalar_t, REDUCE, true> <<<BLOCKS(1, E), THREADS, 0, stream>>>(src_data, index_info, out_data, E, N); else if (avg_len <= 8) segment_coo_broadcast_kernel<scalar_t, REDUCE, 4> <<<dim3((E_1 * ((E_2 + 3) / 4) + 7) / 8, (K + 31) / 32), dim3(32, 8), 0, stream>>>(src_data, index_info, out_data, E, K, N); else if (avg_len <= 16) segment_coo_broadcast_kernel<scalar_t, REDUCE, 8> <<<dim3((E_1 * ((E_2 + 7) / 8) + 7) / 8, (K + 31) / 32), dim3(32, 8), 0, stream>>>(src_data, index_info, out_data, E, K, N); else if (avg_len <= 32) segment_coo_broadcast_kernel<scalar_t, REDUCE, 16> <<<dim3((E_1 * ((E_2 + 15) / 16) + 7) / 8, (K + 31) / 32), dim3(32, 8), 0, stream>>>(src_data, index_info, out_data, E, K, N); else segment_coo_broadcast_kernel<scalar_t, REDUCE, 32> <<<dim3((E_1 * ((E_2 + 31) / 32) + 7) / 8, (K + 31) / 32), dim3(32, 8), 0, stream>>>(src_data, index_info, out_data, E, K, N); if (!optional_out.has_value() && (REDUCE == MIN || REDUCE == MAX)) out.masked_fill_(out == Reducer<scalar_t, REDUCE>::init(), (scalar_t)0); if (REDUCE == MIN || REDUCE == MAX) { if (K == 1) segment_coo_arg_kernel<scalar_t> <<<BLOCKS(1, E), THREADS, 0, stream>>>( src_data, index_info, out_data, arg_out_data, E, N); else segment_coo_arg_broadcast_kernel<scalar_t> <<<BLOCKS(1, E * K), THREADS, 0, stream>>>( src_data, index_info, out_data, arg_out_data, E, K, N); } if (REDUCE == MEAN) { auto count_data = arg_out.value().data_ptr<scalar_t>(); segment_coo_kernel<scalar_t, SUM, false> <<<BLOCKS(1, E), THREADS, 0, stream>>>(nullptr, index_info, count_data, E, N); arg_out.value().clamp_(1); auto count = arg_out.value(); for (int i = dim + 1; i < out.dim(); i++) count = count.unsqueeze(-1); out.div_(count); } }); }); return std::make_tuple(out, arg_out); } template <typename scalar_t> __global__ void gather_coo_kernel(const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, size_t E, size_t N) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; if (row_idx < E) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( row_idx, index_info); int row = index_info.data[offset]; offset = (row_idx / index_info.sizes[index_info.dims - 1]) * N; scalar_t val = 
__ldg(src_data + offset + row); out_data[row_idx] = val; } } template <typename scalar_t> __global__ void gather_coo_broadcast_kernel( const scalar_t *src_data, const at::cuda::detail::TensorInfo<int64_t, int> index_info, scalar_t *out_data, size_t E, size_t K, size_t N) { int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int row_idx = thread_idx / K; int col_idx = thread_idx % K; if (thread_idx < E * K) { int offset = at::cuda::detail::IndexToOffset<int64_t, int, -1>::get( row_idx, index_info); int row = index_info.data[offset]; offset = (row_idx / index_info.sizes[index_info.dims - 1]) * N * K; scalar_t val = __ldg(src_data + offset + K * row + col_idx); out_data[thread_idx] = val; } } torch::Tensor gather_coo_cuda(torch::Tensor src, torch::Tensor index, torch::optional<torch::Tensor> optional_out) { CHECK_CUDA(src); CHECK_CUDA(index); if (optional_out.has_value()) CHECK_CUDA(optional_out.value()); cudaSetDevice(src.get_device()); CHECK_INPUT(src.dim() >= index.dim()); auto sizes = index.sizes().vec(); for (auto i = 0; i < index.dim() - 1; i++) sizes[i] = src.size(i); index = index.expand(sizes); auto dim = index.dim() - 1; src = src.contiguous(); torch::Tensor out; if (optional_out.has_value()) { out = optional_out.value().contiguous(); for (auto i = 0; i < src.dim(); i++) if (i != dim) CHECK_INPUT(src.size(i) == out.size(i)); CHECK_INPUT(index.size(dim) == out.size(dim)); } else { auto sizes = src.sizes().vec(); sizes[dim] = index.size(dim); out = torch::empty(sizes, src.options()); } if (index.numel() == 0) return out; auto E = index.numel(); auto K = out.numel() / E; auto N = src.size(dim); auto index_info = at::cuda::detail::getTensorInfo<int64_t, int>(index); auto stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES(src.scalar_type(), "gather_coo_kernel", [&] { auto src_data = src.data_ptr<scalar_t>(); auto out_data = out.data_ptr<scalar_t>(); if (K == 1) gather_coo_kernel<scalar_t><<<BLOCKS(1, E), THREADS, 0, stream>>>( src_data, index_info, out_data, E, N); else gather_coo_broadcast_kernel<scalar_t> <<<BLOCKS(1, E * K), THREADS, 0, stream>>>(src_data, index_info, out_data, E, K, N); }); return out; }
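For context, here is a minimal host-side sketch of how the segment_coo_cuda entry point defined above might be driven from C++ with libtorch. The standalone main, the include path, and the expected printed values are illustrative assumptions, not part of either file in this pair.

#include <iostream>
#include <torch/torch.h>
#include "segment_coo_cuda.h"  // assumed to declare segment_coo_cuda as defined above

int main() {
  // Eight source values grouped by a sorted (non-decreasing) COO index into three segments.
  auto opts_f = torch::dtype(torch::kFloat).device(torch::kCUDA);
  auto opts_i = torch::dtype(torch::kLong).device(torch::kCUDA);
  auto src   = torch::arange(8, opts_f);
  auto index = torch::tensor({0, 0, 0, 1, 1, 2, 2, 2}, opts_i);
  // "sum" reduction: out[i] accumulates the src entries whose index equals i.
  auto result = segment_coo_cuda(src, index, torch::nullopt, torch::nullopt, "sum");
  std::cout << std::get<0>(result) << std::endl;  // expected values: 3, 7, 18
  return 0;
}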
abd4fc691e329ea2459a08b1be1dfaaaf9652a44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include <opencv2/opencv.hpp> #include <cfloat> #include <opencv2/core/cuda/common.hpp> #include <opencv2/core/cuda/border_interpolate.hpp> #include <opencv2/core/cuda/vec_traits.hpp> #include <opencv2/core/cuda/vec_math.hpp> /** * Converts a uchar3 element to a float3. * * @param elem: element to convert. * @return input a float 3 with the input element fields casted to float. */ __device__ float3 uchar3_to_float3(uchar3 elem) { float3 result; result.x = (float)elem.x; result.y = (float)elem.y; result.z = (float)elem.z; return result; } /** * Returns the linear interpolation of a and b by a factor of t. * Performs the following operation: * result = a + t*(b - a) * * @param a: first value to interpolate. * @param b: second value to interpolate. * @param t: interpolation factor. * @return result of the interpolation. */ __device__ float lerp(float a, float b, float t) { return a + t * (b - a); } /** * Returns the bilinear interpolation of four pixels. * Performs the following operation: * inpH1 = lerp(pix1, pix2, intpX) * inpH2 = lerp(pix3, pix4, intpX) * inpV = lerp(inpH1, inpH2, intpY) * * @param pix1: first pixel to interpolate. * @param pix2: second pixel to interpolate. * @param pix3: third pixel to interpolate. * @param pix4: fourth pixel to interpolate. * @param intpX: horizontal interpolation factor. * @param intpY: vertical interpolation factor. * @return result of the bilinear interpolation. */ __device__ float3 interpolate_pix(float3 pix1, float3 pix2, float3 pix3, float3 pix4, float intpX, float intpY) { // First horizontal interpolation float B_h1 = lerp(pix1.x, pix2.x, intpX); float G_h1 = lerp(pix1.y, pix2.y, intpX); float R_h1 = lerp(pix1.z, pix2.z, intpX); // Second horizontal interpolation float B_h2 = lerp(pix3.x, pix4.x, intpX); float G_h2 = lerp(pix3.y, pix4.y, intpX); float R_h2 = lerp(pix3.z, pix4.z, intpX); // Vertical interpolation float B = lerp(B_h1, B_h2, intpY); float G = lerp(G_h1, G_h2, intpY); float R = lerp(R_h1, R_h2, intpY); return make_float3(B, G, R); } __global__ void process(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst, int rows, int cols, float scaleX, float scaleY) { const int dst_x = blockDim.x * blockIdx.x + threadIdx.x; const int dst_y = blockDim.y * blockIdx.y + threadIdx.y; if (dst_x < cols && dst_y < rows) { float x_pos, y_pos, intpX, intpY; intpX = modf(dst_x / scaleX, &x_pos); intpY = modf(dst_y / scaleY, &y_pos); float3 pix1 = uchar3_to_float3(src(y_pos, x_pos)); float3 pix2 = uchar3_to_float3(src(y_pos, x_pos + 1)); float3 pix3 = uchar3_to_float3(src(y_pos + 1, x_pos)); float3 pix4 = uchar3_to_float3(src(y_pos + 1, x_pos + 1)); float3 val = interpolate_pix(pix1, pix2, pix3, pix4, intpX, intpY); dst(dst_y, dst_x).x = val.x; dst(dst_y, dst_x).y = val.y; dst(dst_y, dst_x).z = val.z; } } int divUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); } void startCUDA (cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, float scaleX, float scaleY) { const dim3 block(32, 8); const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); hipLaunchKernelGGL(( process), dim3(grid), dim3(block), 0, 0, src, dst, dst.rows, dst.cols, scaleX, scaleY); }
abd4fc691e329ea2459a08b1be1dfaaaf9652a44.cu
#include<stdio.h> #include<stdlib.h> #include <opencv2/opencv.hpp> #include <cfloat> #include <opencv2/core/cuda/common.hpp> #include <opencv2/core/cuda/border_interpolate.hpp> #include <opencv2/core/cuda/vec_traits.hpp> #include <opencv2/core/cuda/vec_math.hpp> /** * Converts a uchar3 element to a float3. * * @param elem: element to convert. * @return input a float 3 with the input element fields casted to float. */ __device__ float3 uchar3_to_float3(uchar3 elem) { float3 result; result.x = (float)elem.x; result.y = (float)elem.y; result.z = (float)elem.z; return result; } /** * Returns the linear interpolation of a and b by a factor of t. * Performs the following operation: * result = a + t*(b - a) * * @param a: first value to interpolate. * @param b: second value to interpolate. * @param t: interpolation factor. * @return result of the interpolation. */ __device__ float lerp(float a, float b, float t) { return a + t * (b - a); } /** * Returns the bilinear interpolation of four pixels. * Performs the following operation: * inpH1 = lerp(pix1, pix2, intpX) * inpH2 = lerp(pix3, pix4, intpX) * inpV = lerp(inpH1, inpH2, intpY) * * @param pix1: first pixel to interpolate. * @param pix2: second pixel to interpolate. * @param pix3: third pixel to interpolate. * @param pix4: fourth pixel to interpolate. * @param intpX: horizontal interpolation factor. * @param intpY: vertical interpolation factor. * @return result of the bilinear interpolation. */ __device__ float3 interpolate_pix(float3 pix1, float3 pix2, float3 pix3, float3 pix4, float intpX, float intpY) { // First horizontal interpolation float B_h1 = lerp(pix1.x, pix2.x, intpX); float G_h1 = lerp(pix1.y, pix2.y, intpX); float R_h1 = lerp(pix1.z, pix2.z, intpX); // Second horizontal interpolation float B_h2 = lerp(pix3.x, pix4.x, intpX); float G_h2 = lerp(pix3.y, pix4.y, intpX); float R_h2 = lerp(pix3.z, pix4.z, intpX); // Vertical interpolation float B = lerp(B_h1, B_h2, intpY); float G = lerp(G_h1, G_h2, intpY); float R = lerp(R_h1, R_h2, intpY); return make_float3(B, G, R); } __global__ void process(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst, int rows, int cols, float scaleX, float scaleY) { const int dst_x = blockDim.x * blockIdx.x + threadIdx.x; const int dst_y = blockDim.y * blockIdx.y + threadIdx.y; if (dst_x < cols && dst_y < rows) { float x_pos, y_pos, intpX, intpY; intpX = modf(dst_x / scaleX, &x_pos); intpY = modf(dst_y / scaleY, &y_pos); float3 pix1 = uchar3_to_float3(src(y_pos, x_pos)); float3 pix2 = uchar3_to_float3(src(y_pos, x_pos + 1)); float3 pix3 = uchar3_to_float3(src(y_pos + 1, x_pos)); float3 pix4 = uchar3_to_float3(src(y_pos + 1, x_pos + 1)); float3 val = interpolate_pix(pix1, pix2, pix3, pix4, intpX, intpY); dst(dst_y, dst_x).x = val.x; dst(dst_y, dst_x).y = val.y; dst(dst_y, dst_x).z = val.z; } } int divUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); } void startCUDA (cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, float scaleX, float scaleY) { const dim3 block(32, 8); const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); process<<<grid, block>>>(src, dst, dst.rows, dst.cols, scaleX, scaleY); }
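A small host-side driver sketch for the startCUDA wrapper above, assuming an OpenCV build with CUDA support; the image file names and the 2x scale factors are placeholders chosen for illustration.

#include <opencv2/opencv.hpp>
#include <opencv2/core/cuda.hpp>

// Defined in the translation unit above.
void startCUDA(cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, float scaleX, float scaleY);

int main() {
  cv::Mat h_src = cv::imread("input.png", cv::IMREAD_COLOR);  // placeholder file name
  if (h_src.empty()) return 1;
  const float scaleX = 2.0f, scaleY = 2.0f;  // enlarge by 2x in both directions
  cv::cuda::GpuMat d_src;
  cv::cuda::GpuMat d_dst(static_cast<int>(h_src.rows * scaleY),
                         static_cast<int>(h_src.cols * scaleX), CV_8UC3);
  d_src.upload(h_src);
  // The kernel maps each destination pixel back to src at (x/scaleX, y/scaleY) and interpolates.
  startCUDA(d_src, d_dst, scaleX, scaleY);
  cv::Mat h_dst;
  d_dst.download(h_dst);
  cv::imwrite("output.png", h_dst);  // placeholder file name
  return 0;
}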
dfd6b37c91f88c1d06dea42f468c87e89f0b5bb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* GPU version of Firefly */ #include "g_firefly.cuh" /* Calculate the norm between two vectors */ inline float Distance(const float *pos1, const float *pos2, const int &dims) { float distance = 0.0f; for (int i = 0; i < dims; ++i) { float tp = pos1[i] - pos2[i]; distance += tp * tp; } return sqrt(distance); } __device__ float GPUDistance(const float *pos1, const float *pos2, const int dims) { float distance = 0.0f; for (int i = 0; i < dims; ++i) { float tp = pos1[i] - pos2[i]; distance += tp * tp; } return sqrt(distance); } /* Initialize d_states */ //<<<max_particles*max_particles/32, 32>>> __global__ void InitializeStates(unsigned long long seed, hiprandState_t *d_states) { int index = blockDim.x * blockIdx.x + threadIdx.x; //hiprand_init(index, index, 0, &d_states[index]); hiprand_init(seed, index, 0, &d_states[index]); } //Kernel to initialize particles //<<<max_particles, dims>>> __global__ void Initialize(Firefly *d_pop, Firefly *d_best_sol, hiprandState_t *d_states, float const *d_xmin, float const *d_xmax) { int index = blockIdx.x * blockDim.x + threadIdx.x; //d_states[max_particles * max_particles] should //be enough for max_particles * dims d_pop[blockIdx.x].pos[threadIdx.x] = hiprand_uniform(&(d_states[index])); d_pop[blockIdx.x].pos[threadIdx.x] = d_xmin[threadIdx.x] + (d_xmax[threadIdx.x] - d_xmin[threadIdx.x]) * d_pop[blockIdx.x].pos[threadIdx.x]; //if (threadIdx.x == 9) //printf("%.2f\n", d_pos[index]); //Initializing the fitness of d_pop and d_best_sol if (index == 0) d_best_sol->fitness = inf; } /* Calculate the fitness function for each particle */ __global__ void FitnessCalculate(Firefly *d_pop, float const *d_inVector, float const *d_zern, const int dims, const int pixel_num) { int index = blockDim.x * blockIdx.x + threadIdx.x; //particle index float tp_fitness = 0.0f; /* index: particle index d_zern: pixel_num * dims [store in this way (pixel1, pixel2, pixel3, ..., pixel_pixel_num)_dim1, (pixel1, pixel2, pixel3, ..., pixel_pixel_num)_dim2, ...] d_inVector: [store in this way (pixel1, pixel2, pixel3, ..., pixel_pixel_num)_image1, (pixel1, pixel2, pixel3, ..., pixel_pixel_num)_image2] d_pos: [store in this way (dim1, dim2, dim3, ...)_particle1, (dim1, dim2, dim3, ...)_particle2, ...] 
*/ float phi; float delta; float tp1, tp2; for (int i = 0; i < pixel_num; ++i) { phi = 0.0f; delta = d_pop[index].pos[dims - 1]; for (int j = 0; j < dims - 1; ++j) { phi += d_zern[j * pixel_num + i] * d_pop[index].pos[j]; } //tp_fitness += abs(cos(phi) - d_inVector[i]) + abs(cos(phi + delta) - d_inVector[pixel_num + i]); tp1 = cos(phi) - d_inVector[i]; tp2 = cos(phi + delta) - d_inVector[pixel_num + i]; tp_fitness += tp1 * tp1 + tp2 * tp2; } d_pop[index].fitness = tp_fitness; //printf("index: %d, fitness: %.2f\n", index, fitness[index]); /*Test if (index == 0) { float tp_result = 0.0f; //float tp_pos[] = { 0.4820f, -0.2068f, 1.1224f, -0.0914f, 0.0067f, -9.5738f, -0.0742f, 0.0980f, 0.0987f, -0.0152f, 0.0301f }; float tp_pos[] = { 0.479791f, -0.174059f, 1.09441f, 0.0210509f, 0.0434665f, - 9.51595f, - 0.0642395f, 0.0921565f, 0.0961314f, 0.0441319f, 0.0345671f }; for (int i = 0; i < pixel_num; ++i) { phi = 0.0f; delta = tp_pos[dims - 1]; for (int j = 0; j < dims - 1; ++j) { phi += d_zern[j * pixel_num + i] * tp_pos[j]; } //tp_result += abs(cos(phi) - d_inVector[i]) + abs(cos(phi + delta) - d_inVector[pixel_num + i]); tp_result += (cos(phi) - d_inVector[i]) * (cos(phi) - d_inVector[i]) + (cos(phi + delta) - d_inVector[pixel_num + i]) * (cos(phi + delta) - d_inVector[pixel_num + i]); } printf("%f", tp_result); } */ } /* initialize the d_best_sol with the min fitness d_pop */ __global__ void BestSolInitialize(const Firefly *d_pop, Firefly *d_best_sol) { float tp_min = inf; int tp_index = 0; for (int i = 0; i < max_particles; ++i) { if (tp_min > d_pop[i].fitness) { tp_min = d_pop[i].fitness; tp_index = i; } } *d_best_sol = d_pop[tp_index]; /* printf("fitness of d_best_sol: %.2f, fitness of d_pop[%d]: %.2f\n", d_best_sol->fitness, tp_index, d_pop[tp_index].fitness); for (int i = 0; i < 11; ++i) { printf("i = %d, %.2f, %.2f\n", i, d_pop[tp_index].pos[i], d_best_sol->pos[i]); } */ } /* Kernel to obtain the min newsols for each particles; < << max_particles, max_particles >> > The first max_particles will be the best newsol for each block */ __global__ void SelectNewSol(Firefly *d_newsols, const int dims) { int index = blockDim.x * blockIdx.x + threadIdx.x; int tx = threadIdx.x; //Declare shared memory for staging the reduce phase extern __shared__ Firefly s[]; Firefly *stage = s; float *stage_pos = (float *)&stage[max_particles]; //max_particles Firefly stage[tx].dims = dims; stage[tx].pos = &(stage_pos[tx * dims]); //Copy PBestY to shared memory stage[tx] = d_newsols[index]; __syncthreads(); //Perform the actual reduce for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tx < s) { if (stage[tx].fitness > stage[tx + s].fitness) { stage[tx] = stage[tx + s]; } } __syncthreads(); } //Copy results back into global memory if (tx == 0) { /* if (index == 0) { for (int i = 0; i < dims; ++i) { printf("d_newsols[0]: dim = %d, %.2f\n", i, d_newsols[0].pos[i]); } for (int i = 0; i < dims; ++i) { printf("stage[0]: dim = %d, %.2f\n", i, stage[0].pos[i]); } } */ d_newsols[blockIdx.x] = stage[0]; /* if (index == 0) { for (int i = 0; i < dims; ++i) { printf("new d_newsols[0]: dim = %d, %.2f\n", i, d_newsols[0].pos[i]); } } */ } } /* Update d_best_sol use the found index */ /* __global__ void BestSolUpdate(Firefly *d_best_sol, const Firefly *d_pop, int *d_best_fitness_index) { *d_best_sol = d_pop[*d_best_fitness_index]; return; } */ /* Calculate fitness for a single trail */ __device__ float TrailFitnessCalculate(float *d_trail, float const *d_inVector, float const *d_zern, const int dims, const int pixel_num) 
{ float tp_fitness = 0.0f; float phi; float delta; float tp1, tp2; for (int i = 0; i < pixel_num; ++i) { phi = 0.0f; delta = d_trail[dims - 1]; for (int j = 0; j < dims - 1; ++j) { phi += d_zern[j * pixel_num + i] * d_trail[j]; } //tp_fitness += abs(cos(phi) - d_inVector[i]) + abs(cos(phi + delta) - d_inVector[pixel_num + i]); tp1 = cos(phi) - d_inVector[i]; tp2 = cos(phi + delta) - d_inVector[pixel_num + i]; tp_fitness += tp1 * tp1 + tp2 * tp2; } return tp_fitness; } /* Obtain newsols for each ij combination of particles < << max_particles * max_particles / 32, 32 >> > */ __global__ void ObtainNewSol(Firefly *d_pop, Firefly *d_newsols, const float lambda, const float sigma_square, const float dmax, hiprandState_t *d_states, const float *d_inVector, const float *d_zern, const float *d_xmin, const float *d_xmax, const int dims, const int pixel_num, const float beta0, const float gamma, const float alpha) { int index = blockDim.x * blockIdx.x + threadIdx.x; //particle index int i = index / max_particles; int j = index % max_particles; hiprandState_t &local_state = d_states[index]; //printf("i = %d, j = %d, %.2f, %.2f\n", i, j, d_pop[i].fitness, d_pop[j].fitness); d_newsols[index].fitness = inf; //initialize d_newsols in each iteration if (d_pop[j].fitness < d_pop[i].fitness) { float r = GPUDistance(d_pop[j].pos, d_pop[i].pos, dims) / dmax; float beta = beta0 * exp(-gamma * pow(r, 2)); for (int k = 0; k < dims; ++k) { float step = hiprand_normal(&local_state) * sigma_square / powf(abs(hiprand_normal(&local_state)), 1 / lambda); step = ((1.59922f - 1.0f) * expf(-step / 2.737f) + 1) * step; float e = (d_xmax[k] - d_xmin[k]) / 100.0f * step; d_newsols[index].pos[k] = d_pop[i].pos[k] + beta * hiprand_uniform(&local_state) * (d_pop[j].pos[k] - d_pop[i].pos[k]) + alpha * e; if (d_newsols[index].pos[k] > d_xmax[k]) d_newsols[index].pos[k] = d_xmax[k]; if (d_newsols[index].pos[k] < d_xmin[k]) d_newsols[index].pos[k] = d_xmin[k]; } d_newsols[index].fitness = TrailFitnessCalculate(d_newsols[index].pos, d_inVector, d_zern, dims, pixel_num); } ////Set the current state of the PRNG //d_states[index] = local_state; // if (i == 0 || i == 1) // printf("i = %d, j = %d, %.2f, newsol: %.2f, %.2f\n", i, j, // d_pop[i].fitness, d_newsols[index].fitness, d_pop[j].fitness); } /* Update pop with the newsols; the best max_particles in the first max_particles newsols and max_particles d_pop will be selected, and stored in d_pop; Update d_best_sol with the best particles in d_pop */ __global__ void UpdatePop(Firefly *d_pop, Firefly *d_newsols, Firefly *d_best_sol, int iter_count, float *d_best_fit_rec) { //Test /* for (int i = 0; i < max_particles; ++i) { printf("i = %d, old d_pop: %.2f\n", i, d_pop[i].fitness); } for (int i = 0; i < max_particles; ++i) { printf("i = %d, d_newsols: %.2f\n", i, d_newsols[i].fitness); } */ float low_bound = 0.0f; for (int i = 0; i < max_particles; ++i) { float tp_min = inf; int tp_min_index; bool old_or_new; //old = 0; new = 1; for (int j = 0; j < max_particles; ++j) { if (d_pop[j].fitness < tp_min && d_pop[j].fitness > low_bound) { tp_min = d_pop[j].fitness; tp_min_index = j; old_or_new = false; } if (d_newsols[j].fitness < tp_min && d_newsols[j].fitness > low_bound) { tp_min = d_newsols[j].fitness; tp_min_index = j; old_or_new = true; } } Firefly tp_particle = d_pop[i]; if (!old_or_new) { //old d_pop[i] = d_pop[tp_min_index]; d_pop[tp_min_index] = tp_particle; } else { //new d_pop[i] = d_newsols[tp_min_index]; d_newsols[tp_min_index] = tp_particle; } low_bound = d_pop[i].fitness; } for 
(int i = 0; i < max_particles; ++i) { //printf("i = %d, new d_pop: %.2f\n", i, d_pop[i].fitness); if (d_pop[i].fitness < d_best_sol->fitness) *d_best_sol = d_pop[i]; } d_best_fit_rec[iter_count] = d_best_sol->fitness; printf("Iteration: %d, Best = %.2f\n", iter_count, d_best_sol->fitness); } /* __global__ void FitnessTest(Firefly *d_pop, Firefly *d_best_sol) { int index = blockIdx.x * blockDim.x + threadIdx.x; printf("index = %d, fitness = %.2f\n", index, d_pop[index].fitness); if (index == 0) { for (int i = 0; i < 11; ++i) { printf("%.2f\n", d_pop[0].pos[i]); } printf("d_best_sol -> fitness: %.2f, d_best_sol ->pos: \n", d_best_sol->fitness); for (int i = 0; i < 11; ++i) { printf("%.2f\n", d_best_sol->pos[i]); } } } __global__ void SimpleTest(Firefly *d_newsols) { printf("d_newsols[0].fitness = %.2f, d_newsols[1].fitness = %.2f\n", d_newsols[0].fitness, d_newsols[1].fitness); printf(""); } */ void FireflyOptimization(const float * const in_vector, const float * const xmin, const float * const xmax, const float * const zern, const float &gamma, const float &beta0, float alpha, const float &alpha_damp, const int &pixel_num, const int &dims, float *g_best, float *g_best_pos, float &time_use, std::vector<float> &fit_rec){ //GPU memory allocation float const *d_inVector; float const *d_zern; float const *d_xmin; float const *d_xmax; hipMalloc((void**)&d_inVector, 2 * pixel_num * sizeof(float)); hipMemcpy((void*)d_inVector, (void*)in_vector, 2 * pixel_num * sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**)&d_zern, (dims - 1) * pixel_num * sizeof(float)); hipMemcpy((void*)d_zern, (void*)zern, (dims - 1) * pixel_num * sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**)&d_xmin, dims * sizeof(float)); hipMemcpy((void*)d_xmin, (void*)xmin, dims * sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**)&d_xmax, dims * sizeof(float)); hipMemcpy((void*)d_xmax, (void*)xmax, dims * sizeof(float), hipMemcpyHostToDevice); Firefly *d_pop; hiprandState_t *d_states; Firefly *d_best_sol; float *d_best_fit_rec; //record the best fitness in each iteration hipMalloc((void**)&d_pop, max_particles * sizeof(Firefly)); hipMalloc((void**)&d_states, max_particles * max_particles * sizeof(hiprandState_t)); hipMalloc((void**)&d_best_sol, sizeof(Firefly)); hipMalloc((void**)&d_best_fit_rec, sizeof(float) * max_iters); float *d_pop_pos[max_particles]; float *d_best_sol_pos; for (int i = 0; i < max_particles; ++i) { hipMalloc((void**)&(d_pop_pos[i]), dims * sizeof(float)); //allocate memory at GPU, and assigned it to d_pop_pos[i] hipMemcpy(&(d_pop[i].pos), &(d_pop_pos[i]), sizeof(float *), hipMemcpyHostToDevice); //The memory at GPU pointed by d_pop_pos[i] is pointed by d_pop[i].pos, too hipMemcpy(&(d_pop[i].dims), &(dims), sizeof(int), hipMemcpyHostToDevice); } hipMalloc((void**)&(d_best_sol_pos), dims * sizeof(float)); hipMemcpy(&(d_best_sol->pos), &(d_best_sol_pos), sizeof(float *), hipMemcpyHostToDevice); hipMemcpy(&(d_best_sol->dims), &(dims), sizeof(int), hipMemcpyHostToDevice); //Initialize d_pop and d_states //hiprandGenerator_t gen; //hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT); //hiprandSetPseudoRandomGeneratorSeed(gen, time(NULL)); //for (int i = 0; i < max_particles; ++i) //hiprandGenerateUniform(gen, d_pop[i].pos, dims); //hiprandDestroyGenerator(gen); //Generate the different seeds every time std::random_device rd; std::mt19937_64 eng(rd()); std::uniform_int_distribution<unsigned long long> distr; unsigned long long seed = distr(eng); hipLaunchKernelGGL(( InitializeStates), 
dim3(max_particles*max_particles/32), dim3(32), 0, 0, seed, d_states); hipLaunchKernelGGL(( Initialize) , dim3(max_particles), dim3(dims), 0, 0, d_pop, d_best_sol, d_states, d_xmin, d_xmax); hipLaunchKernelGGL(( FitnessCalculate) , dim3(max_particles / 32), dim3(32), 0, 0, d_pop, d_inVector, d_zern, dims, pixel_num); // Calculate the fitness for each particle //Initialize d_best_sol hipLaunchKernelGGL(( BestSolInitialize) , dim3(1), dim3(1), 0, 0, d_pop, d_best_sol); //float *tp_d_fitness; //hipMalloc((void**)&tp_d_fitness, max_particles * sizeof(float)); //hipMemcpy(tp_d_fitness, d_fitness, max_particles * sizeof(float), hipMemcpyDeviceToDevice); //ReduceFitnessMin << <max_particles / 32, 32 >> > (tp_d_fitness, d_best_fitness_index, 1); //ReduceFitnessMin << <1, max_particles / 32 >> > (tp_d_fitness, d_best_fitness_index, 2); //BestSolUpdate << <1, 1 >> > (d_best_sol, d_pop, d_best_fitness_index); //hipFree((void *) d_best_fitness_index); //hipFree((void *) tp_d_fitness); //FitnessTest << <max_particles / 32, 32 >> > (d_pop, d_best_sol); //hipDeviceSynchronize(); //Iteration //auto t_start = std::chrono::system_clock::now();//variables for time record const float lambda = 1.5f; const float sigma_square = powf( tgammaf(1.0f + lambda) * sinf(M_PI * lambda / 2.0f) / ( tgammaf((1.0f + lambda) / 2.0f)*lambda * powf(2.0f, (lambda - 1.0f) / 2.0f) ), 1.0f / lambda); const float dmax = Distance(xmin, xmax, dims); Firefly *d_newsols; hipMalloc((void**)&d_newsols, max_particles * max_particles * sizeof(Firefly)); float *d_newsols_pos[max_particles * max_particles]; for (int i = 0; i < max_particles * max_particles; ++i) { hipMalloc((void**)&(d_newsols_pos[i]), dims * sizeof(float)); //allocate memory at GPU, and assigned it to d_newsols_pos[i] hipMemcpy(&(d_newsols[i].pos), &(d_newsols_pos[i]), sizeof(float *), hipMemcpyHostToDevice); //The memory at GPU pointed by d_newsols_pos[i] is pointed by d_newsols[i].pos, too hipMemcpy(&(d_newsols[i].dims), &(dims), sizeof(int), hipMemcpyHostToDevice); } //FitnessTest << <max_particles / 32, 32 >> > (d_pop, d_best_sol); //printf("sizeof(Firefly) = %d\n", sizeof(Firefly)); //hipDeviceSynchronize(); auto t_start = std::chrono::system_clock::now();//variables for time record for (int it = 0; it < max_iters; ++it) { //it //newsols for each ij is created (max_particles * max_particles) hipLaunchKernelGGL(( ObtainNewSol) , dim3(max_particles * max_particles / 32), dim3(32) , 0, 0, d_pop, d_newsols, lambda, sigma_square, dmax, d_states, d_inVector, d_zern, d_xmin, d_xmax, dims, pixel_num, beta0, gamma, alpha); //only the newsol with smallest fitness for each i is needed; SelectNewSol selects the newsol with //the smallest fitness and put it at the [i-1] position of d_newsols hipLaunchKernelGGL(( SelectNewSol) , dim3(max_particles), dim3(max_particles), max_particles * sizeof(Firefly) + max_particles * dims * sizeof(float), 0, d_newsols, dims); //SimpleTest << <1, 1 >> > (d_newsols); //hipDeviceSynchronize(); hipLaunchKernelGGL(( UpdatePop) , dim3(1), dim3(1) , 0, 0, d_pop, d_newsols, d_best_sol, it, d_best_fit_rec); alpha = alpha * alpha_damp; //hipDeviceSynchronize(); //For printf display } hipDeviceSynchronize(); auto t_end_i = std::chrono::system_clock::now(); std::chrono::duration<double> elapsed_seconds_from_start = t_end_i - t_start; std::cout << "*******" << std::endl << "Time for the iteration(s): " << elapsed_seconds_from_start.count() << std::endl << "********" << std::endl; time_use = elapsed_seconds_from_start.count(); hipMemcpy((void *)g_best, 
&(d_best_sol->fitness), sizeof(float), hipMemcpyDeviceToHost); hipMemcpy((void *)g_best_pos, d_best_sol_pos, dims * sizeof(float), hipMemcpyDeviceToHost); if (fit_rec.size() < max_iters) { printf("Error: fit_rec is not big enough!\n"); } else { hipMemcpy((void *)fit_rec.data(), d_best_fit_rec, max_iters * sizeof(float), hipMemcpyDeviceToHost); } /* Clean: release the per-particle position buffers through the host-side pointer arrays; d_pop_pos and d_newsols_pos are host stack arrays and must not themselves be passed to hipFree */ for (int i = 0; i < max_particles; ++i) hipFree((void *)d_pop_pos[i]); for (int i = 0; i < max_particles * max_particles; ++i) hipFree((void *)d_newsols_pos[i]); hipFree((void *)d_best_sol_pos); hipFree((void *) d_inVector); hipFree((void *) d_zern); hipFree((void *) d_xmin); hipFree((void *) d_xmax); hipFree((void *) d_pop); hipFree((void *) d_states); hipFree((void *) d_best_sol); hipFree((void *) d_newsols); hipFree((void *) d_best_fit_rec); }
dfd6b37c91f88c1d06dea42f468c87e89f0b5bb5.cu
/* GPU version of Firefly */ #include "g_firefly.cuh" /* Calculate the norm between two vectors */ inline float Distance(const float *pos1, const float *pos2, const int &dims) { float distance = 0.0f; for (int i = 0; i < dims; ++i) { float tp = pos1[i] - pos2[i]; distance += tp * tp; } return sqrt(distance); } __device__ float GPUDistance(const float *pos1, const float *pos2, const int dims) { float distance = 0.0f; for (int i = 0; i < dims; ++i) { float tp = pos1[i] - pos2[i]; distance += tp * tp; } return sqrt(distance); } /* Initialize d_states */ //<<<max_particles*max_particles/32, 32>>> __global__ void InitializeStates(unsigned long long seed, curandState *d_states) { int index = blockDim.x * blockIdx.x + threadIdx.x; //curand_init(index, index, 0, &d_states[index]); curand_init(seed, index, 0, &d_states[index]); } //Kernel to initialize particles //<<<max_particles, dims>>> __global__ void Initialize(Firefly *d_pop, Firefly *d_best_sol, curandState *d_states, float const *d_xmin, float const *d_xmax) { int index = blockIdx.x * blockDim.x + threadIdx.x; //d_states[max_particles * max_particles] should //be enough for max_particles * dims d_pop[blockIdx.x].pos[threadIdx.x] = curand_uniform(&(d_states[index])); d_pop[blockIdx.x].pos[threadIdx.x] = d_xmin[threadIdx.x] + (d_xmax[threadIdx.x] - d_xmin[threadIdx.x]) * d_pop[blockIdx.x].pos[threadIdx.x]; //if (threadIdx.x == 9) //printf("%.2f\n", d_pos[index]); //Initializing the fitness of d_pop and d_best_sol if (index == 0) d_best_sol->fitness = inf; } /* Calculate the fitness function for each particle */ __global__ void FitnessCalculate(Firefly *d_pop, float const *d_inVector, float const *d_zern, const int dims, const int pixel_num) { int index = blockDim.x * blockIdx.x + threadIdx.x; //particle index float tp_fitness = 0.0f; /* index: particle index d_zern: pixel_num * dims [store in this way (pixel1, pixel2, pixel3, ..., pixel_pixel_num)_dim1, (pixel1, pixel2, pixel3, ..., pixel_pixel_num)_dim2, ...] d_inVector: [store in this way (pixel1, pixel2, pixel3, ..., pixel_pixel_num)_image1, (pixel1, pixel2, pixel3, ..., pixel_pixel_num)_image2] d_pos: [store in this way (dim1, dim2, dim3, ...)_particle1, (dim1, dim2, dim3, ...)_particle2, ...] 
*/ float phi; float delta; float tp1, tp2; for (int i = 0; i < pixel_num; ++i) { phi = 0.0f; delta = d_pop[index].pos[dims - 1]; for (int j = 0; j < dims - 1; ++j) { phi += d_zern[j * pixel_num + i] * d_pop[index].pos[j]; } //tp_fitness += abs(cos(phi) - d_inVector[i]) + abs(cos(phi + delta) - d_inVector[pixel_num + i]); tp1 = cos(phi) - d_inVector[i]; tp2 = cos(phi + delta) - d_inVector[pixel_num + i]; tp_fitness += tp1 * tp1 + tp2 * tp2; } d_pop[index].fitness = tp_fitness; //printf("index: %d, fitness: %.2f\n", index, fitness[index]); /*Test if (index == 0) { float tp_result = 0.0f; //float tp_pos[] = { 0.4820f, -0.2068f, 1.1224f, -0.0914f, 0.0067f, -9.5738f, -0.0742f, 0.0980f, 0.0987f, -0.0152f, 0.0301f }; float tp_pos[] = { 0.479791f, -0.174059f, 1.09441f, 0.0210509f, 0.0434665f, - 9.51595f, - 0.0642395f, 0.0921565f, 0.0961314f, 0.0441319f, 0.0345671f }; for (int i = 0; i < pixel_num; ++i) { phi = 0.0f; delta = tp_pos[dims - 1]; for (int j = 0; j < dims - 1; ++j) { phi += d_zern[j * pixel_num + i] * tp_pos[j]; } //tp_result += abs(cos(phi) - d_inVector[i]) + abs(cos(phi + delta) - d_inVector[pixel_num + i]); tp_result += (cos(phi) - d_inVector[i]) * (cos(phi) - d_inVector[i]) + (cos(phi + delta) - d_inVector[pixel_num + i]) * (cos(phi + delta) - d_inVector[pixel_num + i]); } printf("%f", tp_result); } */ } /* initialize the d_best_sol with the min fitness d_pop */ __global__ void BestSolInitialize(const Firefly *d_pop, Firefly *d_best_sol) { float tp_min = inf; int tp_index = 0; for (int i = 0; i < max_particles; ++i) { if (tp_min > d_pop[i].fitness) { tp_min = d_pop[i].fitness; tp_index = i; } } *d_best_sol = d_pop[tp_index]; /* printf("fitness of d_best_sol: %.2f, fitness of d_pop[%d]: %.2f\n", d_best_sol->fitness, tp_index, d_pop[tp_index].fitness); for (int i = 0; i < 11; ++i) { printf("i = %d, %.2f, %.2f\n", i, d_pop[tp_index].pos[i], d_best_sol->pos[i]); } */ } /* Kernel to obtain the min newsols for each particles; < << max_particles, max_particles >> > The first max_particles will be the best newsol for each block */ __global__ void SelectNewSol(Firefly *d_newsols, const int dims) { int index = blockDim.x * blockIdx.x + threadIdx.x; int tx = threadIdx.x; //Declare shared memory for staging the reduce phase extern __shared__ Firefly s[]; Firefly *stage = s; float *stage_pos = (float *)&stage[max_particles]; //max_particles Firefly stage[tx].dims = dims; stage[tx].pos = &(stage_pos[tx * dims]); //Copy PBestY to shared memory stage[tx] = d_newsols[index]; __syncthreads(); //Perform the actual reduce for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tx < s) { if (stage[tx].fitness > stage[tx + s].fitness) { stage[tx] = stage[tx + s]; } } __syncthreads(); } //Copy results back into global memory if (tx == 0) { /* if (index == 0) { for (int i = 0; i < dims; ++i) { printf("d_newsols[0]: dim = %d, %.2f\n", i, d_newsols[0].pos[i]); } for (int i = 0; i < dims; ++i) { printf("stage[0]: dim = %d, %.2f\n", i, stage[0].pos[i]); } } */ d_newsols[blockIdx.x] = stage[0]; /* if (index == 0) { for (int i = 0; i < dims; ++i) { printf("new d_newsols[0]: dim = %d, %.2f\n", i, d_newsols[0].pos[i]); } } */ } } /* Update d_best_sol use the found index */ /* __global__ void BestSolUpdate(Firefly *d_best_sol, const Firefly *d_pop, int *d_best_fitness_index) { *d_best_sol = d_pop[*d_best_fitness_index]; return; } */ /* Calculate fitness for a single trail */ __device__ float TrailFitnessCalculate(float *d_trail, float const *d_inVector, float const *d_zern, const int dims, const int pixel_num) 
{ float tp_fitness = 0.0f; float phi; float delta; float tp1, tp2; for (int i = 0; i < pixel_num; ++i) { phi = 0.0f; delta = d_trail[dims - 1]; for (int j = 0; j < dims - 1; ++j) { phi += d_zern[j * pixel_num + i] * d_trail[j]; } //tp_fitness += abs(cos(phi) - d_inVector[i]) + abs(cos(phi + delta) - d_inVector[pixel_num + i]); tp1 = cos(phi) - d_inVector[i]; tp2 = cos(phi + delta) - d_inVector[pixel_num + i]; tp_fitness += tp1 * tp1 + tp2 * tp2; } return tp_fitness; } /* Obtain newsols for each ij combination of particles < << max_particles * max_particles / 32, 32 >> > */ __global__ void ObtainNewSol(Firefly *d_pop, Firefly *d_newsols, const float lambda, const float sigma_square, const float dmax, curandState *d_states, const float *d_inVector, const float *d_zern, const float *d_xmin, const float *d_xmax, const int dims, const int pixel_num, const float beta0, const float gamma, const float alpha) { int index = blockDim.x * blockIdx.x + threadIdx.x; //particle index int i = index / max_particles; int j = index % max_particles; curandState &local_state = d_states[index]; //printf("i = %d, j = %d, %.2f, %.2f\n", i, j, d_pop[i].fitness, d_pop[j].fitness); d_newsols[index].fitness = inf; //initialize d_newsols in each iteration if (d_pop[j].fitness < d_pop[i].fitness) { float r = GPUDistance(d_pop[j].pos, d_pop[i].pos, dims) / dmax; float beta = beta0 * exp(-gamma * pow(r, 2)); for (int k = 0; k < dims; ++k) { float step = curand_normal(&local_state) * sigma_square / powf(abs(curand_normal(&local_state)), 1 / lambda); step = ((1.59922f - 1.0f) * expf(-step / 2.737f) + 1) * step; float e = (d_xmax[k] - d_xmin[k]) / 100.0f * step; d_newsols[index].pos[k] = d_pop[i].pos[k] + beta * curand_uniform(&local_state) * (d_pop[j].pos[k] - d_pop[i].pos[k]) + alpha * e; if (d_newsols[index].pos[k] > d_xmax[k]) d_newsols[index].pos[k] = d_xmax[k]; if (d_newsols[index].pos[k] < d_xmin[k]) d_newsols[index].pos[k] = d_xmin[k]; } d_newsols[index].fitness = TrailFitnessCalculate(d_newsols[index].pos, d_inVector, d_zern, dims, pixel_num); } ////Set the current state of the PRNG //d_states[index] = local_state; // if (i == 0 || i == 1) // printf("i = %d, j = %d, %.2f, newsol: %.2f, %.2f\n", i, j, // d_pop[i].fitness, d_newsols[index].fitness, d_pop[j].fitness); } /* Update pop with the newsols; the best max_particles in the first max_particles newsols and max_particles d_pop will be selected, and stored in d_pop; Update d_best_sol with the best particles in d_pop */ __global__ void UpdatePop(Firefly *d_pop, Firefly *d_newsols, Firefly *d_best_sol, int iter_count, float *d_best_fit_rec) { //Test /* for (int i = 0; i < max_particles; ++i) { printf("i = %d, old d_pop: %.2f\n", i, d_pop[i].fitness); } for (int i = 0; i < max_particles; ++i) { printf("i = %d, d_newsols: %.2f\n", i, d_newsols[i].fitness); } */ float low_bound = 0.0f; for (int i = 0; i < max_particles; ++i) { float tp_min = inf; int tp_min_index; bool old_or_new; //old = 0; new = 1; for (int j = 0; j < max_particles; ++j) { if (d_pop[j].fitness < tp_min && d_pop[j].fitness > low_bound) { tp_min = d_pop[j].fitness; tp_min_index = j; old_or_new = false; } if (d_newsols[j].fitness < tp_min && d_newsols[j].fitness > low_bound) { tp_min = d_newsols[j].fitness; tp_min_index = j; old_or_new = true; } } Firefly tp_particle = d_pop[i]; if (!old_or_new) { //old d_pop[i] = d_pop[tp_min_index]; d_pop[tp_min_index] = tp_particle; } else { //new d_pop[i] = d_newsols[tp_min_index]; d_newsols[tp_min_index] = tp_particle; } low_bound = d_pop[i].fitness; } for (int i = 
0; i < max_particles; ++i) { //printf("i = %d, new d_pop: %.2f\n", i, d_pop[i].fitness); if (d_pop[i].fitness < d_best_sol->fitness) *d_best_sol = d_pop[i]; } d_best_fit_rec[iter_count] = d_best_sol->fitness; printf("Iteration: %d, Best = %.2f\n", iter_count, d_best_sol->fitness); } /* __global__ void FitnessTest(Firefly *d_pop, Firefly *d_best_sol) { int index = blockIdx.x * blockDim.x + threadIdx.x; printf("index = %d, fitness = %.2f\n", index, d_pop[index].fitness); if (index == 0) { for (int i = 0; i < 11; ++i) { printf("%.2f\n", d_pop[0].pos[i]); } printf("d_best_sol -> fitness: %.2f, d_best_sol ->pos: \n", d_best_sol->fitness); for (int i = 0; i < 11; ++i) { printf("%.2f\n", d_best_sol->pos[i]); } } } __global__ void SimpleTest(Firefly *d_newsols) { printf("d_newsols[0].fitness = %.2f, d_newsols[1].fitness = %.2f\n", d_newsols[0].fitness, d_newsols[1].fitness); printf(""); } */ void FireflyOptimization(const float * const in_vector, const float * const xmin, const float * const xmax, const float * const zern, const float &gamma, const float &beta0, float alpha, const float &alpha_damp, const int &pixel_num, const int &dims, float *g_best, float *g_best_pos, float &time_use, std::vector<float> &fit_rec){ //GPU memory allocation float const *d_inVector; float const *d_zern; float const *d_xmin; float const *d_xmax; cudaMalloc((void**)&d_inVector, 2 * pixel_num * sizeof(float)); cudaMemcpy((void*)d_inVector, (void*)in_vector, 2 * pixel_num * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_zern, (dims - 1) * pixel_num * sizeof(float)); cudaMemcpy((void*)d_zern, (void*)zern, (dims - 1) * pixel_num * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_xmin, dims * sizeof(float)); cudaMemcpy((void*)d_xmin, (void*)xmin, dims * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_xmax, dims * sizeof(float)); cudaMemcpy((void*)d_xmax, (void*)xmax, dims * sizeof(float), cudaMemcpyHostToDevice); Firefly *d_pop; curandState *d_states; Firefly *d_best_sol; float *d_best_fit_rec; //record the best fitness in each iteration cudaMalloc((void**)&d_pop, max_particles * sizeof(Firefly)); cudaMalloc((void**)&d_states, max_particles * max_particles * sizeof(curandState)); cudaMalloc((void**)&d_best_sol, sizeof(Firefly)); cudaMalloc((void**)&d_best_fit_rec, sizeof(float) * max_iters); float *d_pop_pos[max_particles]; float *d_best_sol_pos; for (int i = 0; i < max_particles; ++i) { cudaMalloc((void**)&(d_pop_pos[i]), dims * sizeof(float)); //allocate memory at GPU, and assigned it to d_pop_pos[i] cudaMemcpy(&(d_pop[i].pos), &(d_pop_pos[i]), sizeof(float *), cudaMemcpyHostToDevice); //The memory at GPU pointed by d_pop_pos[i] is pointed by d_pop[i].pos, too cudaMemcpy(&(d_pop[i].dims), &(dims), sizeof(int), cudaMemcpyHostToDevice); } cudaMalloc((void**)&(d_best_sol_pos), dims * sizeof(float)); cudaMemcpy(&(d_best_sol->pos), &(d_best_sol_pos), sizeof(float *), cudaMemcpyHostToDevice); cudaMemcpy(&(d_best_sol->dims), &(dims), sizeof(int), cudaMemcpyHostToDevice); //Initialize d_pop and d_states //curandGenerator_t gen; //curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); //curandSetPseudoRandomGeneratorSeed(gen, time(NULL)); //for (int i = 0; i < max_particles; ++i) //curandGenerateUniform(gen, d_pop[i].pos, dims); //curandDestroyGenerator(gen); //Generate the different seeds every time std::random_device rd; std::mt19937_64 eng(rd()); std::uniform_int_distribution<unsigned long long> distr; unsigned long long seed = distr(eng); 
InitializeStates<<<max_particles*max_particles/32, 32>>>(seed, d_states); Initialize <<<max_particles, dims>>> (d_pop, d_best_sol, d_states, d_xmin, d_xmax); FitnessCalculate <<<max_particles / 32, 32>>> (d_pop, d_inVector, d_zern, dims, pixel_num); // Calculate the fitness for each particle //Initialize d_best_sol BestSolInitialize <<<1, 1>>> (d_pop, d_best_sol); //float *tp_d_fitness; //cudaMalloc((void**)&tp_d_fitness, max_particles * sizeof(float)); //cudaMemcpy(tp_d_fitness, d_fitness, max_particles * sizeof(float), cudaMemcpyDeviceToDevice); //ReduceFitnessMin << <max_particles / 32, 32 >> > (tp_d_fitness, d_best_fitness_index, 1); //ReduceFitnessMin << <1, max_particles / 32 >> > (tp_d_fitness, d_best_fitness_index, 2); //BestSolUpdate << <1, 1 >> > (d_best_sol, d_pop, d_best_fitness_index); //cudaFree((void *) d_best_fitness_index); //cudaFree((void *) tp_d_fitness); //FitnessTest << <max_particles / 32, 32 >> > (d_pop, d_best_sol); //cudaDeviceSynchronize(); //Iteration //auto t_start = std::chrono::system_clock::now();//variables for time record const float lambda = 1.5f; const float sigma_square = powf( tgammaf(1.0f + lambda) * sinf(M_PI * lambda / 2.0f) / ( tgammaf((1.0f + lambda) / 2.0f)*lambda * powf(2.0f, (lambda - 1.0f) / 2.0f) ), 1.0f / lambda); const float dmax = Distance(xmin, xmax, dims); Firefly *d_newsols; cudaMalloc((void**)&d_newsols, max_particles * max_particles * sizeof(Firefly)); float *d_newsols_pos[max_particles * max_particles]; for (int i = 0; i < max_particles * max_particles; ++i) { cudaMalloc((void**)&(d_newsols_pos[i]), dims * sizeof(float)); //allocate memory at GPU, and assigned it to d_newsols_pos[i] cudaMemcpy(&(d_newsols[i].pos), &(d_newsols_pos[i]), sizeof(float *), cudaMemcpyHostToDevice); //The memory at GPU pointed by d_newsols_pos[i] is pointed by d_newsols[i].pos, too cudaMemcpy(&(d_newsols[i].dims), &(dims), sizeof(int), cudaMemcpyHostToDevice); } //FitnessTest << <max_particles / 32, 32 >> > (d_pop, d_best_sol); //printf("sizeof(Firefly) = %d\n", sizeof(Firefly)); //cudaDeviceSynchronize(); auto t_start = std::chrono::system_clock::now();//variables for time record for (int it = 0; it < max_iters; ++it) { //it //newsols for each ij is created (max_particles * max_particles) ObtainNewSol <<< max_particles * max_particles / 32, 32 >>> (d_pop, d_newsols, lambda, sigma_square, dmax, d_states, d_inVector, d_zern, d_xmin, d_xmax, dims, pixel_num, beta0, gamma, alpha); //only the newsol with smallest fitness for each i is needed; SelectNewSol selects the newsol with //the smallest fitness and put it at the [i-1] position of d_newsols SelectNewSol <<< max_particles, max_particles, max_particles * sizeof(Firefly) + max_particles * dims * sizeof(float)>>> (d_newsols, dims); //SimpleTest << <1, 1 >> > (d_newsols); //cudaDeviceSynchronize(); UpdatePop <<< 1, 1 >>> (d_pop, d_newsols, d_best_sol, it, d_best_fit_rec); alpha = alpha * alpha_damp; //cudaDeviceSynchronize(); //For printf display } cudaDeviceSynchronize(); auto t_end_i = std::chrono::system_clock::now(); std::chrono::duration<double> elapsed_seconds_from_start = t_end_i - t_start; std::cout << "*******" << std::endl << "Time for the iteration(s): " << elapsed_seconds_from_start.count() << std::endl << "********" << std::endl; time_use = elapsed_seconds_from_start.count(); cudaMemcpy((void *)g_best, &(d_best_sol->fitness), sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy((void *)g_best_pos, d_best_sol_pos, dims * sizeof(float), cudaMemcpyDeviceToHost); if (fit_rec.size() < max_iters) 
printf("Error: fit_rec are not big enough!\n"); cudaMemcpy((void *)fit_rec.data(), d_best_fit_rec, max_iters * sizeof(float), cudaMemcpyDeviceToHost); //Clean cudaFree((void *) d_inVector); cudaFree((void *) d_zern); cudaFree((void *) d_xmin); cudaFree((void *) d_xmax); cudaFree((void *) d_pop); cudaFree((void *) d_states); cudaFree((void *) d_best_sol); cudaFree((void *) d_newsols); cudaFree((void *)d_pop_pos); cudaFree((void *)d_best_sol_pos); cudaFree((void *)d_newsols); cudaFree((void *)d_newsols_pos); }
bd5c6509b3796660cce318a5e5d37f13519e81cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/SegmentReduce.h> #include <ATen/ATen.h> #include <ATen/NumericUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/cub.cuh> namespace at { namespace native { namespace { struct CustomMax { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::max<OutputT>(a, b); } }; struct CustomSum { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { return a + b; } }; struct CustomMin { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::min<OutputT>(a, b); } }; Tensor _get_complete_sum(const Tensor& lengths) { int64_t segment_count = lengths.numel(); TORCH_CHECK(segment_count < INT_MAX); auto offsets = at::empty({segment_count + 1}, lengths.options()); offsets[0].zero_(); AT_DISPATCH_INDEX_TYPES( lengths.type(), "_segment_reduce_cuda_backward_kernel1", ([&] { auto* lengths_data_ptr = lengths.data_ptr<index_t>(); auto* offsets_data_ptr = offsets.data_ptr<index_t>(); CUB_WRAPPER( hipcub::DeviceScan::InclusiveSum, lengths_data_ptr, offsets_data_ptr + 1, segment_count, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); })); return offsets; } template <typename scalar_t, typename index_t> __global__ static void post_sum_div_kernel( scalar_t* output_data, const index_t* lengths_data, const int64_t segment_count, bool is_initial_set, scalar_t initial) { CUDA_KERNEL_LOOP(index, segment_count) { CUDA_KERNEL_ASSERT(lengths_data[index] >= 0); if (lengths_data[index] == 0) { if (is_initial_set) { output_data[index] = initial; } else { output_data[index] = NAN; } } else if (!at::_isnan(output_data[index])) { output_data[index] = output_data[index] / lengths_data[index]; } } } template <typename scalar_t, typename index_t> __global__ void segment_reduce_forward_kernel( SegmentReductionType reduction, scalar_t* output_data, scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t stride_count, bool is_initial_set, scalar_t initial_value) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; int64_t row_id = idx / stride_count; int64_t lane_id = idx % stride_count; if (idx >= (segment_count * stride_count)) { return; } int64_t offset_start = lengths_cumsum_data[row_id]; int64_t offset_end = lengths_cumsum_data[row_id + 1]; // ===== step2: apply reduction for (int64_t j = offset_start; j < offset_end; ++j) { int64_t starting_index = (j * stride_count) + lane_id; const auto data = values_data[starting_index]; // TODO: There is no need to branch with every element if (reduction == SegmentReductionType::MAX) { initial_value = at::_isnan(data) ? data : std::max<scalar_t>(initial_value, data); } else if ( reduction == SegmentReductionType::MEAN || reduction == SegmentReductionType::SUM) { initial_value = initial_value + data; } else if (reduction == SegmentReductionType::MIN) { initial_value = at::_isnan(data) ? 
data : std::min<scalar_t>(initial_value, data); } } // ===== step3: finalize reduction CUDA_KERNEL_ASSERT(lengths_data[row_id] >= 0); if (lengths_data[row_id] == 0 && !is_initial_set && reduction == SegmentReductionType::MEAN) { initial_value = static_cast<scalar_t>(NAN); } else if ( reduction == SegmentReductionType::MEAN && lengths_data[row_id] > 0 && !at::_isnan(initial_value)) { initial_value = initial_value / lengths_data[row_id]; } int64_t output_index = (row_id * stride_count) + lane_id; output_data[output_index] = initial_value; } template <typename scalar_t, typename index_t> __global__ void segment_reduce_backward_kernel( SegmentReductionType reduction, scalar_t* grad_input_data, scalar_t* grad_data, scalar_t* output_data, const scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t stride_count) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; int64_t row_id = idx / stride_count; int64_t lane_id = idx % stride_count; if (idx >= (segment_count * stride_count)) { return; } if (lengths_data[row_id] == 0) { return; } int64_t offset_start = lengths_cumsum_data[row_id]; int64_t offset_end = lengths_cumsum_data[row_id + 1]; int64_t output_index = (row_id * stride_count) + lane_id; if (reduction == SegmentReductionType::MAX || reduction == SegmentReductionType::MIN) { int64_t counter = 0; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t starting_index = (j * stride_count) + lane_id; if (at::_isnan(values_data[starting_index]) || values_data[starting_index] == output_data[output_index]) { grad_input_data[starting_index] = grad_data[output_index]; counter++; } } // Average gradient based on number of maximum elements in the // segment if (counter < 2) { return; } for (int64_t j = offset_start; j < offset_end; ++j) { int64_t starting_index = (j * stride_count) + lane_id; if (grad_input_data[starting_index] > 0) { grad_input_data[starting_index] = grad_input_data[starting_index] / counter; } } } else if (reduction == SegmentReductionType::MEAN) { auto grad_val = grad_data[output_index] / lengths_data[row_id]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t starting_index = (j * stride_count) + lane_id; grad_input_data[starting_index] = grad_val; } } else if (reduction == SegmentReductionType::SUM) { const auto& grad_val = grad_data[output_index]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t starting_index = (j * stride_count) + lane_id; grad_input_data[starting_index] = grad_val; } } } } // namespace Tensor _segment_reduce_cuda_backward_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, SegmentReductionType reduction, const Tensor& lengths_contig, int64_t axis) { int64_t segment_count = lengths_contig.numel(); auto output_shape = data_contig.sizes().vec(); output_shape[axis] = segment_count; auto grad_input = at::zeros({data_contig.sizes()}, grad_contig.options()); int64_t stride_count = data_contig.numel() / data_contig.size(axis); auto offsets = _get_complete_sum(lengths_contig); constexpr int threads_per_block = 256; int64_t num_blocks = ((segment_count * stride_count) + threads_per_block - 1) / threads_per_block; num_blocks = ::max(num_blocks, (int64_t)1); AT_DISPATCH_INDEX_TYPES( lengths_contig.type(), "_segment_reduce_cuda_backward_kernel1", ([&] { const auto* lengths_data = lengths_contig.data_ptr<index_t>(); auto* offsets_data = offsets.data_ptr<index_t>(); // TODO: Swtich to TensorIterator for better maintainablility 
and // readability AT_DISPATCH_FLOATING_TYPES_AND2( kBFloat16, kHalf, data_contig.scalar_type(), "_segment_reduce_cpu", ([&]() { auto* output_data = output_contig.data_ptr<scalar_t>(); auto* grad_data = grad_contig.data_ptr<scalar_t>(); auto* grad_input_data = grad_input.data_ptr<scalar_t>(); const auto* values_data = data_contig.data_ptr<scalar_t>(); hipLaunchKernelGGL(( segment_reduce_backward_kernel<scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), reduction, grad_input_data, grad_data, output_data, values_data, lengths_data, offsets_data, segment_count, stride_count); C10_HIP_KERNEL_LAUNCH_CHECK(); })); })); return grad_input; } Tensor _segment_reduce_cuda_kernel( SegmentReductionType reduction, const Tensor& data, const Tensor& lengths, int64_t axis, const c10::optional<Scalar>& initial) { int64_t segment_count = lengths.numel(); auto output_shape = data.sizes().vec(); output_shape[axis] = segment_count; auto output = at::empty(output_shape, data.options()); int64_t stride_count = data.numel() / data.size(axis); auto offsets = _get_complete_sum(lengths); constexpr int threads_per_block = 256; int64_t num_blocks = ((segment_count * stride_count) + threads_per_block - 1) / threads_per_block; num_blocks = ::max(num_blocks, (int64_t)1); AT_DISPATCH_INDEX_TYPES( lengths.type(), "_segment_reduce_cuda_kernel1", ([&] { auto* offsets_data_ptr = offsets.data_ptr<index_t>(); auto* lengths_data_ptr = lengths.data_ptr<index_t>(); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, data.scalar_type(), "segment_reduce_cuda", [&]() { auto* data_data_ptr = data.data_ptr<scalar_t>(); auto* output_data_ptr = output.data_ptr<scalar_t>(); // initialize starting value scalar_t initial_value; if (initial.has_value()) { initial_value = initial.value().to<scalar_t>(); } else if (reduction == SegmentReductionType::MAX) { initial_value = -std::numeric_limits<scalar_t>::infinity(); } else if ( reduction == SegmentReductionType::MEAN || reduction == SegmentReductionType::SUM) { initial_value = 0; } else if (reduction == SegmentReductionType::MIN) { initial_value = std::numeric_limits<scalar_t>::infinity(); } if (output_shape.size() > 1) { hipLaunchKernelGGL(( segment_reduce_forward_kernel<scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), reduction, output_data_ptr, data_data_ptr, lengths_data_ptr, offsets_data_ptr, segment_count, stride_count, initial.has_value(), initial_value); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { if (reduction == SegmentReductionType::MAX) { CustomMax max_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, max_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); } else if (reduction == SegmentReductionType::MEAN) { CustomSum sum_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); hipLaunchKernelGGL(( post_sum_div_kernel<scalar_t>) , dim3(num_blocks), dim3(threads_per_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output_data_ptr, lengths_data_ptr, segment_count, initial.has_value(), initial_value); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (reduction == SegmentReductionType::MIN) { CustomMin min_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, 
data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, min_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); } else if (reduction == SegmentReductionType::SUM) { CustomSum sum_op{}; CUB_WRAPPER( hipcub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); } } }); })); return output; } REGISTER_DISPATCH(_segment_reduce_stub, &_segment_reduce_cuda_kernel); REGISTER_DISPATCH( _segment_reduce_backward_stub, &_segment_reduce_cuda_backward_kernel); } // namespace native } // namespace at
bd5c6509b3796660cce318a5e5d37f13519e81cd.cu
#include <ATen/native/SegmentReduce.h> #include <ATen/ATen.h> #include <ATen/NumericUtils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/cub.cuh> namespace at { namespace native { namespace { struct CustomMax { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::max<OutputT>(a, b); } }; struct CustomSum { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { return a + b; } }; struct CustomMin { template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT& a, const OutputT& b) const { if (at::_isnan(a)) { return a; } else if (at::_isnan(b)) { return b; } return std::min<OutputT>(a, b); } }; Tensor _get_complete_sum(const Tensor& lengths) { int64_t segment_count = lengths.numel(); TORCH_CHECK(segment_count < INT_MAX); auto offsets = at::empty({segment_count + 1}, lengths.options()); offsets[0].zero_(); AT_DISPATCH_INDEX_TYPES( lengths.type(), "_segment_reduce_cuda_backward_kernel1", ([&] { auto* lengths_data_ptr = lengths.data_ptr<index_t>(); auto* offsets_data_ptr = offsets.data_ptr<index_t>(); CUB_WRAPPER( cub::DeviceScan::InclusiveSum, lengths_data_ptr, offsets_data_ptr + 1, segment_count, at::cuda::getCurrentCUDAStream()); })); return offsets; } template <typename scalar_t, typename index_t> __global__ static void post_sum_div_kernel( scalar_t* output_data, const index_t* lengths_data, const int64_t segment_count, bool is_initial_set, scalar_t initial) { CUDA_KERNEL_LOOP(index, segment_count) { CUDA_KERNEL_ASSERT(lengths_data[index] >= 0); if (lengths_data[index] == 0) { if (is_initial_set) { output_data[index] = initial; } else { output_data[index] = NAN; } } else if (!at::_isnan(output_data[index])) { output_data[index] = output_data[index] / lengths_data[index]; } } } template <typename scalar_t, typename index_t> __global__ void segment_reduce_forward_kernel( SegmentReductionType reduction, scalar_t* output_data, scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t stride_count, bool is_initial_set, scalar_t initial_value) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; int64_t row_id = idx / stride_count; int64_t lane_id = idx % stride_count; if (idx >= (segment_count * stride_count)) { return; } int64_t offset_start = lengths_cumsum_data[row_id]; int64_t offset_end = lengths_cumsum_data[row_id + 1]; // ===== step2: apply reduction for (int64_t j = offset_start; j < offset_end; ++j) { int64_t starting_index = (j * stride_count) + lane_id; const auto data = values_data[starting_index]; // TODO: There is no need to branch with every element if (reduction == SegmentReductionType::MAX) { initial_value = at::_isnan(data) ? data : std::max<scalar_t>(initial_value, data); } else if ( reduction == SegmentReductionType::MEAN || reduction == SegmentReductionType::SUM) { initial_value = initial_value + data; } else if (reduction == SegmentReductionType::MIN) { initial_value = at::_isnan(data) ? 
data : std::min<scalar_t>(initial_value, data); } } // ===== step3: finalize reduction CUDA_KERNEL_ASSERT(lengths_data[row_id] >= 0); if (lengths_data[row_id] == 0 && !is_initial_set && reduction == SegmentReductionType::MEAN) { initial_value = static_cast<scalar_t>(NAN); } else if ( reduction == SegmentReductionType::MEAN && lengths_data[row_id] > 0 && !at::_isnan(initial_value)) { initial_value = initial_value / lengths_data[row_id]; } int64_t output_index = (row_id * stride_count) + lane_id; output_data[output_index] = initial_value; } template <typename scalar_t, typename index_t> __global__ void segment_reduce_backward_kernel( SegmentReductionType reduction, scalar_t* grad_input_data, scalar_t* grad_data, scalar_t* output_data, const scalar_t* values_data, const index_t* lengths_data, const index_t* lengths_cumsum_data, const int64_t segment_count, const int64_t stride_count) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; int64_t row_id = idx / stride_count; int64_t lane_id = idx % stride_count; if (idx >= (segment_count * stride_count)) { return; } if (lengths_data[row_id] == 0) { return; } int64_t offset_start = lengths_cumsum_data[row_id]; int64_t offset_end = lengths_cumsum_data[row_id + 1]; int64_t output_index = (row_id * stride_count) + lane_id; if (reduction == SegmentReductionType::MAX || reduction == SegmentReductionType::MIN) { int64_t counter = 0; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t starting_index = (j * stride_count) + lane_id; if (at::_isnan(values_data[starting_index]) || values_data[starting_index] == output_data[output_index]) { grad_input_data[starting_index] = grad_data[output_index]; counter++; } } // Average gradient based on number of maximum elements in the // segment if (counter < 2) { return; } for (int64_t j = offset_start; j < offset_end; ++j) { int64_t starting_index = (j * stride_count) + lane_id; if (grad_input_data[starting_index] > 0) { grad_input_data[starting_index] = grad_input_data[starting_index] / counter; } } } else if (reduction == SegmentReductionType::MEAN) { auto grad_val = grad_data[output_index] / lengths_data[row_id]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t starting_index = (j * stride_count) + lane_id; grad_input_data[starting_index] = grad_val; } } else if (reduction == SegmentReductionType::SUM) { const auto& grad_val = grad_data[output_index]; for (int64_t j = offset_start; j < offset_end; ++j) { int64_t starting_index = (j * stride_count) + lane_id; grad_input_data[starting_index] = grad_val; } } } } // namespace Tensor _segment_reduce_cuda_backward_kernel( const Tensor& grad_contig, const Tensor& output_contig, const Tensor& data_contig, SegmentReductionType reduction, const Tensor& lengths_contig, int64_t axis) { int64_t segment_count = lengths_contig.numel(); auto output_shape = data_contig.sizes().vec(); output_shape[axis] = segment_count; auto grad_input = at::zeros({data_contig.sizes()}, grad_contig.options()); int64_t stride_count = data_contig.numel() / data_contig.size(axis); auto offsets = _get_complete_sum(lengths_contig); constexpr int threads_per_block = 256; int64_t num_blocks = ((segment_count * stride_count) + threads_per_block - 1) / threads_per_block; num_blocks = std::max(num_blocks, (int64_t)1); AT_DISPATCH_INDEX_TYPES( lengths_contig.type(), "_segment_reduce_cuda_backward_kernel1", ([&] { const auto* lengths_data = lengths_contig.data_ptr<index_t>(); auto* offsets_data = offsets.data_ptr<index_t>(); // TODO: Swtich to TensorIterator for better 
maintainablility and // readability AT_DISPATCH_FLOATING_TYPES_AND2( kBFloat16, kHalf, data_contig.scalar_type(), "_segment_reduce_cpu", ([&]() { auto* output_data = output_contig.data_ptr<scalar_t>(); auto* grad_data = grad_contig.data_ptr<scalar_t>(); auto* grad_input_data = grad_input.data_ptr<scalar_t>(); const auto* values_data = data_contig.data_ptr<scalar_t>(); segment_reduce_backward_kernel<scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( reduction, grad_input_data, grad_data, output_data, values_data, lengths_data, offsets_data, segment_count, stride_count); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); })); return grad_input; } Tensor _segment_reduce_cuda_kernel( SegmentReductionType reduction, const Tensor& data, const Tensor& lengths, int64_t axis, const c10::optional<Scalar>& initial) { int64_t segment_count = lengths.numel(); auto output_shape = data.sizes().vec(); output_shape[axis] = segment_count; auto output = at::empty(output_shape, data.options()); int64_t stride_count = data.numel() / data.size(axis); auto offsets = _get_complete_sum(lengths); constexpr int threads_per_block = 256; int64_t num_blocks = ((segment_count * stride_count) + threads_per_block - 1) / threads_per_block; num_blocks = std::max(num_blocks, (int64_t)1); AT_DISPATCH_INDEX_TYPES( lengths.type(), "_segment_reduce_cuda_kernel1", ([&] { auto* offsets_data_ptr = offsets.data_ptr<index_t>(); auto* lengths_data_ptr = lengths.data_ptr<index_t>(); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, data.scalar_type(), "segment_reduce_cuda", [&]() { auto* data_data_ptr = data.data_ptr<scalar_t>(); auto* output_data_ptr = output.data_ptr<scalar_t>(); // initialize starting value scalar_t initial_value; if (initial.has_value()) { initial_value = initial.value().to<scalar_t>(); } else if (reduction == SegmentReductionType::MAX) { initial_value = -std::numeric_limits<scalar_t>::infinity(); } else if ( reduction == SegmentReductionType::MEAN || reduction == SegmentReductionType::SUM) { initial_value = 0; } else if (reduction == SegmentReductionType::MIN) { initial_value = std::numeric_limits<scalar_t>::infinity(); } if (output_shape.size() > 1) { segment_reduce_forward_kernel<scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( reduction, output_data_ptr, data_data_ptr, lengths_data_ptr, offsets_data_ptr, segment_count, stride_count, initial.has_value(), initial_value); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { if (reduction == SegmentReductionType::MAX) { CustomMax max_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, max_op, initial_value, at::cuda::getCurrentCUDAStream()); } else if (reduction == SegmentReductionType::MEAN) { CustomSum sum_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::cuda::getCurrentCUDAStream()); post_sum_div_kernel<scalar_t> <<<num_blocks, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>( output_data_ptr, lengths_data_ptr, segment_count, initial.has_value(), initial_value); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (reduction == SegmentReductionType::MIN) { CustomMin min_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, min_op, initial_value, at::cuda::getCurrentCUDAStream()); } else if (reduction 
== SegmentReductionType::SUM) { CustomSum sum_op{}; CUB_WRAPPER( cub::DeviceSegmentedReduce::Reduce, data_data_ptr, output_data_ptr, segment_count, offsets_data_ptr, offsets_data_ptr + 1, sum_op, initial_value, at::cuda::getCurrentCUDAStream()); } } }); })); return output; } REGISTER_DISPATCH(_segment_reduce_stub, &_segment_reduce_cuda_kernel); REGISTER_DISPATCH( _segment_reduce_backward_stub, &_segment_reduce_cuda_backward_kernel); } // namespace native } // namespace at
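For reference, the forward reduction that segment_reduce_forward_kernel and the cub path compute collapses, for the 1-D case (stride_count == 1) with no initial value, to the CPU sketch below; it is illustrative only and not code from the file.

#include <cmath>
#include <cstdint>
#include <vector>

// Sketch: SUM/MEAN segment reduction over a flat array, with segments given by
// `lengths` (the kernels above derive the same boundaries via an inclusive
// prefix sum in _get_complete_sum). Empty segments yield 0 for SUM and NaN for
// MEAN, matching the no-initial-value behaviour of post_sum_div_kernel.
std::vector<float> segment_reduce_cpu(const std::vector<float>& data,
                                      const std::vector<int64_t>& lengths,
                                      bool mean) {
    std::vector<float> out(lengths.size(), 0.0f);
    size_t pos = 0;
    for (size_t s = 0; s < lengths.size(); ++s) {
        float acc = 0.0f;
        for (int64_t j = 0; j < lengths[s]; ++j) acc += data[pos++];
        if (mean) acc = (lengths[s] > 0) ? acc / lengths[s] : NAN;
        out[s] = acc;
    }
    return out;
}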
05f84094253f39f751b356a9ffd8abf3eb601d0e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void mykernel(){
    printf("Hello world from device!\n");
} /* end kernel */

int main(void) {
    /* launch this kernel 10 times*/
    hipLaunchKernelGGL(( mykernel), dim3(1) , dim3(10), 0, 0, );
    hipDeviceSynchronize();
    printf("Hello World from Host\n");
    return 0;
} /* end main */
05f84094253f39f751b356a9ffd8abf3eb601d0e.cu
#include "cuda_runtime.h" #include <stdio.h> __global__ void mykernel(){ printf("Hello world from device!\n"); } /* end kernel */ int main(void) { /* launch this kernel 10 times*/ mykernel<<< 1 , 10>>>(); cudaDeviceSynchronize(); printf("Hello World from Host\n"); return 0; } /* end main */
5f134d2c88ba92f5a0989a5edf4c31ad42b62591.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ####################################################### # # # Final Porject, Programacin Multincleo # # Daniel Monzalvo, Miguel del Moral # # # # Rainbow table construction in parallel, # # Hash algorythm sequecial # # # ####################################################### */ #include <iostream> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <fstream> //#include "md5.h" //#include "sha.h" // Limits of char values #define maxChar '~' #define minChar ' ' #define SIZE_MD5 33 #define SIZE_SHA 33 // number of bytes to be processed by GPU #define NUM_B 10000 #define NUM_B_B 1000 #define NUM_T_B 1024 // *************************** MD5 ************************// __device__ unsigned func0( unsigned abcd[] ){ return ( abcd[1] & abcd[2]) | (~abcd[1] & abcd[3]);} __device__ unsigned func1( unsigned abcd[] ){ return ( abcd[3] & abcd[1]) | (~abcd[3] & abcd[2]);} __device__ unsigned func2( unsigned abcd[] ){ return abcd[1] ^ abcd[2] ^ abcd[3];} __device__ unsigned func3( unsigned abcd[] ){ return abcd[2] ^ (abcd[1] |~ abcd[3]);} typedef unsigned (*DgstFctn)(unsigned a[]); typedef union uwb { unsigned w; unsigned char b[4]; } MD5union; typedef unsigned DigestArray[4]; __device__ unsigned rol( unsigned r, short N ) { unsigned mask1 = (1<<N) -1; return ((r>>(32-N)) & mask1) | ((r<<N) & ~mask1); } __device__ unsigned *calctable( unsigned *k) { double s, pwr = 2; int i; for (int j = 1; i < 32; ++i){ pwr *= 2; } //pwr = pow( 2, 32); for (i=0; i<64; i++) { s = fabs(sin((double)(1+i))); k[i] = (unsigned)( s * pwr ); } return k; } __device__ unsigned *getMd5( const char *msg, int mlen){ DigestArray h0 = { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476 }; DgstFctn ff[] = { &func0, &func1, &func2, &func3 }; short M[] = { 1, 5, 3, 7 }; short O[] = { 0, 1, 5, 0 }; short rot0[] = { 7,12,17,22}; short rot1[] = { 5, 9,14,20}; short rot2[] = { 4,11,16,23}; short rot3[] = { 6,10,15,21}; short *rots[] = {rot0, rot1, rot2, rot3 }; unsigned kspace[64]; unsigned *k; DigestArray h; DigestArray abcd; DgstFctn fctn; short m, o, g; unsigned f; short *rotn; union { unsigned w[16]; char b[64]; }mm; int os = 0; int grp, grps, q, p; unsigned char *msg2; if (k==NULL) k= calctable(kspace); for (q=0; q<4; q++) h[q] = h0[q]; // initialize { grps = 1 + (mlen+8)/64; msg2 = (unsigned char*)malloc( 64*grps); memcpy( msg2, msg, mlen); msg2[mlen] = (unsigned char)0x80; q = mlen + 1; while (q < 64*grps){ msg2[q] = 0; q++ ; } { MD5union u; u.w = 8*mlen; q -= 8; memcpy(msg2+q, &u.w, 4 ); } } for (grp=0; grp<grps; grp++) { memcpy( mm.b, msg2+os, 64); for(q=0;q<4;q++) abcd[q] = h[q]; for (p = 0; p<4; p++) { fctn = ff[p]; rotn = rots[p]; m = M[p]; o= O[p]; for (q=0; q<16; q++) { g = (m*q + o) % 16; f = abcd[1] + rol( abcd[0]+ fctn(abcd) + k[q+16*p] + mm.w[g], rotn[q%4]); abcd[0] = abcd[3]; abcd[3] = abcd[2]; abcd[2] = abcd[1]; abcd[1] = f; } } for (p=0; p<4; p++) h[p] += abcd[p]; os += 64; } return h; } __device__ char * md5(char* msg, int H){ int j,k; char * res = ""; int sizef = 0; for(int i = 0; i < H; ++i){ if(msg[i] == '\0') break; sizef++; } unsigned *d = getMd5(msg, sizef); MD5union u; char temp[33]; char mask = 240; char mask2 = 15; temp[32] = '\0'; char* temp2; int cont = 0; for (j=0;j<4; j++){ u.w = d[j]; for (k=0;k<4;k++){ int sum = 0; int mult = 8; for (int i = 7 ; i >=4 ; i--) { if((u.b[k] & (1 << i)) != 0 ) sum+=mult; // printf("%d",(u.b[k] & (1 << i)) != 0 ); mult/=2; } // printf("\n%d\n",sum ); if(sum<=9) temp[cont++] = 
(char)sum+48; else temp[cont++] = (char)sum+87; sum=0; mult = 8; for (int i = 3 ; i >=0 ; i--) { if((u.b[k] & (1 << i)) != 0 ) sum+=mult; // printf("%d",(u.b[k] & (1 << i)) != 0 ); mult/=2; } if(sum<=9) temp[cont++] = (char)sum+48; else temp[cont++] = (char)sum+87; // printf("%02x", u.b[k] ); } } // printf("\n"); // printf("%s\n",temp ); return temp; } //*************************************** MD5 - FIN *************************************// //*************************************** crear nueva cadena ******************************// void nextChar(char* &str, int h){ int len = strlen(str); for (int i = len-1; i >= 0; --i){ if(str[i] < maxChar-1){ str[i] += 1; return; } else{ str[i] = minChar; } } if(len == h){ str = (char*) realloc (str, len+2); //std::cout << "Big\n"; len++; } char temp = str[0]; char temp2; for (int i = 1; i < len+2; ++i){ temp2 = str[i]; str[i] = temp; temp = temp2; } str[0] = minChar; //std::cout << "changed to: -" << str[1] << "-\n"; } /******************************** KERNEL ***********************************************/ __global__ void hashBrick(char* a, char* r, int p1, int p2, int H, int algoritmo){ int id = threadIdx.x + (blockIdx.x * blockDim.x); char* word = (char*)((char*)a + (id*p1)); char* hash = (char*)((char*)r + (id*p2)); if(word[0] != '\0'){ /******* AQUI VA LA LLAMADA A FUNCION DE HASHEO *******/ if(algoritmo == 1){ /**** MD5 *****/ //hash[0] = 48 + algoritmo; //hash[1] = '\0'; char* res = md5(word, H); for (int i = 0; i < SIZE_MD5-1; ++i){ hash[i] = res[i]; } hash[SIZE_MD5-1] = '\0'; } else{ /***** SHA *****/ //hash[0] = 48 + algoritmo; //hash[1] = '\0'; hash[SIZE_SHA-1] = '\0'; } } else{ hash[0] = '0'; hash[1] = '\0'; } } /******************* Main ***********************************/ int main(int argc, const char* argv[]){ //md5("hola"); //sha("hola"); int ll, al, blocks, threads, algo; if(argc < 3){ std::cout << "please choose algorythm: (1)MD5 (2)SHA, and length." << std::endl; return 0; } else if(argc > 4){ std::cout << "too many arguments.\n"; return 0; } else if(argc == 4){ if (argv[2] > argv[3]){ std::cout << "Lower limit higher than hig limit.\n"; return 0; } if (argv[2] <= 0){ std::cout << "Lower limit too low.\n"; return 0; } ll = atoi(argv[2]); algo = atoi(argv[1]); } else{ ll = atoi(argv[2]); algo = atoi(argv[1]); } const int lh = (argc == 4)? atoi(argv[3]) : atoi(argv[2]); float tiempo1; hipEvent_t inicio, fin; clock_t t; t = clock(); hipEventCreate(&inicio); hipEventCreate(&fin); hipEventRecord(inicio, 0); al = 0; int it = ll; while(it <= lh){ al += pow(94,it); it++; } // Calculo de bloques y threads blocks = NUM_B/NUM_B_B; if(blocks < NUM_B*NUM_B_B) blocks++; threads = (NUM_B/lh)/blocks; std::cout << "Words = " << (NUM_B/lh) << "\n"; std::cout << "Total = " << al << "\n"; // Calculo de bricks a procesar int loops = al / (NUM_B/lh); if(loops * (NUM_B/lh) < al) loops++; std::cout << "Loops = " << loops << "\n"; char* first = (char*) malloc (lh+1); for (int i = 0; i < ll; ++i){ first[i] = minChar; } first[ll] = '\0'; //std::cout << first << " first\n"; // Declare arrays const int width = NUM_B/lh; const int height = lh+1; const int height2 = (algo==1)? 
SIZE_MD5 : SIZE_SHA; size_t host_pitch1 = height*sizeof(char); size_t host_pitch2 = height2*sizeof(char); // CPU word aray char arr[width][height]; //CPU hash array char hash[width][height2]; //GPU word array char* arr_dev; size_t pitch1; hipMallocPitch((void**)&arr_dev, &pitch1, height, width); // GPU hash array char* hash_dev; size_t pitch2; hipMallocPitch((void**)&hash_dev, &pitch2, height2, width); // Archivo std::ofstream f; f.open("Table.txt"); // Recorrer todos los bricks for (int i = 0; i < loops; ++i){ // Crear Bricks for(int j = 0; j < width; ++j){ if(strlen(first) <= height-1){ for(int k = 0; k < height; ++k){ arr[j][k] = first[k]; } nextChar(first, lh); } else{ //std::cout << "nop: " << strlen(first) << " > " << height-1 << std::endl; arr[j][0] = '\0'; } } // Copiar a Tarjeta hipMemcpy2D(arr_dev, pitch1, arr, host_pitch1, height*sizeof(char), width, hipMemcpyHostToDevice); // Procesar el Kernel hipLaunchKernelGGL(( hashBrick), dim3(blocks),dim3(threads), 0, 0, arr_dev, hash_dev, pitch1, pitch2, height, algo); hipDeviceSynchronize(); // Copiar a RAM hipMemcpy2D(hash, host_pitch2, hash_dev, pitch2, height2*sizeof(char), width, hipMemcpyDeviceToHost); // Copiar a DISCO for(int j = 0; j < width; ++j){ if(strlen(arr[j])>0) f << arr[j] << '\t' << hash[j] << '\n'; } } f.close(); hipEventRecord(fin, 0); hipEventSynchronize(fin); hipEventElapsedTime(&tiempo1, inicio, fin); t = clock()-t; std::cout << "Time: " << (((float)t) / CLOCKS_PER_SEC) << std::endl; //free(arr); return 0; }
5f134d2c88ba92f5a0989a5edf4c31ad42b62591.cu
/* ####################################################### # # # Final Porject, Programación Multinúcleo # # Daniel Monzalvo, Miguel del Moral # # # # Rainbow table construction in parallel, # # Hash algorythm sequecial # # # ####################################################### */ #include <iostream> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <fstream> //#include "md5.h" //#include "sha.h" // Limits of char values #define maxChar '~' #define minChar ' ' #define SIZE_MD5 33 #define SIZE_SHA 33 // number of bytes to be processed by GPU #define NUM_B 10000 #define NUM_B_B 1000 #define NUM_T_B 1024 // *************************** MD5 ************************// __device__ unsigned func0( unsigned abcd[] ){ return ( abcd[1] & abcd[2]) | (~abcd[1] & abcd[3]);} __device__ unsigned func1( unsigned abcd[] ){ return ( abcd[3] & abcd[1]) | (~abcd[3] & abcd[2]);} __device__ unsigned func2( unsigned abcd[] ){ return abcd[1] ^ abcd[2] ^ abcd[3];} __device__ unsigned func3( unsigned abcd[] ){ return abcd[2] ^ (abcd[1] |~ abcd[3]);} typedef unsigned (*DgstFctn)(unsigned a[]); typedef union uwb { unsigned w; unsigned char b[4]; } MD5union; typedef unsigned DigestArray[4]; __device__ unsigned rol( unsigned r, short N ) { unsigned mask1 = (1<<N) -1; return ((r>>(32-N)) & mask1) | ((r<<N) & ~mask1); } __device__ unsigned *calctable( unsigned *k) { double s, pwr = 2; int i; for (int j = 1; i < 32; ++i){ pwr *= 2; } //pwr = pow( 2, 32); for (i=0; i<64; i++) { s = fabs(sin((double)(1+i))); k[i] = (unsigned)( s * pwr ); } return k; } __device__ unsigned *getMd5( const char *msg, int mlen){ DigestArray h0 = { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476 }; DgstFctn ff[] = { &func0, &func1, &func2, &func3 }; short M[] = { 1, 5, 3, 7 }; short O[] = { 0, 1, 5, 0 }; short rot0[] = { 7,12,17,22}; short rot1[] = { 5, 9,14,20}; short rot2[] = { 4,11,16,23}; short rot3[] = { 6,10,15,21}; short *rots[] = {rot0, rot1, rot2, rot3 }; unsigned kspace[64]; unsigned *k; DigestArray h; DigestArray abcd; DgstFctn fctn; short m, o, g; unsigned f; short *rotn; union { unsigned w[16]; char b[64]; }mm; int os = 0; int grp, grps, q, p; unsigned char *msg2; if (k==NULL) k= calctable(kspace); for (q=0; q<4; q++) h[q] = h0[q]; // initialize { grps = 1 + (mlen+8)/64; msg2 = (unsigned char*)malloc( 64*grps); memcpy( msg2, msg, mlen); msg2[mlen] = (unsigned char)0x80; q = mlen + 1; while (q < 64*grps){ msg2[q] = 0; q++ ; } { MD5union u; u.w = 8*mlen; q -= 8; memcpy(msg2+q, &u.w, 4 ); } } for (grp=0; grp<grps; grp++) { memcpy( mm.b, msg2+os, 64); for(q=0;q<4;q++) abcd[q] = h[q]; for (p = 0; p<4; p++) { fctn = ff[p]; rotn = rots[p]; m = M[p]; o= O[p]; for (q=0; q<16; q++) { g = (m*q + o) % 16; f = abcd[1] + rol( abcd[0]+ fctn(abcd) + k[q+16*p] + mm.w[g], rotn[q%4]); abcd[0] = abcd[3]; abcd[3] = abcd[2]; abcd[2] = abcd[1]; abcd[1] = f; } } for (p=0; p<4; p++) h[p] += abcd[p]; os += 64; } return h; } __device__ char * md5(char* msg, int H){ int j,k; char * res = ""; int sizef = 0; for(int i = 0; i < H; ++i){ if(msg[i] == '\0') break; sizef++; } unsigned *d = getMd5(msg, sizef); MD5union u; char temp[33]; char mask = 240; char mask2 = 15; temp[32] = '\0'; char* temp2; int cont = 0; for (j=0;j<4; j++){ u.w = d[j]; for (k=0;k<4;k++){ int sum = 0; int mult = 8; for (int i = 7 ; i >=4 ; i--) { if((u.b[k] & (1 << i)) != 0 ) sum+=mult; // printf("%d",(u.b[k] & (1 << i)) != 0 ); mult/=2; } // printf("\n%d\n",sum ); if(sum<=9) temp[cont++] = (char)sum+48; else temp[cont++] = (char)sum+87; sum=0; mult = 8; for (int i = 3 ; i >=0 
; i--) { if((u.b[k] & (1 << i)) != 0 ) sum+=mult; // printf("%d",(u.b[k] & (1 << i)) != 0 ); mult/=2; } if(sum<=9) temp[cont++] = (char)sum+48; else temp[cont++] = (char)sum+87; // printf("%02x", u.b[k] ); } } // printf("\n"); // printf("%s\n",temp ); return temp; } //*************************************** MD5 - FIN *************************************// //*************************************** crear nueva cadena ******************************// void nextChar(char* &str, int h){ int len = strlen(str); for (int i = len-1; i >= 0; --i){ if(str[i] < maxChar-1){ str[i] += 1; return; } else{ str[i] = minChar; } } if(len == h){ str = (char*) realloc (str, len+2); //std::cout << "Big\n"; len++; } char temp = str[0]; char temp2; for (int i = 1; i < len+2; ++i){ temp2 = str[i]; str[i] = temp; temp = temp2; } str[0] = minChar; //std::cout << "changed to: -" << str[1] << "-\n"; } /******************************** KERNEL ***********************************************/ __global__ void hashBrick(char* a, char* r, int p1, int p2, int H, int algoritmo){ int id = threadIdx.x + (blockIdx.x * blockDim.x); char* word = (char*)((char*)a + (id*p1)); char* hash = (char*)((char*)r + (id*p2)); if(word[0] != '\0'){ /******* AQUI VA LA LLAMADA A FUNCION DE HASHEO *******/ if(algoritmo == 1){ /**** MD5 *****/ //hash[0] = 48 + algoritmo; //hash[1] = '\0'; char* res = md5(word, H); for (int i = 0; i < SIZE_MD5-1; ++i){ hash[i] = res[i]; } hash[SIZE_MD5-1] = '\0'; } else{ /***** SHA *****/ //hash[0] = 48 + algoritmo; //hash[1] = '\0'; hash[SIZE_SHA-1] = '\0'; } } else{ hash[0] = '0'; hash[1] = '\0'; } } /******************* Main ***********************************/ int main(int argc, const char* argv[]){ //md5("hola"); //sha("hola"); int ll, al, blocks, threads, algo; if(argc < 3){ std::cout << "please choose algorythm: (1)MD5 (2)SHA, and length." << std::endl; return 0; } else if(argc > 4){ std::cout << "too many arguments.\n"; return 0; } else if(argc == 4){ if (argv[2] > argv[3]){ std::cout << "Lower limit higher than hig limit.\n"; return 0; } if (argv[2] <= 0){ std::cout << "Lower limit too low.\n"; return 0; } ll = atoi(argv[2]); algo = atoi(argv[1]); } else{ ll = atoi(argv[2]); algo = atoi(argv[1]); } const int lh = (argc == 4)? atoi(argv[3]) : atoi(argv[2]); float tiempo1; cudaEvent_t inicio, fin; clock_t t; t = clock(); cudaEventCreate(&inicio); cudaEventCreate(&fin); cudaEventRecord(inicio, 0); al = 0; int it = ll; while(it <= lh){ al += pow(94,it); it++; } // Calculo de bloques y threads blocks = NUM_B/NUM_B_B; if(blocks < NUM_B*NUM_B_B) blocks++; threads = (NUM_B/lh)/blocks; std::cout << "Words = " << (NUM_B/lh) << "\n"; std::cout << "Total = " << al << "\n"; // Calculo de bricks a procesar int loops = al / (NUM_B/lh); if(loops * (NUM_B/lh) < al) loops++; std::cout << "Loops = " << loops << "\n"; char* first = (char*) malloc (lh+1); for (int i = 0; i < ll; ++i){ first[i] = minChar; } first[ll] = '\0'; //std::cout << first << " first\n"; // Declare arrays const int width = NUM_B/lh; const int height = lh+1; const int height2 = (algo==1)? 
SIZE_MD5 : SIZE_SHA; size_t host_pitch1 = height*sizeof(char); size_t host_pitch2 = height2*sizeof(char); // CPU word aray char arr[width][height]; //CPU hash array char hash[width][height2]; //GPU word array char* arr_dev; size_t pitch1; cudaMallocPitch((void**)&arr_dev, &pitch1, height, width); // GPU hash array char* hash_dev; size_t pitch2; cudaMallocPitch((void**)&hash_dev, &pitch2, height2, width); // Archivo std::ofstream f; f.open("Table.txt"); // Recorrer todos los bricks for (int i = 0; i < loops; ++i){ // Crear Bricks for(int j = 0; j < width; ++j){ if(strlen(first) <= height-1){ for(int k = 0; k < height; ++k){ arr[j][k] = first[k]; } nextChar(first, lh); } else{ //std::cout << "nop: " << strlen(first) << " > " << height-1 << std::endl; arr[j][0] = '\0'; } } // Copiar a Tarjeta cudaMemcpy2D(arr_dev, pitch1, arr, host_pitch1, height*sizeof(char), width, cudaMemcpyHostToDevice); // Procesar el Kernel hashBrick<<<blocks,threads>>>(arr_dev, hash_dev, pitch1, pitch2, height, algo); cudaThreadSynchronize(); // Copiar a RAM cudaMemcpy2D(hash, host_pitch2, hash_dev, pitch2, height2*sizeof(char), width, cudaMemcpyDeviceToHost); // Copiar a DISCO for(int j = 0; j < width; ++j){ if(strlen(arr[j])>0) f << arr[j] << '\t' << hash[j] << '\n'; } } f.close(); cudaEventRecord(fin, 0); cudaEventSynchronize(fin); cudaEventElapsedTime(&tiempo1, inicio, fin); t = clock()-t; std::cout << "Time: " << (((float)t) / CLOCKS_PER_SEC) << std::endl; //free(arr); return 0; }
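calctable in both versions is meant to build the standard MD5 constant table k[i] = floor(2^32 * |sin(i+1)|) (the file's own commented-out line pwr = pow(2, 32) confirms the intent), but its first loop tests and increments an uninitialized i while the declared j is never used. A corrected sketch of the intended routine, not the file's exact code:

// Sketch of the intended MD5 constant-table construction.
__device__ unsigned* calctable(unsigned* k) {
    const double pwr = 4294967296.0;            // 2^32, instead of the buggy doubling loop
    for (int i = 0; i < 64; ++i) {
        double s = fabs(sin((double)(i + 1)));
        k[i] = (unsigned)(s * pwr);
    }
    return k;
}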
55ac2deb8aa75deec2b914a85314649159610b56.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

#define tileSize 32

//function for data initialization
void initialization( double *M, double *N, int arow, int acol, int brow, int bcol);
//(for Debugging) prints out the input data
void printInput( double *M, double *N, int arow, int acol, int brow, int bcol);
//(for Debugging) prints out the output data
void printOutput( double *P_C, double *P_G, int arow, int bcol);

//GPU kernels
__global__ void vectorAddition(const double *A, const double *B, double *C, int numElements)
{
    int gridIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    for (int i = gridIndex; i < numElements; i += stride) {
        C[i] = A[i] + B[i];
    }
}
55ac2deb8aa75deec2b914a85314649159610b56.cu
#include "includes.h" #define tileSize 32 //function for data initialization void initialization( double *M, double *N, int arow, int acol, int brow, int bcol); //(for Debugging) prints out the input data void printInput( double *M, double *N, int arow, int acol, int brow, int bcol); //(for Debugging) prints out the output data void printOutput( double *P_C, double *P_G, int arow, int bcol); //GPU kernels __global__ __global__ void vectorAddition(const double *A, const double *B, double *C, int numElements) { int gridIndex = blockDim.x * blockIdx.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = gridIndex; i<numElements; i+=stride) { C[i] = A[i] + B[i]; } }
9a74d6c466f9c4b30241b0c33391310d5bcdafe1.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; int* dev_extend; int* dev_map; int* dev_scan; int* dev_scatter; int* dev_data; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernUpSweep(int n, int d, int* data) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } int power = 1 << (d + 1); int step = 1 << d; if (index != 0 && (index + 1) % power == 0) { data[index] += data[index - step]; } } __global__ void kernUpSweepOptimized(int n, int stride, int* data, int start) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) { return; } int index = k * stride + start; data[index] += data[index - stride / 2]; } __global__ void kernDownSweep(int n, int d, int* data) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } int power = 1 << (d + 1); int step = power >> 1; if (index != 0 && (index + 1) % power == 0) { int t = data[index - step]; data[index - step] = data[index]; data[index] += t; } } __global__ void kernDownSweepOptimized(int n, int stride, int* data, int start) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) { return; } int index = start + k * stride; int power = stride; if (index != 0 && (index + 1) % power == 0) { int t = data[index - stride / 2]; data[index - stride / 2] = data[index]; data[index] += t; } } __global__ void kernExtendArr(int extendNum, int n, int* idata, int* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= extendNum) { return; } if (index >= n) { odata[index] = 0; } else { odata[index] = idata[index]; } } __global__ void kernMap(int n, int* idata, int* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } odata[index] = idata[index] == 0 ? 0 : 1; } __global__ void kernSetValue(int n, int value, int* data) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index == n) { data[index] = value; } else { return; } } __global__ void kernSetValueOptimized(int n, int value, int* data, int stride) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } data[index + stride - 1] = value; } __global__ void kernScatter(int n, int* idata, int* scan, int* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } if (idata[index] != 0) { odata[scan[index]] = idata[index]; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scan(int n, int *odata, const int *idata) { // TODO dim3 threadsPerBlock(blockSize); dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); // Expand non power-2 to power-2 int ceil = ilog2ceil(n); int num = 1 << ceil; int* extendData = new int[num]; int* tmp = new int[num]; hipMalloc((void**)&dev_extend, num * sizeof(int)); checkCUDAError("dev_arrr failed!"); hipMalloc((void**)&dev_data, n * sizeof(int)); checkCUDAError("dev_arrr failed!"); hipMemcpy(dev_data, idata, n * sizeof(int), hipMemcpyHostToDevice); timer().startGpuTimer(); kernExtendArr << <fullBlocksPerGrid, threadsPerBlock >> > (num, n, dev_data, dev_extend); for (int d = 0; d <= ceil; d++) { kernUpSweep << <fullBlocksPerGrid, threadsPerBlock >> > (num, d, dev_extend); } kernSetValue << <fullBlocksPerGrid, threadsPerBlock >> > (num - 1, 0, dev_extend); for (int d = ceil - 1; d >= 0; d--) { kernDownSweep << <fullBlocksPerGrid, threadsPerBlock >> > (num, d, dev_extend); } timer().endGpuTimer(); hipMemcpy(odata, dev_extend, n * sizeof(int), hipMemcpyDeviceToHost); /* printf("_________________test____________________\n"); for (int i = 0; i < n; i++) { printf("%3d ", odata[i]); } */ hipFree(dev_extend); hipFree(dev_data); delete[] tmp; delete[] extendData; } void scanOptimized(int n, int* odata, const int* idata) { dim3 threadsPerBlock(blockSize); // Expand non power-2 to power-2 int ceil = ilog2ceil(n); int num = 1 << ceil; int* extendData = new int[num]; int* tmp = new int[num]; hipMalloc((void**)&dev_extend, num * sizeof(int)); checkCUDAError("dev_arrr failed!"); hipMalloc((void**)&dev_data, n * sizeof(int)); checkCUDAError("dev_arrr failed!"); hipMemcpy(dev_data, idata, n * sizeof(int), hipMemcpyHostToDevice); dim3 fullBlocksPerGrid((num + blockSize - 1) / blockSize); timer().startGpuTimer(); kernExtendArr << <fullBlocksPerGrid, threadsPerBlock >> > (num, n, dev_data, dev_extend); for (int d = 1; d <= ceil; d++) { int threadNum = 1 << (ceil - d); int stride = 1 << d; int start = stride - 1; fullBlocksPerGrid = (threadNum + blockSize - 1) / blockSize; kernUpSweepOptimized << <fullBlocksPerGrid, threadsPerBlock >> > (threadNum, stride, dev_extend, start); } kernSetValueOptimized << <1, threadsPerBlock >> > (1, 0, dev_extend, num); for (int d = ceil - 1; d >= 0; d--) { int threadNum = 1 << (ceil - d - 1); int stride = 1 << (d + 1); int start = stride - 1; fullBlocksPerGrid = (threadNum + blockSize - 1) / blockSize; kernDownSweepOptimized << <fullBlocksPerGrid, threadsPerBlock >> > (threadNum, stride, dev_extend, start); } timer().endGpuTimer(); hipMemcpy(odata, dev_extend, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_extend); hipFree(dev_data); delete[] tmp; delete[] extendData; } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { int ceil = ilog2ceil(n); int num = 1 << ceil; int* host_scan = new int[num]; int* tmp = new int[num]; dim3 threadsPerBlock(blockSize); dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); hipMalloc((void**)&dev_map, num * sizeof(int)); checkCUDAError("dev_map failed!"); hipMalloc((void**)&dev_scan, num * sizeof(int)); checkCUDAError("dev_scan failed!"); hipMalloc((void**)&dev_scatter, num * sizeof(int)); checkCUDAError("dev_scatter failed!"); hipMalloc((void**)&dev_data, n * sizeof(int)); checkCUDAError("dev_data failed!"); hipMalloc((void**)&dev_extend, num * sizeof(int)); checkCUDAError("dev_extend failed!"); hipMemcpy(dev_data, idata, n * sizeof(int), hipMemcpyHostToDevice); timer().startGpuTimer(); // TODO // Extend non-power of 2 kernExtendArr << <fullBlocksPerGrid, threadsPerBlock >> > (num, n, dev_data, dev_extend); // map kernMap << <fullBlocksPerGrid, threadsPerBlock >> > (num, dev_extend, dev_scan); // scan for (int d = 0; d <= ceil; d++) { kernUpSweep << <fullBlocksPerGrid, threadsPerBlock >> > (num, d, dev_scan); } kernSetValue << <fullBlocksPerGrid, threadsPerBlock >> > (num - 1, 0, dev_scan); for (int d = ceil - 1; d >= 0; d--) { kernDownSweep << <fullBlocksPerGrid, threadsPerBlock >> > (num, d, dev_scan); } // scatter kernScatter << <fullBlocksPerGrid, threadsPerBlock >> > (num, dev_extend, dev_scan, dev_scatter); timer().endGpuTimer(); hipMemcpy(odata, dev_scatter, n * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(host_scan, dev_scan, num * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_extend); hipFree(dev_data); hipFree(dev_map); hipFree(dev_scan); hipFree(dev_scatter); int count = host_scan[n - 1]; if (1 << ceil != n) { count = host_scan[n]; } delete[] host_scan; delete[] tmp; return count; } } }
9a74d6c466f9c4b30241b0c33391310d5bcdafe1.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; int* dev_extend; int* dev_map; int* dev_scan; int* dev_scatter; int* dev_data; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernUpSweep(int n, int d, int* data) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } int power = 1 << (d + 1); int step = 1 << d; if (index != 0 && (index + 1) % power == 0) { data[index] += data[index - step]; } } __global__ void kernUpSweepOptimized(int n, int stride, int* data, int start) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) { return; } int index = k * stride + start; data[index] += data[index - stride / 2]; } __global__ void kernDownSweep(int n, int d, int* data) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } int power = 1 << (d + 1); int step = power >> 1; if (index != 0 && (index + 1) % power == 0) { int t = data[index - step]; data[index - step] = data[index]; data[index] += t; } } __global__ void kernDownSweepOptimized(int n, int stride, int* data, int start) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) { return; } int index = start + k * stride; int power = stride; if (index != 0 && (index + 1) % power == 0) { int t = data[index - stride / 2]; data[index - stride / 2] = data[index]; data[index] += t; } } __global__ void kernExtendArr(int extendNum, int n, int* idata, int* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= extendNum) { return; } if (index >= n) { odata[index] = 0; } else { odata[index] = idata[index]; } } __global__ void kernMap(int n, int* idata, int* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } odata[index] = idata[index] == 0 ? 0 : 1; } __global__ void kernSetValue(int n, int value, int* data) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index == n) { data[index] = value; } else { return; } } __global__ void kernSetValueOptimized(int n, int value, int* data, int stride) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } data[index + stride - 1] = value; } __global__ void kernScatter(int n, int* idata, int* scan, int* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } if (idata[index] != 0) { odata[scan[index]] = idata[index]; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scan(int n, int *odata, const int *idata) { // TODO dim3 threadsPerBlock(blockSize); dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); // Expand non power-2 to power-2 int ceil = ilog2ceil(n); int num = 1 << ceil; int* extendData = new int[num]; int* tmp = new int[num]; cudaMalloc((void**)&dev_extend, num * sizeof(int)); checkCUDAError("dev_arrr failed!"); cudaMalloc((void**)&dev_data, n * sizeof(int)); checkCUDAError("dev_arrr failed!"); cudaMemcpy(dev_data, idata, n * sizeof(int), cudaMemcpyHostToDevice); timer().startGpuTimer(); kernExtendArr << <fullBlocksPerGrid, threadsPerBlock >> > (num, n, dev_data, dev_extend); for (int d = 0; d <= ceil; d++) { kernUpSweep << <fullBlocksPerGrid, threadsPerBlock >> > (num, d, dev_extend); } kernSetValue << <fullBlocksPerGrid, threadsPerBlock >> > (num - 1, 0, dev_extend); for (int d = ceil - 1; d >= 0; d--) { kernDownSweep << <fullBlocksPerGrid, threadsPerBlock >> > (num, d, dev_extend); } timer().endGpuTimer(); cudaMemcpy(odata, dev_extend, n * sizeof(int), cudaMemcpyDeviceToHost); /* printf("_________________test____________________\n"); for (int i = 0; i < n; i++) { printf("%3d ", odata[i]); } */ cudaFree(dev_extend); cudaFree(dev_data); delete[] tmp; delete[] extendData; } void scanOptimized(int n, int* odata, const int* idata) { dim3 threadsPerBlock(blockSize); // Expand non power-2 to power-2 int ceil = ilog2ceil(n); int num = 1 << ceil; int* extendData = new int[num]; int* tmp = new int[num]; cudaMalloc((void**)&dev_extend, num * sizeof(int)); checkCUDAError("dev_arrr failed!"); cudaMalloc((void**)&dev_data, n * sizeof(int)); checkCUDAError("dev_arrr failed!"); cudaMemcpy(dev_data, idata, n * sizeof(int), cudaMemcpyHostToDevice); dim3 fullBlocksPerGrid((num + blockSize - 1) / blockSize); timer().startGpuTimer(); kernExtendArr << <fullBlocksPerGrid, threadsPerBlock >> > (num, n, dev_data, dev_extend); for (int d = 1; d <= ceil; d++) { int threadNum = 1 << (ceil - d); int stride = 1 << d; int start = stride - 1; fullBlocksPerGrid = (threadNum + blockSize - 1) / blockSize; kernUpSweepOptimized << <fullBlocksPerGrid, threadsPerBlock >> > (threadNum, stride, dev_extend, start); } kernSetValueOptimized << <1, threadsPerBlock >> > (1, 0, dev_extend, num); for (int d = ceil - 1; d >= 0; d--) { int threadNum = 1 << (ceil - d - 1); int stride = 1 << (d + 1); int start = stride - 1; fullBlocksPerGrid = (threadNum + blockSize - 1) / blockSize; kernDownSweepOptimized << <fullBlocksPerGrid, threadsPerBlock >> > (threadNum, stride, dev_extend, start); } timer().endGpuTimer(); cudaMemcpy(odata, dev_extend, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_extend); cudaFree(dev_data); delete[] tmp; delete[] extendData; } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { int ceil = ilog2ceil(n); int num = 1 << ceil; int* host_scan = new int[num]; int* tmp = new int[num]; dim3 threadsPerBlock(blockSize); dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); cudaMalloc((void**)&dev_map, num * sizeof(int)); checkCUDAError("dev_map failed!"); cudaMalloc((void**)&dev_scan, num * sizeof(int)); checkCUDAError("dev_scan failed!"); cudaMalloc((void**)&dev_scatter, num * sizeof(int)); checkCUDAError("dev_scatter failed!"); cudaMalloc((void**)&dev_data, n * sizeof(int)); checkCUDAError("dev_data failed!"); cudaMalloc((void**)&dev_extend, num * sizeof(int)); checkCUDAError("dev_extend failed!"); cudaMemcpy(dev_data, idata, n * sizeof(int), cudaMemcpyHostToDevice); timer().startGpuTimer(); // TODO // Extend non-power of 2 kernExtendArr << <fullBlocksPerGrid, threadsPerBlock >> > (num, n, dev_data, dev_extend); // map kernMap << <fullBlocksPerGrid, threadsPerBlock >> > (num, dev_extend, dev_scan); // scan for (int d = 0; d <= ceil; d++) { kernUpSweep << <fullBlocksPerGrid, threadsPerBlock >> > (num, d, dev_scan); } kernSetValue << <fullBlocksPerGrid, threadsPerBlock >> > (num - 1, 0, dev_scan); for (int d = ceil - 1; d >= 0; d--) { kernDownSweep << <fullBlocksPerGrid, threadsPerBlock >> > (num, d, dev_scan); } // scatter kernScatter << <fullBlocksPerGrid, threadsPerBlock >> > (num, dev_extend, dev_scan, dev_scatter); timer().endGpuTimer(); cudaMemcpy(odata, dev_scatter, n * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(host_scan, dev_scan, num * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_extend); cudaFree(dev_data); cudaFree(dev_map); cudaFree(dev_scan); cudaFree(dev_scatter); int count = host_scan[n - 1]; if (1 << ceil != n) { count = host_scan[n]; } delete[] host_scan; delete[] tmp; return count; } } }
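A CPU reference for the two operations implemented above, exclusive prefix sum and compaction of non-zero elements, is handy for checking the kernel output; this is a sketch, not part of the repository.

// Exclusive scan: odata[i] = sum of idata[0..i-1].
void scan_cpu(int n, int* odata, const int* idata) {
    int acc = 0;
    for (int i = 0; i < n; ++i) { odata[i] = acc; acc += idata[i]; }
}

// Compaction: keep non-zero elements in order, return their count.
int compact_cpu(int n, int* odata, const int* idata) {
    int count = 0;
    for (int i = 0; i < n; ++i)
        if (idata[i] != 0) odata[count++] = idata[i];
    return count;
}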
d748bc0ab520d6473860ed8873d591b005939ea4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/ceil_op.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

template <typename T>
__global__ void CeilKernel(const int N, const T* X, T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    Y[i] = ::ceil(X[i]);
  }
}

template <>
bool CeilOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto* Y = Output(0);
  CAFFE_ENFORCE_GT(X.size(), 0);
  Y->ResizeLike(X);
  hipLaunchKernelGGL(( CeilKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
      X.size(), X.data<float>(), Y->template mutable_data<float>());
  return true;
}

REGISTER_CUDA_OPERATOR(Ceil, CeilOp<float, CUDAContext>);
} // namespace caffe2
d748bc0ab520d6473860ed8873d591b005939ea4.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/ceil_op.h" #include "caffe2/utils/math.h" namespace caffe2 { template <typename T> __global__ void CeilKernel(const int N, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = std::ceil(X[i]); } } template <> bool CeilOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); CAFFE_ENFORCE_GT(X.size(), 0); Y->ResizeLike(X); CeilKernel<<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(Ceil, CeilOp<float, CUDAContext>); } // namespace caffe2
007d171727b26f5ec9137f1cd00ce8085d7a6770.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2017 Darius Rckert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/core/math/math.h" #include "saiga/core/util/table.h" #include "saiga/cuda/bitonicSort.h" #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/device_helper.h" #include "saiga/cuda/pinned_vector.h" #include <iostream> #include <vector> using Saiga::ArrayView; using Saiga::CUDA::ThreadInfo; inline HD int bfe(int i, int k) { return (i >> k) & 1; } template <typename T, unsigned int SIZE = 32> inline __device__ T shuffleSwapCompare(T x, int mask, int direction) { auto y = Saiga::CUDA::shfl_xor(x, mask, SIZE); return x < y == direction ? y : x; } template <typename T> inline __device__ T bitonicSortStageSimple(T v, unsigned int stage, unsigned int l) { for (int i = stage; i >= 0; --i) { auto distance = 1 << i; unsigned int direction; direction = bfe(l, i) ^ bfe(l, stage + 1); v = shuffleSwapCompare(v, distance, direction); } return v; } template <typename T> inline __device__ T bitonicWarpSortSimple(T v, unsigned int l) { for (int stage = 0; stage < 5; ++stage) { v = bitonicSortStageSimple(v, stage, l); } return v; } template <typename T> __global__ static void bitonicSortSimple(ArrayView<T> data) { ThreadInfo<> ti; if (ti.thread_id >= data.size()) return; auto l = ti.lane_id; auto v = data[ti.thread_id]; v = bitonicWarpSortSimple<T>(v, l); data[ti.thread_id] = v; } template <typename T, unsigned int SIZE = 32> __global__ static void bitonicSortSaiga(ArrayView<T> data) { ThreadInfo<> ti; if (ti.thread_id >= data.size()) return; auto l = ti.lane_id; auto v = data[ti.thread_id]; v = Saiga::CUDA::bitonicWarpSort<T, SIZE>(v, l); data[ti.thread_id] = v; } static void bitonicSortTest() { int N = 64; using T = float; Saiga::pinned_vector<T> h_data(N), res; thrust::device_vector<T> d_data(N); for (auto& f : h_data) { f = rand() % N; } Saiga::Table table({6, 7, 7}); { std::cout << "Full warp sort (width = 32)" << std::endl; table << "Id" << "Before" << "After"; d_data = h_data; hipLaunchKernelGGL(( bitonicSortSimple<T>), dim3(1), dim3(N), 0, 0, d_data); res = d_data; for (int i = 0; i < N; ++i) { table << i << h_data[i] << res[i]; } } { std::cout << "Partial warp sort (width = 8)" << std::endl; table << "Id" << "Before" << "After"; d_data = h_data; hipLaunchKernelGGL(( bitonicSortSaiga<T, 8>), dim3(1), dim3(N), 0, 0, d_data); res = d_data; for (int i = 0; i < N; ++i) { table << i << h_data[i] << res[i]; } } } int main(int argc, char* argv[]) { bitonicSortTest(); std::cout << "Done." << std::endl; }
007d171727b26f5ec9137f1cd00ce8085d7a6770.cu
/** * Copyright (c) 2017 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/core/math/math.h" #include "saiga/core/util/table.h" #include "saiga/cuda/bitonicSort.h" #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/device_helper.h" #include "saiga/cuda/pinned_vector.h" #include <iostream> #include <vector> using Saiga::ArrayView; using Saiga::CUDA::ThreadInfo; inline HD int bfe(int i, int k) { return (i >> k) & 1; } template <typename T, unsigned int SIZE = 32> inline __device__ T shuffleSwapCompare(T x, int mask, int direction) { auto y = Saiga::CUDA::shfl_xor(x, mask, SIZE); return x < y == direction ? y : x; } template <typename T> inline __device__ T bitonicSortStageSimple(T v, unsigned int stage, unsigned int l) { for (int i = stage; i >= 0; --i) { auto distance = 1 << i; unsigned int direction; direction = bfe(l, i) ^ bfe(l, stage + 1); v = shuffleSwapCompare(v, distance, direction); } return v; } template <typename T> inline __device__ T bitonicWarpSortSimple(T v, unsigned int l) { for (int stage = 0; stage < 5; ++stage) { v = bitonicSortStageSimple(v, stage, l); } return v; } template <typename T> __global__ static void bitonicSortSimple(ArrayView<T> data) { ThreadInfo<> ti; if (ti.thread_id >= data.size()) return; auto l = ti.lane_id; auto v = data[ti.thread_id]; v = bitonicWarpSortSimple<T>(v, l); data[ti.thread_id] = v; } template <typename T, unsigned int SIZE = 32> __global__ static void bitonicSortSaiga(ArrayView<T> data) { ThreadInfo<> ti; if (ti.thread_id >= data.size()) return; auto l = ti.lane_id; auto v = data[ti.thread_id]; v = Saiga::CUDA::bitonicWarpSort<T, SIZE>(v, l); data[ti.thread_id] = v; } static void bitonicSortTest() { int N = 64; using T = float; Saiga::pinned_vector<T> h_data(N), res; thrust::device_vector<T> d_data(N); for (auto& f : h_data) { f = rand() % N; } Saiga::Table table({6, 7, 7}); { std::cout << "Full warp sort (width = 32)" << std::endl; table << "Id" << "Before" << "After"; d_data = h_data; bitonicSortSimple<T><<<1, N>>>(d_data); res = d_data; for (int i = 0; i < N; ++i) { table << i << h_data[i] << res[i]; } } { std::cout << "Partial warp sort (width = 8)" << std::endl; table << "Id" << "Before" << "After"; d_data = h_data; bitonicSortSaiga<T, 8><<<1, N>>>(d_data); res = d_data; for (int i = 0; i < N; ++i) { table << i << h_data[i] << res[i]; } } } int main(int argc, char* argv[]) { bitonicSortTest(); std::cout << "Done." << std::endl; }
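The bitonic warp sort in this record is built from a shuffle-based compare-exchange. The standalone sketch below restates that building block with the plain __shfl_xor_sync intrinsic instead of the Saiga wrapper; the names and the full-warp mask are assumptions for illustration, not code from the files above.

// Compare-exchange across lanes: keep the max when dir == 1, the min when dir == 0.
__device__ float compareExchangeSketch(float v, int laneMask, int dir) {
    float partner = __shfl_xor_sync(0xffffffffu, v, laneMask);
    return ((v < partner) == dir) ? partner : v;
}

// Full 32-lane bitonic sort of one value per lane, mirroring the "simple" kernel above.
__device__ float bitonicWarpSortSketch(float v, unsigned lane) {
    for (int stage = 0; stage < 5; ++stage)        // 2^5 == 32 lanes in a warp
        for (int i = stage; i >= 0; --i)
            v = compareExchangeSketch(v, 1 << i,
                                      ((lane >> i) & 1) ^ ((lane >> (stage + 1)) & 1));
    return v;
}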
aadf4784aba23b2c7c96a5f165ff4d1fefa722cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2010-2011, NVIDIA Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA Corporation nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <voxelpipe/compact_ranges.h> #include <voxelpipe/thrust_arch.h> #include <thrust/scan.h> #define CTA_SIZE 512 #define CTA_H (512/32) namespace voxelpipe { namespace compact { __forceinline__ __device__ int scan_popc(bool p, int& popc, const int tidx, volatile int *red) { const uint32 mask = __ballot( p ); popc = __popc( mask ); return __popc( mask << (32 - tidx) ); } // intra-warp inclusive scan __forceinline__ __device__ void scan_warp(int tidx, unsigned int limit, volatile int *red) { const uint32 val = red[tidx]; // pad initial segment with zeros red[tidx] = 0; red += 32; // Hillis-Steele scan red[tidx] = val; red[tidx] += red[tidx-1]; red[tidx] += red[tidx-2]; red[tidx] += red[tidx-4]; red[tidx] += red[tidx-8]; red[tidx] += red[tidx-16]; // propagate resullpv back red[tidx-32] = red[tidx]; } __forceinline__ __device__ int scan_popc(bool valid, volatile int* sm_warp_popc) { int idx = threadIdx.x; int tidx = threadIdx.x & 31; int widx = threadIdx.x / 32; __shared__ volatile int sm_red[CTA_SIZE*2]; volatile int *sm_warp_red = sm_red + widx*64; int popc; int eidx = scan_popc(valid,popc,tidx,sm_warp_red); if (tidx == 0) sm_warp_popc[widx] = popc; // population count of this warp __syncthreads(); // wait until all warps have written wpopc to shared mem const unsigned int warpcount = CTA_H; // - use 1 warp to sum over wpopc if (widx == 0) scan_warp( idx, warpcount, sm_warp_popc ); __syncthreads(); return eidx; } // count the amount of output non-empty ranges in the source list __global__ void compact_ranges_count( uint32* offsets, const int32* src_begin, const int32* src_end, const uint32 n_elements, const uint32 n_blocks, const uint32 n_elements_per_block) { //---------------------------------------------- // Init //---------------------------------------------- // useful variables (assumes 1D indexing) __shared__ volatile int sm_warp_popc[64]; const uint32 block_id = blockIdx.x; const uint32 group_size = 
CTA_SIZE; const uint32 block_begin = block_id * n_elements_per_block; // constant across CTA const uint32 block_end = block_begin + n_elements_per_block; // constant across CTA uint32 offset = 0; for (uint32 group_begin = block_begin; group_begin < block_end; group_begin += group_size) { const uint32 group_end = min( group_begin + group_size, n_elements ); // constant across CTA if (group_end <= group_begin) break; __syncthreads(); //---------------------------------------------- // Compaction condition //---------------------------------------------- const uint32 local_id = threadIdx.x; const uint32 global_id = group_begin + local_id; // check if input should go to output bool valid = false; if (global_id < n_elements) { if (src_begin[ global_id ] < src_end[ global_id ]) valid = true; } //--------------------------------------------------- // Do an intra-cta reduction on the number of outputs //--------------------------------------------------- scan_popc( valid, sm_warp_popc ); // ---------------------------------------------- // Increment global offset // ---------------------------------------------- const unsigned int warpcount = CTA_H; offset += sm_warp_popc[warpcount-1]; // constant across CTA __syncthreads(); } if (threadIdx.x == 0) offsets[ block_id ] = offset; } // emit the compacted list of non-empty ranges __global__ void compact_ranges_write( int32* dest_begin, int32* dest_end, int32* dest_id, const uint32* offsets, const int32* src_begin, const int32* src_end, const uint32 n_elements, const uint32 n_blocks, const uint32 n_elements_per_block) { //---------------------------------------------- // Init //---------------------------------------------- // useful variables (assumes 1D indexing) const int widx = threadIdx.x / 32; __shared__ volatile int sm_warp_popc[64]; const uint32 block_id = blockIdx.x; const uint32 group_size = CTA_SIZE; const uint32 block_begin = block_id * n_elements_per_block; // constant across CTA const uint32 block_end = block_begin + n_elements_per_block; // constant across CTA uint32 offset = offsets[ block_id ]; // constant across CTA for (uint32 group_begin = block_begin; group_begin < block_end; group_begin += group_size) { const uint32 group_end = min( group_begin + group_size, n_elements ); // constant across CTA if (group_end <= group_begin) break; __syncthreads(); //---------------------------------------------- // Compaction condition //---------------------------------------------- const uint32 local_id = threadIdx.x; const uint32 global_id = group_begin + local_id; // check if input should go to output bool valid = false; int32 in_begin; int32 in_end; if (global_id < n_elements) { in_begin = src_begin[ global_id ]; in_end = src_end[ global_id ]; if (in_begin < in_end) valid = true; } //--------------------------------------------------- // Do an intra-cta reduction on the number of outputs //--------------------------------------------------- const int eidx = scan_popc( valid, sm_warp_popc ); //---------------------------------------------- // Write to compact output buffer //---------------------------------------------- if (valid) { //const uint32 tpopc = (widx ? 
sm_warp_popc[widx-1] : 0u) + eidx; uint32 tpopc = eidx; if (widx) tpopc += sm_warp_popc[widx-1]; const uint32 destIdx = offset + tpopc; dest_begin[destIdx] = in_begin; dest_end[destIdx] = in_end; dest_id[destIdx] = global_id; } // ---------------------------------------------- // Increment global offset // ---------------------------------------------- __syncthreads(); const unsigned int warpcount = CTA_H; offset += sm_warp_popc[warpcount-1]; // constant across CTA } } } // namespace compact Compact_ranges::Compact_ranges(const uint32 n) { const size_t max_blocks = thrust::detail::device::cuda::arch::max_active_blocks(compact::compact_ranges_count, CTA_SIZE, 0); m_counters.resize( max_blocks ); m_offsets.resize( max_blocks ); } // given two arrays {b[0], b[1], ..., b[n-1]} and // {e[0], e[1], ..., e[n-1]} specifying a set of n // possibly empty ranges { [b(i),e(i)) : i = 0,...,n-1 }, // return a copy of the two arrays with all the empty // ranges removed, and an array specifying their position // in the original list. // // \param dest_begin output range start indices // \param dest_end output range end indices // \param dest_id output range index in the original list // \param src_begin input range start indices // \param src_end input range end indices // \param n number of input elements // \result number of output elements // uint32 Compact_ranges::run(int32* dest_begin, int32* dest_end, int32* dest_id, const int32* src_begin, const int32* src_end, const uint32 n_elements) { const size_t max_blocks = thrust::detail::device::cuda::arch::max_active_blocks(compact::compact_ranges_count, CTA_SIZE, 0); const uint32 group_size = CTA_SIZE; const uint32 n_groups = (n_elements + group_size-1) / group_size; const size_t n_blocks = ::min( (int)max_blocks, (int)n_groups ); const uint32 n_groups_per_block = (n_groups + n_blocks-1) / n_blocks; // constant across CTA const uint32 n_elements_per_block = n_groups_per_block * group_size; // constant across CTA uint32* counters_ptr = thrust::raw_pointer_cast( &*(m_counters.begin()) ); uint32* offsets_ptr = thrust::raw_pointer_cast( &*(m_offsets.begin()) ); // count the number of outputs per block hipLaunchKernelGGL(( compact::compact_ranges_count), dim3(n_blocks),dim3(CTA_SIZE), 0, 0, counters_ptr, src_begin, src_end, n_elements, n_blocks, n_elements_per_block ); hipDeviceSynchronize(); // read the last block counter before it's overwritten const uint32 last_block = m_counters[n_blocks-1]; // do an exclusive scan on the block counters to get proper offsets thrust::exclusive_scan( m_counters.begin(), m_counters.begin() + n_blocks, m_offsets.begin(), uint32(0) ); hipDeviceSynchronize(); // perform the actual writing hipLaunchKernelGGL(( compact::compact_ranges_write), dim3(n_blocks),dim3(CTA_SIZE), 0, 0, dest_begin, dest_end, dest_id, offsets_ptr, src_begin, src_end, n_elements, n_blocks, n_elements_per_block ); hipDeviceSynchronize(); // return number of output elements return m_offsets[n_blocks-1] + last_block; } #undef CTA_SIZE #undef CTA_H } // namespace voxelpipe
aadf4784aba23b2c7c96a5f165ff4d1fefa722cb.cu
/* * Copyright (c) 2010-2011, NVIDIA Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA Corporation nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <voxelpipe/compact_ranges.h> #include <voxelpipe/thrust_arch.h> #include <thrust/scan.h> #define CTA_SIZE 512 #define CTA_H (512/32) namespace voxelpipe { namespace compact { __forceinline__ __device__ int scan_popc(bool p, int& popc, const int tidx, volatile int *red) { const uint32 mask = __ballot( p ); popc = __popc( mask ); return __popc( mask << (32 - tidx) ); } // intra-warp inclusive scan __forceinline__ __device__ void scan_warp(int tidx, unsigned int limit, volatile int *red) { const uint32 val = red[tidx]; // pad initial segment with zeros red[tidx] = 0; red += 32; // Hillis-Steele scan red[tidx] = val; red[tidx] += red[tidx-1]; red[tidx] += red[tidx-2]; red[tidx] += red[tidx-4]; red[tidx] += red[tidx-8]; red[tidx] += red[tidx-16]; // propagate resullpv back red[tidx-32] = red[tidx]; } __forceinline__ __device__ int scan_popc(bool valid, volatile int* sm_warp_popc) { int idx = threadIdx.x; int tidx = threadIdx.x & 31; int widx = threadIdx.x / 32; __shared__ volatile int sm_red[CTA_SIZE*2]; volatile int *sm_warp_red = sm_red + widx*64; int popc; int eidx = scan_popc(valid,popc,tidx,sm_warp_red); if (tidx == 0) sm_warp_popc[widx] = popc; // population count of this warp __syncthreads(); // wait until all warps have written wpopc to shared mem const unsigned int warpcount = CTA_H; // - use 1 warp to sum over wpopc if (widx == 0) scan_warp( idx, warpcount, sm_warp_popc ); __syncthreads(); return eidx; } // count the amount of output non-empty ranges in the source list __global__ void compact_ranges_count( uint32* offsets, const int32* src_begin, const int32* src_end, const uint32 n_elements, const uint32 n_blocks, const uint32 n_elements_per_block) { //---------------------------------------------- // Init //---------------------------------------------- // useful variables (assumes 1D indexing) __shared__ volatile int sm_warp_popc[64]; const uint32 block_id = blockIdx.x; const uint32 group_size = CTA_SIZE; const uint32 block_begin = block_id * n_elements_per_block; // constant across 
CTA const uint32 block_end = block_begin + n_elements_per_block; // constant across CTA uint32 offset = 0; for (uint32 group_begin = block_begin; group_begin < block_end; group_begin += group_size) { const uint32 group_end = min( group_begin + group_size, n_elements ); // constant across CTA if (group_end <= group_begin) break; __syncthreads(); //---------------------------------------------- // Compaction condition //---------------------------------------------- const uint32 local_id = threadIdx.x; const uint32 global_id = group_begin + local_id; // check if input should go to output bool valid = false; if (global_id < n_elements) { if (src_begin[ global_id ] < src_end[ global_id ]) valid = true; } //--------------------------------------------------- // Do an intra-cta reduction on the number of outputs //--------------------------------------------------- scan_popc( valid, sm_warp_popc ); // ---------------------------------------------- // Increment global offset // ---------------------------------------------- const unsigned int warpcount = CTA_H; offset += sm_warp_popc[warpcount-1]; // constant across CTA __syncthreads(); } if (threadIdx.x == 0) offsets[ block_id ] = offset; } // emit the compacted list of non-empty ranges __global__ void compact_ranges_write( int32* dest_begin, int32* dest_end, int32* dest_id, const uint32* offsets, const int32* src_begin, const int32* src_end, const uint32 n_elements, const uint32 n_blocks, const uint32 n_elements_per_block) { //---------------------------------------------- // Init //---------------------------------------------- // useful variables (assumes 1D indexing) const int widx = threadIdx.x / 32; __shared__ volatile int sm_warp_popc[64]; const uint32 block_id = blockIdx.x; const uint32 group_size = CTA_SIZE; const uint32 block_begin = block_id * n_elements_per_block; // constant across CTA const uint32 block_end = block_begin + n_elements_per_block; // constant across CTA uint32 offset = offsets[ block_id ]; // constant across CTA for (uint32 group_begin = block_begin; group_begin < block_end; group_begin += group_size) { const uint32 group_end = min( group_begin + group_size, n_elements ); // constant across CTA if (group_end <= group_begin) break; __syncthreads(); //---------------------------------------------- // Compaction condition //---------------------------------------------- const uint32 local_id = threadIdx.x; const uint32 global_id = group_begin + local_id; // check if input should go to output bool valid = false; int32 in_begin; int32 in_end; if (global_id < n_elements) { in_begin = src_begin[ global_id ]; in_end = src_end[ global_id ]; if (in_begin < in_end) valid = true; } //--------------------------------------------------- // Do an intra-cta reduction on the number of outputs //--------------------------------------------------- const int eidx = scan_popc( valid, sm_warp_popc ); //---------------------------------------------- // Write to compact output buffer //---------------------------------------------- if (valid) { //const uint32 tpopc = (widx ? 
sm_warp_popc[widx-1] : 0u) + eidx; uint32 tpopc = eidx; if (widx) tpopc += sm_warp_popc[widx-1]; const uint32 destIdx = offset + tpopc; dest_begin[destIdx] = in_begin; dest_end[destIdx] = in_end; dest_id[destIdx] = global_id; } // ---------------------------------------------- // Increment global offset // ---------------------------------------------- __syncthreads(); const unsigned int warpcount = CTA_H; offset += sm_warp_popc[warpcount-1]; // constant across CTA } } } // namespace compact Compact_ranges::Compact_ranges(const uint32 n) { const size_t max_blocks = thrust::detail::device::cuda::arch::max_active_blocks(compact::compact_ranges_count, CTA_SIZE, 0); m_counters.resize( max_blocks ); m_offsets.resize( max_blocks ); } // given two arrays {b[0], b[1], ..., b[n-1]} and // {e[0], e[1], ..., e[n-1]} specifying a set of n // possibly empty ranges { [b(i),e(i)) : i = 0,...,n-1 }, // return a copy of the two arrays with all the empty // ranges removed, and an array specifying their position // in the original list. // // \param dest_begin output range start indices // \param dest_end output range end indices // \param dest_id output range index in the original list // \param src_begin input range start indices // \param src_end input range end indices // \param n number of input elements // \result number of output elements // uint32 Compact_ranges::run(int32* dest_begin, int32* dest_end, int32* dest_id, const int32* src_begin, const int32* src_end, const uint32 n_elements) { const size_t max_blocks = thrust::detail::device::cuda::arch::max_active_blocks(compact::compact_ranges_count, CTA_SIZE, 0); const uint32 group_size = CTA_SIZE; const uint32 n_groups = (n_elements + group_size-1) / group_size; const size_t n_blocks = std::min( (int)max_blocks, (int)n_groups ); const uint32 n_groups_per_block = (n_groups + n_blocks-1) / n_blocks; // constant across CTA const uint32 n_elements_per_block = n_groups_per_block * group_size; // constant across CTA uint32* counters_ptr = thrust::raw_pointer_cast( &*(m_counters.begin()) ); uint32* offsets_ptr = thrust::raw_pointer_cast( &*(m_offsets.begin()) ); // count the number of outputs per block compact::compact_ranges_count<<<n_blocks,CTA_SIZE>>>( counters_ptr, src_begin, src_end, n_elements, n_blocks, n_elements_per_block ); cudaThreadSynchronize(); // read the last block counter before it's overwritten const uint32 last_block = m_counters[n_blocks-1]; // do an exclusive scan on the block counters to get proper offsets thrust::exclusive_scan( m_counters.begin(), m_counters.begin() + n_blocks, m_offsets.begin(), uint32(0) ); cudaThreadSynchronize(); // perform the actual writing compact::compact_ranges_write<<<n_blocks,CTA_SIZE>>>( dest_begin, dest_end, dest_id, offsets_ptr, src_begin, src_end, n_elements, n_blocks, n_elements_per_block ); cudaThreadSynchronize(); // return number of output elements return m_offsets[n_blocks-1] + last_block; } #undef CTA_SIZE #undef CTA_H } // namespace voxelpipe
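The compaction kernels in this record derive each thread's output slot from a warp ballot plus a popcount. Below is a minimal sketch of that idiom using the *_sync intrinsics; the original uses the older __ballot and a shift-based popcount, and the helper name here is an assumed, equivalent-in-effect formulation rather than code from the files above.

// For each lane: how many valid lanes sit below it (its slot within the warp's output),
// plus the warp's total number of outputs, from a single ballot.
__device__ int warpExclusiveRankSketch(bool valid, int lane, int* warpTotal) {
    unsigned mask = __ballot_sync(0xffffffffu, valid);    // one bit per lane that produces output
    *warpTotal = __popc(mask);                            // outputs produced by this warp
    return __popc(mask & ((1u << lane) - 1u));            // valid lanes below mine = my offset
}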
246bad8d32fd8de1f65ba3e0effe721b76aa8a5f.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zgesellcmmv.cu normal z -> s, Fri Jul 18 17:34:28 2014 */ #include "hip/hip_runtime.h" #include <stdio.h> #include "common_magma.h" #include "sm_32_intrinsics.h" #define PRECISION_s //#define TEXTURE /* // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_ldg( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, const float* __restrict__ d_x, float beta, float *d_y) { #if defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = __ldg( d_x+ i1 ); x2 = __ldg( d_x+ i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = __ldg( d_x + d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } #endif } */ // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel __global__ void zgesellptmv2d_kernel_1( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float* d_x, float beta, float *d_y) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = d_colind [offset+ blocksize * n + threadIdx.x ]; float val = d_val[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*d_x[col]; } } d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float* d_x, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = d_x[ i1 ]; x2 = d_x[ i2 ]; v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = d_x[ d_colind[ block*kk] ]; v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float* d_x, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = d_x[ i1 ]; x2 = d_x[ i2 ]; v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = d_x[ d_colind[ block*kk] ]; v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float* d_x, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float* d_x, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } /************************* same but using texture mem *************************/ #if defined(PRECISION_d) && defined(TEXTURE) __inline__ __device__ float read_from_tex( hipTextureObject_t texdx, const int& i){ int2 temp = tex1Dfetch<int2>( texdx, i ); return __hiloint2float(temp.y,temp.x); } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, hipTextureObject_t texdx, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } #endif /********************* end of texture versions **************************/ /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLP. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param blocksize magma_int_t number of rows in one ELL-slice @param slices magma_int_t number of slices in matrix @param alignment magma_int_t number of threads assigned to one row @param alpha float scalar multiplier @param d_val float* array containing values of A in SELLP @param d_colind magma_int_t* columnindices of A in SELLP @param d_rowptr magma_int_t* rowpointer of SELLP @param d_x float* input vector x @param beta float scalar multiplier @param d_y float* input/output vector y @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float *d_x, float beta, float *d_y ){ // using a 2D thread grid int num_threads = blocksize*alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( blocksize, alignment, 1); int dimgrid1 = sqrt(slices); int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_threads * sizeof( float ); #if defined(PRECISION_d) && defined(TEXTURE) // Create channel. hipChannelFormatDesc channel_desc; channel_desc = hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned); // Create resource descriptor. struct hipResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = hipResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)d_x; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m*sizeof(float); // Specify texture object parameters. struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.filterMode = hipFilterModePoint; texDesc.readMode = hipReadModeElementType; // Create texture object. 
hipTextureObject_t texdx = 0; hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); if( alignment == 4) hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 8) hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 16) hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 32) hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } hipDestroyTextureObject(texdx); #else if( alignment == 1) hipLaunchKernelGGL(( zgesellptmv2d_kernel_1), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 4) hipLaunchKernelGGL(( zgesellptmv2d_kernel_4), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 8) hipLaunchKernelGGL(( zgesellptmv2d_kernel_8), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 16) hipLaunchKernelGGL(( zgesellptmv2d_kernel_16), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 32) hipLaunchKernelGGL(( zgesellptmv2d_kernel_32), dim3(grid), dim3(block), Ms, magma_stream , m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } #endif return MAGMA_SUCCESS; }
246bad8d32fd8de1f65ba3e0effe721b76aa8a5f.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zgesellcmmv.cu normal z -> s, Fri Jul 18 17:34:28 2014 */ #include "cuda_runtime.h" #include <stdio.h> #include "common_magma.h" #include "sm_32_intrinsics.h" #define PRECISION_s //#define TEXTURE /* // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_ldg( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, const float* __restrict__ d_x, float beta, float *d_y) { #if defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = __ldg( d_x+ i1 ); x2 = __ldg( d_x+ i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = __ldg( d_x + d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } #endif } */ // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel __global__ void zgesellptmv2d_kernel_1( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float* d_x, float beta, float *d_y) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = d_rowptr[ blockIdx.x ]; int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = d_colind [offset+ blocksize * n + threadIdx.x ]; float val = d_val[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*d_x[col]; } } d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float* d_x, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = d_x[ i1 ]; x2 = d_x[ i2 ]; v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = d_x[ d_colind[ block*kk] ]; v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float* d_x, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = d_x[ i1 ]; x2 = d_x[ i2 ]; v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = d_x[ d_colind[ block*kk] ]; v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float* d_x, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float* d_x, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * d_x[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } /************************* same but using texture mem *************************/ #if defined(PRECISION_d) && defined(TEXTURE) __inline__ __device__ float read_from_tex( cudaTextureObject_t texdx, const int& i){ int2 temp = tex1Dfetch<int2>( texdx, i ); return __hiloint2float(temp.y,temp.x); } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; d_colind += offset + ldx ; d_val += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = d_colind[ block*kk]; i2 = d_colind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = d_val[ block*kk ]; v2 = d_val[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, d_colind[ block*kk] ); v1 = d_val[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, cudaTextureObject_t texdx, float beta, float *d_y) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = d_rowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (d_rowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ float val = d_val[ offset + ldx + block*k ]; int col = d_colind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { d_y[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row]; } } } } #endif /********************* end of texture versions **************************/ /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLP. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param blocksize magma_int_t number of rows in one ELL-slice @param slices magma_int_t number of slices in matrix @param alignment magma_int_t number of threads assigned to one row @param alpha float scalar multiplier @param d_val float* array containing values of A in SELLP @param d_colind magma_int_t* columnindices of A in SELLP @param d_rowptr magma_int_t* rowpointer of SELLP @param d_x float* input vector x @param beta float scalar multiplier @param d_y float* input/output vector y @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, float alpha, float *d_val, magma_index_t *d_colind, magma_index_t *d_rowptr, float *d_x, float beta, float *d_y ){ // using a 2D thread grid int num_threads = blocksize*alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( blocksize, alignment, 1); int dimgrid1 = sqrt(slices); int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_threads * sizeof( float ); #if defined(PRECISION_d) && defined(TEXTURE) // Create channel. cudaChannelFormatDesc channel_desc; channel_desc = cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned); // Create resource descriptor. struct cudaResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = cudaResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)d_x; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m*sizeof(float); // Specify texture object parameters. struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; // Create texture object. 
cudaTextureObject_t texdx = 0; cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); if( alignment == 4) zgesellptmv2d_kernel_4_tex<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 8) zgesellptmv2d_kernel_8_tex<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 16) zgesellptmv2d_kernel_16_tex<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else if( alignment == 32) zgesellptmv2d_kernel_32_tex<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, texdx, beta, d_y ); else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } cudaDestroyTextureObject(texdx); #else if( alignment == 1) zgesellptmv2d_kernel_1<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 4) zgesellptmv2d_kernel_4<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 8) zgesellptmv2d_kernel_8<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 16) zgesellptmv2d_kernel_16<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else if( alignment == 32) zgesellptmv2d_kernel_32<<< grid, block, Ms, magma_stream >>> ( m, n, blocksize, alignment, alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y ); else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } #endif return MAGMA_SUCCESS; }
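/*
 * Editor's note: a minimal CPU reference for the SELLP layout the kernels above assume,
 * useful for validating the GPU result. It mirrors the kernel indexing (entries of one
 * row are strided by `blocksize` inside a slice, and each slice is padded to a multiple
 * of `alignment` columns). The function name and the zero-padding convention (padded
 * entries carry val == 0) are assumptions for illustration, not MAGMA API.
 */
#include <vector>

static void sellp_spmv_reference( int m, int blocksize,
                                  const std::vector<float> &val,
                                  const std::vector<int>   &colind,
                                  const std::vector<int>   &rowptr,   // slices+1 entries
                                  const std::vector<float> &x,
                                  float alpha, float beta,
                                  std::vector<float>       &y )
{
    int slices = (m + blocksize - 1) / blocksize;
    for (int b = 0; b < slices; ++b) {
        int offset  = rowptr[b];
        int row_len = (rowptr[b + 1] - offset) / blocksize;   // padded row length of this slice
        for (int r = 0; r < blocksize; ++r) {
            int row = b * blocksize + r;
            if (row >= m) break;                               // last slice may contain padding rows
            float dot = 0.f;
            for (int j = 0; j < row_len; ++j)                  // j-th stored entry of local row r
                dot += val[offset + j * blocksize + r] * x[colind[offset + j * blocksize + r]];
            y[row] = alpha * dot + beta * y[row];
        }
    }
}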
6472deb9df6bc9caa47c6afb0c93be6bbcd51b40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include "vision_cuda.h" namespace rcnn{ namespace layers{ // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = (int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignBackwardFeature(const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio 
> 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward torch::Tensor ROIAlign_forward_cuda(const torch::Tensor& input, const torch::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = torch::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(hipGetLastError()); return output; } AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] { hipLaunchKernelGGL(( RoIAlignForward<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data<scalar_t>(), output.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return output; } // TODO remove the dependency on input and use instead its sizes -> save memory torch::Tensor ROIAlign_backward_cuda(const torch::Tensor& grad, const torch::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto grad_input = torch::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(hipGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] 
{ hipLaunchKernelGGL(( RoIAlignBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad.contiguous().data<scalar_t>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return grad_input; } } }
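/*
 * Editor's note: a hedged smoke-test sketch for the forward entry point above. The ROI
 * layout (one row per box: batch_index, x1, y1, x2, y2 in input-image coordinates) is
 * taken from how the kernel reads offset_bottom_rois[0..4]; the tensor sizes and values
 * are illustrative only and this helper is not called anywhere.
 */
static torch::Tensor roi_align_forward_smoke_test()
{
    // N x C x H x W feature map on the GPU with arbitrary contents.
    auto feat = torch::rand({2, 256, 50, 68}, torch::kCUDA);
    // Two ROIs: (batch_index, x1, y1, x2, y2) in un-scaled image coordinates.
    auto rois = torch::tensor({0.f,  10.f, 20.f, 200.f, 180.f,
                               1.f, 400.f, 60.f, 520.f, 300.f})
                    .view({2, 5}).to(torch::kCUDA);
    // spatial_scale = 1/stride of the feature map (stride 4 assumed here); 7x7 output bins.
    // Output shape: {num_rois, C, pooled_height, pooled_width} = {2, 256, 7, 7}.
    return rcnn::layers::ROIAlign_forward_cuda(feat, rois, 0.25f, 7, 7, /*sampling_ratio=*/2);
}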
6472deb9df6bc9caa47c6afb0c93be6bbcd51b40.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include "vision_cuda.h" namespace rcnn{ namespace layers{ // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = (int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignBackwardFeature(const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio 
> 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward torch::Tensor ROIAlign_forward_cuda(const torch::Tensor& input, const torch::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = torch::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return output; } AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] { RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>( output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data<scalar_t>(), output.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return output; } // TODO remove the dependency on input and use instead its sizes -> save memory torch::Tensor ROIAlign_backward_cuda(const torch::Tensor& grad, const torch::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto grad_input = torch::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(cudaGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] { RoIAlignBackwardFeature<scalar_t><<<grid, block, 
0, stream>>>( grad.numel(), grad.contiguous().data<scalar_t>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return grad_input; } } }
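/*
 * Editor's note: THCCeilDiv and THCudaCheck above come from the legacy THC library.
 * For reference, the same launch-shape computation (rounded-up division capped at 4096
 * blocks of 512 threads) can be written with plain CUDA and <algorithm>; this is a
 * hedged sketch of an equivalent, not a drop-in change:
 *
 *     long total = grad.numel();
 *     dim3 block(512);
 *     dim3 grid(static_cast<unsigned int>(std::min((total + 511L) / 512L, 4096L)));
 *     ... launch ...
 *     cudaError_t err = cudaGetLastError();   // instead of THCudaCheck(cudaGetLastError())
 */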
513ae927a7f06bcc9c4b79eec5cbbfd90c031810.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // A small gpu volumetric path tracer in 200 lines #include "device_launch_parameters.h" // Jerry Guo (c) CGV TU Delft #include "math_constants.h" // Based on smallvpt and cu-smallpt #include "hiprand/hiprand_kernel.h" // Compile: nvcc #include <stdlib.h> // Usage: cusmallvpt [#SPP] #include <stdio.h> // Result: image.ppm enum Refl_t { DIFF, SPEC, REFR }; inline void HandleError(hipError_t err) { if (hipSuccess != err) { printf("%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } struct Vec { // position, also color (r,g,b) float x, y, z; __host__ __device__ explicit Vec() { x = 0.f; y = 0.f; z = 0.f; } __host__ __device__ explicit Vec(float v) { x = v; y = v; z = v; } __host__ __device__ explicit Vec(float x_ = 0.f, float y_ = 0.f, float z_ = 0.f) { x = x_; y = y_; z = z_; } Vec(const Vec& vec) noexcept = default; Vec(Vec&& vec) noexcept = default; ~Vec() = default; __device__ Vec& operator=(const Vec& b) { this->x = b.x; this->y = b.y; this->z = b.z; return *this; } __device__ const Vec operator+(const Vec& b) const { return Vec(x + b.x, y + b.y, z + b.z); } __device__ const Vec operator-(const Vec& b) const { return Vec(x - b.x, y - b.y, z - b.z); } __host__ __device__ const Vec operator*(float b) const { return Vec(x * b, y * b, z * b); } __device__ const Vec mult(const Vec& b) const { return Vec(x * b.x, y * b.y, z * b.z); } __device__ float len() const { return sqrt(x * x + y * y + z * z); } __device__ Vec& norm() { float inv_len = 1.f / len(); this->x *= inv_len; this->y *= inv_len; this->z *= inv_len; return *this; } __device__ float dot(const Vec& b) const { return x * b.x + y * b.y + z * b.z; } // cross: __device__ Vec operator%(Vec& b) { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } __device__ Vec operator%(const Vec& b) { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } }; __device__ inline float len(const Vec& v) { return sqrt(v.x*v.x + v.y*v.y + v.z*v.z); } __device__ inline Vec norm(const Vec& v) { float inv_len = 1.f / len(v); return Vec(v.x * inv_len, v.y * inv_len, v.z * inv_len); } struct Ray { Vec o, d; __host__ __device__ explicit Ray() : o(Vec(0.f, 0.f, 0.f)), d(Vec(0.f, 0.f, 0.f)) {} __host__ __device__ explicit Ray(Vec o_, Vec d_) noexcept : o(o_), d(d_) {} Ray(const Ray& ray) noexcept = default; Ray(Ray&& ray) noexcept = default; ~Ray() = default; __device__ Ray& operator=(const Ray& r) { this->o = r.o; this->d = r.d; return *this; } }; struct Sphere { float rad; Vec p, e, c; Refl_t refl; __host__ __device__ explicit Sphere(float rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_) : rad(rad_), p(p_), e(e_), c(c_), refl(refl_) {} __device__ float intersect(const Ray& r, float* tin = NULL, float* tout = NULL) const { Vec op = p - r.o; float t, eps = 1e-4, b = op.dot(r.d), det = b * b - op.dot(op) + rad * rad; if (det < 0.f) return 0; else det = sqrt(det); if (tin && tout) { *tin = (b - det <= 0.f) ? 0.f : b - det; *tout = b + det; } return (t = b - det) > eps ? t : ((t = b + det) > eps ? t : 0.f); } }; __host__ __device__ inline float clamp(float x) { return x < 0.f ? 0.f : x>1.f ? 
1.f : x; } __host__ __device__ inline int toInt(float x) { return int(pow(clamp(x), 1.f / 2.2f) * 255.f + .5f); } __device__ inline bool intersect(const Sphere* spheres, size_t n_sphere, const Ray& r, float& t, int& id, float tmax = 1e20) { float d, inf = t = tmax; for (int i = int(n_sphere); i--;) if ((d = spheres[i].intersect(r)) && d < t) { t = d; id = i; } return t < inf; } __device__ inline float sampleSegment(float epsilon, float sigma, float smax) { return -log(1.f - epsilon * (1.f - exp(-sigma * smax))) / sigma; } __device__ inline Vec sampleSphere(float e1, float e2) { float z = 1.f - 2.f * e1, sint = sqrt(1.f - z * z); return Vec(cos(2.f * CUDART_PI_F * e2) * sint, sin(2.f * CUDART_PI_F * e2) * sint, z); } __device__ inline Vec sampleHG(float g, float e1, float e2) { float s = 1.f-2.f*e1,cost=(s+2.f*g*g*g*(-1.0+e1)*e1+g*g*s+2.f*g*(1.f-e1+e1*e1))/((1.f+g*s)*(1.f+g*s)),sint=sqrt(1.f-cost*cost); return Vec(cos(2.f * CUDART_PI_F * e2) * sint, sin(2.f * CUDART_PI_F * e2) * sint, cost); } __device__ inline void generateOrthoBasis(Vec& u, Vec& v, Vec w) { Vec coVec = w; if (fabs(w.x) <= fabs(w.y)) if (fabs(w.x) <= fabs(w.z)) coVec = Vec(0.f, -w.z, w.y); else coVec = Vec(-w.y, w.x, 0.f); else if (fabs(w.y) <= fabs(w.z)) coVec = Vec(-w.z, 0.f, w.x); else coVec = Vec(-w.y, w.x, 0.f); coVec.norm(); u = w % coVec, v = w % u; } __device__ inline float scatter(const Ray& r, Ray* sRay, float tin, float tout, float& s, const float& sigma_s, hiprandState_t* rand_state) { s = sampleSegment(hiprand_uniform(rand_state), sigma_s, tout - tin); Vec x = r.o + r.d * tin + r.d * s; Vec dir = sampleHG(-0.5f, hiprand_uniform(rand_state), hiprand_uniform(rand_state)); Vec u(0.f, 0.f, 0.f), v(0.f, 0.f, 0.f); generateOrthoBasis(u, v, r.d); dir = u * dir.x + v * dir.y + r.d * dir.z; if (sRay) *sRay = Ray(x, dir); return (1.0f - exp(-sigma_s * (tout - tin))); } __device__ Vec radiance(const Sphere* spheres, size_t n_sphere, const Ray& r, int _depth, hiprandState_t* rand_state) { Ray ray = r; Vec L(0.f, 0.f, 0.f); Vec B(1.f, 1.f, 1.f); int depth = _depth; float tnear, tfar, scaleBy = 1.f, absorption = 1.f; const Sphere homoMedium(300.f, Vec(50.f, 50.f, 80.f), Vec(0.f, 0.f, 0.f), Vec(0.f, 0.f, 0.f), DIFF); const float sigma_s = 0.009f, sigma_a = 0.006f, sigma_t = sigma_s + sigma_a; while (1) { float t; // distance to intersection int id = 0; // id of intersected object if (homoMedium.intersect(ray, &tnear, &tfar) > 0) { Ray sRay; float s, ms = scatter(ray, &sRay, tnear, tfar, s, sigma_s, rand_state), prob_s = ms; scaleBy = 1.f / (1.f - prob_s); if (hiprand_uniform(rand_state) <= prob_s) {// Sample surface or volume? if (!intersect(spheres, n_sphere, ray, t, id, tnear + s)) { B = B * ms * (1.f - prob_s); ray = sRay; ++depth; continue; } scaleBy = 1.f; } else if (!intersect(spheres, n_sphere, ray, t, id)) return L; if (t >= tnear) { float dist = (t > tfar ? tfar - tnear : t - tnear); absorption = exp(-sigma_t * dist); } } else if (!intersect(spheres, n_sphere, ray, t, id)) return L; const Sphere& obj = spheres[id]; Vec x = r.o + r.d * t, n = Vec(x - obj.p).norm(), nl = n.dot(ray.d) < 0 ? n : n * -1, f = obj.c, Le = obj.e; float p = f.x > f.y && f.x > f.z ? f.x : f.y > f.z ? 
f.y : f.z; if (++depth > 5) if (hiprand_uniform(rand_state) < p) B = B * (1 / p); else return L; if (n.dot(nl) > 0 || obj.refl != REFR) { B = B * absorption; Le = obj.e * absorption; } else scaleBy = 1.f; // Accumulate luminance and throughtput L = L + B.mult(Le); B = B.mult(f * scaleBy); ++depth; switch (obj.refl) { case SPEC: { ray = Ray(x, r.d - n * 2 * n.dot(r.d)); break; } case REFR: { ray = Ray(x, r.d - n * 2 * n.dot(r.d)); bool into = n.dot(nl) > 0; float nc = 1, nt = 1.5, nnt = into ? nc / nt : nt / nc, ddn = r.d.dot(nl), cos2t; if ((cos2t = 1 - nnt * nnt * (1 - ddn * ddn)) < 0) break; Vec tdir = Vec(r.d*nnt-n*((into?1:-1)*(ddn*nnt+sqrt(cos2t)))).norm(); float a=nt-nc,b=nt+nc,R0=a*a/(b*b),c = 1 - (into ? -ddn : tdir.dot(n)); float Re=R0+(1-R0)*c*c*c*c*c, Tr=1-Re,P=.25+.5*Re,RP=Re/P,TP = Tr / (1 - P); if (hiprand_uniform(rand_state) < P) B=B*RP; else { ray=Ray(x,tdir); B=B*TP; } break; } default: { float r1=2*CUDART_PI_F*hiprand_uniform(rand_state),r2=hiprand_uniform(rand_state),r2s = sqrt(r2); Vec w = nl, u = Vec((fabs(w.x) > .1 ? Vec(0, 1) : Vec(1.f, 1.f, 1.f)) % w).norm(), v = w % u; Vec d = Vec(u * cos(r1) * r2s + v * sin(r1) * r2s + w * sqrt(1 - r2)).norm(); ray = Ray(x, d); } } } } __global__ void render_kernel(const Sphere* spheres, const size_t n_sphere, Vec* Ls, size_t w, size_t h, int spp) { const size_t x = threadIdx.x + blockIdx.x * blockDim.x; const size_t y = threadIdx.y + blockIdx.y * blockDim.y; const size_t offset = x + y * blockDim.x * gridDim.x; const float inv_spp = 1.0f / float(spp); if (x >= w || y >= h) return; hiprandState_t rand_state; hiprand_init(offset, 0u, 0u, &rand_state); Ray cam(Vec(50.f, 52.f, 285.f), norm(Vec(0.f, -0.042612f, -1.f))); const float fov = 0.5135f; Vec cx = Vec(w * fov / h, 0.0f, 0.0f); Vec cy = norm(Vec(cx % cam.d)) * fov; size_t i = (h - 1u - y) * w + x; for (size_t sy = 0u; sy < 2u; ++sy) for (size_t sx = 0u; sx < 2u; ++sx) { Vec L(0.f, 0.f, 0.f); for (size_t s = 0u; s < spp; ++s) { float u1 = 2.f * hiprand_uniform(&rand_state); float u2 = 2.f * hiprand_uniform(&rand_state); float dx = (u1 < 1.f) ? sqrt(u1) - 1.f : 1.f - sqrt(2.f - u1); float dy = (u2 < 1.f) ? 
sqrt(u2) - 1.f : 1.f - sqrt(2.f - u2); Vec d = cx * (((sx+0.5+dx)*0.5+x)/w-0.5)+cy*(((sy+0.5+dy)*0.5+y)/h-0.5)+cam.d; Ray pRay(cam.o + d * 140.f, d.norm()); L = L + radiance(spheres, n_sphere, pRay, 0, &rand_state) * inv_spp; } Ls[i] = Ls[i] + Vec(0.25f * clamp(L.x), 0.25f * clamp(L.y), 0.25f * clamp(L.z)); } } hipError_t Render(int w, int h, unsigned int spp = 100) { const size_t n_sphere = 4; Sphere spheres[n_sphere] = {//Scene: radius, position, emission, color, material Sphere(26.5f, Vec(27.f, 18.5f, 78.f),Vec(0.f, 0.f, 0.f),Vec(1.f,1.f,1.f)*.75f,SPEC),//Mirr Sphere(12.f, Vec(70.f, 43.f, 78.f), Vec(0.f, 0.f, 0.f), Vec(0.27f,0.8f,0.8f), REFR),//Glas Sphere(8.f, Vec(55.f, 87.f, 78.f), Vec(0.f, 0.f, 0.f), Vec(1,1,1) * .75f, DIFF), //Lite Sphere(4.f, Vec(55.f, 80.f, 78.f), Vec(10.f,10.f,10.f), Vec(0.f, 0.f, 0.f), DIFF) //Lite }; HandleError(hipSetDevice(0)); const size_t n_pixels = size_t(w * h); Sphere* spheres_device; HandleError(hipMalloc((void**)&spheres_device, sizeof(spheres))); HandleError(hipMemcpy(spheres_device, spheres, sizeof(spheres), hipMemcpyHostToDevice)); Vec* film_device; HandleError(hipMalloc((void**)&film_device, sizeof(Vec) * n_pixels)); HandleError(hipMemset(film_device, 0, sizeof(Vec) * n_pixels)); const dim3 nblocks(w / 16, h / 16); const dim3 nthreads(16, 16); hipLaunchKernelGGL(( render_kernel) , dim3(nblocks), dim3(nthreads) , 0, 0, spheres_device, n_sphere, film_device, w, h, spp); Vec* film = (Vec*)malloc(n_pixels * sizeof(Vec)); HandleError(hipMemcpy(film, film_device, sizeof(Vec) * n_pixels, hipMemcpyDeviceToHost)); HandleError(hipFree(spheres_device)); HandleError(hipFree(film_device)); FILE* f = fopen("image.ppm", "w"); // Write image to PPM file. fprintf(f, "P3\n%d %d\n%d\n", w, h, 255); for (int i=0;i<w*h;i++) fprintf(f,"%d %d %d ",toInt(film[i].x),toInt(film[i].y),toInt(film[i].z)); free(film); return hipSuccess; } int main(int argc, char* argv[]) { int w = 1024, h = 768, spp = argc == 2 ? atoi(argv[1]) / 4 : 100; Render(w, h, spp); return 0; }
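/*
 * Editor's note: a tiny host-side check of the free-path sampling used by scatter() and
 * sampleSegment() above. The sampled distance inverts the CDF of an exponential
 * distribution truncated to [0, smax], so it can never leave the segment; the assertion
 * below verifies that for a few uniform draws. Purely illustrative, not part of the renderer.
 */
#include <cassert>
#include <cmath>

static float sample_segment_ref(float u, float sigma, float smax) {
    // Inverse CDF of p(s) proportional to sigma * exp(-sigma * s), restricted to [0, smax].
    return -std::log(1.f - u * (1.f - std::exp(-sigma * smax))) / sigma;
}

static void check_sample_segment() {
    const float sigma = 0.009f, smax = 100.f;   // sigma_s used in radiance()
    for (float u : {0.0f, 0.25f, 0.5f, 0.75f, 0.999f}) {
        float s = sample_segment_ref(u, sigma, smax);
        assert(s >= 0.f && s <= smax);          // the sample stays inside the segment
    }
}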
513ae927a7f06bcc9c4b79eec5cbbfd90c031810.cu
#include "cuda_runtime.h" // A small gpu volumetric path tracer in 200 lines #include "device_launch_parameters.h" // Jerry Guo (c) CGV TU Delft #include "math_constants.h" // Based on smallvpt and cu-smallpt #include "curand_kernel.h" // Compile: nvcc #include <stdlib.h> // Usage: cusmallvpt [#SPP] #include <stdio.h> // Result: image.ppm enum Refl_t { DIFF, SPEC, REFR }; inline void HandleError(cudaError_t err) { if (cudaSuccess != err) { printf("%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } struct Vec { // position, also color (r,g,b) float x, y, z; __host__ __device__ explicit Vec() { x = 0.f; y = 0.f; z = 0.f; } __host__ __device__ explicit Vec(float v) { x = v; y = v; z = v; } __host__ __device__ explicit Vec(float x_ = 0.f, float y_ = 0.f, float z_ = 0.f) { x = x_; y = y_; z = z_; } Vec(const Vec& vec) noexcept = default; Vec(Vec&& vec) noexcept = default; ~Vec() = default; __device__ Vec& operator=(const Vec& b) { this->x = b.x; this->y = b.y; this->z = b.z; return *this; } __device__ const Vec operator+(const Vec& b) const { return Vec(x + b.x, y + b.y, z + b.z); } __device__ const Vec operator-(const Vec& b) const { return Vec(x - b.x, y - b.y, z - b.z); } __host__ __device__ const Vec operator*(float b) const { return Vec(x * b, y * b, z * b); } __device__ const Vec mult(const Vec& b) const { return Vec(x * b.x, y * b.y, z * b.z); } __device__ float len() const { return sqrt(x * x + y * y + z * z); } __device__ Vec& norm() { float inv_len = 1.f / len(); this->x *= inv_len; this->y *= inv_len; this->z *= inv_len; return *this; } __device__ float dot(const Vec& b) const { return x * b.x + y * b.y + z * b.z; } // cross: __device__ Vec operator%(Vec& b) { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } __device__ Vec operator%(const Vec& b) { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } }; __device__ inline float len(const Vec& v) { return sqrt(v.x*v.x + v.y*v.y + v.z*v.z); } __device__ inline Vec norm(const Vec& v) { float inv_len = 1.f / len(v); return Vec(v.x * inv_len, v.y * inv_len, v.z * inv_len); } struct Ray { Vec o, d; __host__ __device__ explicit Ray() : o(Vec(0.f, 0.f, 0.f)), d(Vec(0.f, 0.f, 0.f)) {} __host__ __device__ explicit Ray(Vec o_, Vec d_) noexcept : o(o_), d(d_) {} Ray(const Ray& ray) noexcept = default; Ray(Ray&& ray) noexcept = default; ~Ray() = default; __device__ Ray& operator=(const Ray& r) { this->o = r.o; this->d = r.d; return *this; } }; struct Sphere { float rad; Vec p, e, c; Refl_t refl; __host__ __device__ explicit Sphere(float rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_) : rad(rad_), p(p_), e(e_), c(c_), refl(refl_) {} __device__ float intersect(const Ray& r, float* tin = NULL, float* tout = NULL) const { Vec op = p - r.o; float t, eps = 1e-4, b = op.dot(r.d), det = b * b - op.dot(op) + rad * rad; if (det < 0.f) return 0; else det = sqrt(det); if (tin && tout) { *tin = (b - det <= 0.f) ? 0.f : b - det; *tout = b + det; } return (t = b - det) > eps ? t : ((t = b + det) > eps ? t : 0.f); } }; __host__ __device__ inline float clamp(float x) { return x < 0.f ? 0.f : x>1.f ? 
1.f : x; } __host__ __device__ inline int toInt(float x) { return int(pow(clamp(x), 1.f / 2.2f) * 255.f + .5f); } __device__ inline bool intersect(const Sphere* spheres, size_t n_sphere, const Ray& r, float& t, int& id, float tmax = 1e20) { float d, inf = t = tmax; for (int i = int(n_sphere); i--;) if ((d = spheres[i].intersect(r)) && d < t) { t = d; id = i; } return t < inf; } __device__ inline float sampleSegment(float epsilon, float sigma, float smax) { return -log(1.f - epsilon * (1.f - exp(-sigma * smax))) / sigma; } __device__ inline Vec sampleSphere(float e1, float e2) { float z = 1.f - 2.f * e1, sint = sqrt(1.f - z * z); return Vec(cos(2.f * CUDART_PI_F * e2) * sint, sin(2.f * CUDART_PI_F * e2) * sint, z); } __device__ inline Vec sampleHG(float g, float e1, float e2) { float s = 1.f-2.f*e1,cost=(s+2.f*g*g*g*(-1.0+e1)*e1+g*g*s+2.f*g*(1.f-e1+e1*e1))/((1.f+g*s)*(1.f+g*s)),sint=sqrt(1.f-cost*cost); return Vec(cos(2.f * CUDART_PI_F * e2) * sint, sin(2.f * CUDART_PI_F * e2) * sint, cost); } __device__ inline void generateOrthoBasis(Vec& u, Vec& v, Vec w) { Vec coVec = w; if (fabs(w.x) <= fabs(w.y)) if (fabs(w.x) <= fabs(w.z)) coVec = Vec(0.f, -w.z, w.y); else coVec = Vec(-w.y, w.x, 0.f); else if (fabs(w.y) <= fabs(w.z)) coVec = Vec(-w.z, 0.f, w.x); else coVec = Vec(-w.y, w.x, 0.f); coVec.norm(); u = w % coVec, v = w % u; } __device__ inline float scatter(const Ray& r, Ray* sRay, float tin, float tout, float& s, const float& sigma_s, curandState_t* rand_state) { s = sampleSegment(curand_uniform(rand_state), sigma_s, tout - tin); Vec x = r.o + r.d * tin + r.d * s; Vec dir = sampleHG(-0.5f, curand_uniform(rand_state), curand_uniform(rand_state)); Vec u(0.f, 0.f, 0.f), v(0.f, 0.f, 0.f); generateOrthoBasis(u, v, r.d); dir = u * dir.x + v * dir.y + r.d * dir.z; if (sRay) *sRay = Ray(x, dir); return (1.0f - exp(-sigma_s * (tout - tin))); } __device__ Vec radiance(const Sphere* spheres, size_t n_sphere, const Ray& r, int _depth, curandState_t* rand_state) { Ray ray = r; Vec L(0.f, 0.f, 0.f); Vec B(1.f, 1.f, 1.f); int depth = _depth; float tnear, tfar, scaleBy = 1.f, absorption = 1.f; const Sphere homoMedium(300.f, Vec(50.f, 50.f, 80.f), Vec(0.f, 0.f, 0.f), Vec(0.f, 0.f, 0.f), DIFF); const float sigma_s = 0.009f, sigma_a = 0.006f, sigma_t = sigma_s + sigma_a; while (1) { float t; // distance to intersection int id = 0; // id of intersected object if (homoMedium.intersect(ray, &tnear, &tfar) > 0) { Ray sRay; float s, ms = scatter(ray, &sRay, tnear, tfar, s, sigma_s, rand_state), prob_s = ms; scaleBy = 1.f / (1.f - prob_s); if (curand_uniform(rand_state) <= prob_s) {// Sample surface or volume? if (!intersect(spheres, n_sphere, ray, t, id, tnear + s)) { B = B * ms * (1.f - prob_s); ray = sRay; ++depth; continue; } scaleBy = 1.f; } else if (!intersect(spheres, n_sphere, ray, t, id)) return L; if (t >= tnear) { float dist = (t > tfar ? tfar - tnear : t - tnear); absorption = exp(-sigma_t * dist); } } else if (!intersect(spheres, n_sphere, ray, t, id)) return L; const Sphere& obj = spheres[id]; Vec x = r.o + r.d * t, n = Vec(x - obj.p).norm(), nl = n.dot(ray.d) < 0 ? n : n * -1, f = obj.c, Le = obj.e; float p = f.x > f.y && f.x > f.z ? f.x : f.y > f.z ? 
f.y : f.z; if (++depth > 5) if (curand_uniform(rand_state) < p) B = B * (1 / p); else return L; if (n.dot(nl) > 0 || obj.refl != REFR) { B = B * absorption; Le = obj.e * absorption; } else scaleBy = 1.f; // Accumulate luminance and throughtput L = L + B.mult(Le); B = B.mult(f * scaleBy); ++depth; switch (obj.refl) { case SPEC: { ray = Ray(x, r.d - n * 2 * n.dot(r.d)); break; } case REFR: { ray = Ray(x, r.d - n * 2 * n.dot(r.d)); bool into = n.dot(nl) > 0; float nc = 1, nt = 1.5, nnt = into ? nc / nt : nt / nc, ddn = r.d.dot(nl), cos2t; if ((cos2t = 1 - nnt * nnt * (1 - ddn * ddn)) < 0) break; Vec tdir = Vec(r.d*nnt-n*((into?1:-1)*(ddn*nnt+sqrt(cos2t)))).norm(); float a=nt-nc,b=nt+nc,R0=a*a/(b*b),c = 1 - (into ? -ddn : tdir.dot(n)); float Re=R0+(1-R0)*c*c*c*c*c, Tr=1-Re,P=.25+.5*Re,RP=Re/P,TP = Tr / (1 - P); if (curand_uniform(rand_state) < P) B=B*RP; else { ray=Ray(x,tdir); B=B*TP; } break; } default: { float r1=2*CUDART_PI_F*curand_uniform(rand_state),r2=curand_uniform(rand_state),r2s = sqrt(r2); Vec w = nl, u = Vec((fabs(w.x) > .1 ? Vec(0, 1) : Vec(1.f, 1.f, 1.f)) % w).norm(), v = w % u; Vec d = Vec(u * cos(r1) * r2s + v * sin(r1) * r2s + w * sqrt(1 - r2)).norm(); ray = Ray(x, d); } } } } __global__ void render_kernel(const Sphere* spheres, const size_t n_sphere, Vec* Ls, size_t w, size_t h, int spp) { const size_t x = threadIdx.x + blockIdx.x * blockDim.x; const size_t y = threadIdx.y + blockIdx.y * blockDim.y; const size_t offset = x + y * blockDim.x * gridDim.x; const float inv_spp = 1.0f / float(spp); if (x >= w || y >= h) return; curandState rand_state; curand_init(offset, 0u, 0u, &rand_state); Ray cam(Vec(50.f, 52.f, 285.f), norm(Vec(0.f, -0.042612f, -1.f))); const float fov = 0.5135f; Vec cx = Vec(w * fov / h, 0.0f, 0.0f); Vec cy = norm(Vec(cx % cam.d)) * fov; size_t i = (h - 1u - y) * w + x; for (size_t sy = 0u; sy < 2u; ++sy) for (size_t sx = 0u; sx < 2u; ++sx) { Vec L(0.f, 0.f, 0.f); for (size_t s = 0u; s < spp; ++s) { float u1 = 2.f * curand_uniform(&rand_state); float u2 = 2.f * curand_uniform(&rand_state); float dx = (u1 < 1.f) ? sqrt(u1) - 1.f : 1.f - sqrt(2.f - u1); float dy = (u2 < 1.f) ? 
sqrt(u2) - 1.f : 1.f - sqrt(2.f - u2); Vec d = cx * (((sx+0.5+dx)*0.5+x)/w-0.5)+cy*(((sy+0.5+dy)*0.5+y)/h-0.5)+cam.d; Ray pRay(cam.o + d * 140.f, d.norm()); L = L + radiance(spheres, n_sphere, pRay, 0, &rand_state) * inv_spp; } Ls[i] = Ls[i] + Vec(0.25f * clamp(L.x), 0.25f * clamp(L.y), 0.25f * clamp(L.z)); } } cudaError_t Render(int w, int h, unsigned int spp = 100) { const size_t n_sphere = 4; Sphere spheres[n_sphere] = {//Scene: radius, position, emission, color, material Sphere(26.5f, Vec(27.f, 18.5f, 78.f),Vec(0.f, 0.f, 0.f),Vec(1.f,1.f,1.f)*.75f,SPEC),//Mirr Sphere(12.f, Vec(70.f, 43.f, 78.f), Vec(0.f, 0.f, 0.f), Vec(0.27f,0.8f,0.8f), REFR),//Glas Sphere(8.f, Vec(55.f, 87.f, 78.f), Vec(0.f, 0.f, 0.f), Vec(1,1,1) * .75f, DIFF), //Lite Sphere(4.f, Vec(55.f, 80.f, 78.f), Vec(10.f,10.f,10.f), Vec(0.f, 0.f, 0.f), DIFF) //Lite }; HandleError(cudaSetDevice(0)); const size_t n_pixels = size_t(w * h); Sphere* spheres_device; HandleError(cudaMalloc((void**)&spheres_device, sizeof(spheres))); HandleError(cudaMemcpy(spheres_device, spheres, sizeof(spheres), cudaMemcpyHostToDevice)); Vec* film_device; HandleError(cudaMalloc((void**)&film_device, sizeof(Vec) * n_pixels)); HandleError(cudaMemset(film_device, 0, sizeof(Vec) * n_pixels)); const dim3 nblocks(w / 16, h / 16); const dim3 nthreads(16, 16); render_kernel <<< nblocks, nthreads >>> (spheres_device, n_sphere, film_device, w, h, spp); Vec* film = (Vec*)malloc(n_pixels * sizeof(Vec)); HandleError(cudaMemcpy(film, film_device, sizeof(Vec) * n_pixels, cudaMemcpyDeviceToHost)); HandleError(cudaFree(spheres_device)); HandleError(cudaFree(film_device)); FILE* f = fopen("image.ppm", "w"); // Write image to PPM file. fprintf(f, "P3\n%d %d\n%d\n", w, h, 255); for (int i=0;i<w*h;i++) fprintf(f,"%d %d %d ",toInt(film[i].x),toInt(film[i].y),toInt(film[i].z)); free(film); return cudaSuccess; } int main(int argc, char* argv[]) { int w = 1024, h = 768, spp = argc == 2 ? atoi(argv[1]) / 4 : 100; Render(w, h, spp); return 0; }
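/*
 * Editor's note: dim3 nblocks(w / 16, h / 16) only covers the full image when w and h are
 * multiples of 16, which holds for the default 1024x768. Because render_kernel already
 * guards against out-of-range pixels, a rounded-up grid is the more general launch shape;
 * a hedged sketch of that variant:
 *
 *     const dim3 nthreads(16, 16);
 *     const dim3 nblocks((w + nthreads.x - 1) / nthreads.x,
 *                        (h + nthreads.y - 1) / nthreads.y);
 *     render_kernel <<< nblocks, nthreads >>> (spheres_device, n_sphere, film_device, w, h, spp);
 */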
kernel_hip.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef ENABLE_CURD #include<curd_lib_host.h> #endif /* * Copyright (c) 2016 University of Cordoba and University of Illinois * All rights reserved. * * Developed by: IMPACT Research Group * University of Cordoba and University of Illinois * http://impact.crhc.illinois.edu/ * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * with the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * > Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * > Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimers in the * documentation and/or other materials provided with the distribution. * > Neither the names of IMPACT Research Group, University of Cordoba, * University of Illinois nor the names of its contributors may be used * to endorse or promote products derived from this Software without * specific prior written permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH * THE SOFTWARE. 
* */ #define _CUDA_COMPILER_ #include "support/common.h" #include "support/partitioner.h" // CUDA baseline kernel ------------------------------------------------------------------------------------------ // Generate model on GPU side __device__ int gen_model_paramGPU(int x1, int y1, int vx1, int vy1, int x2, int y2, int vx2, int vy2, float *model_param) { float temp; // xc -> model_param[0], yc -> model_param[1], D -> model_param[2], R -> model_param[3] temp = (float)((vx1 * (vx1 - (2 * vx2))) + (vx2 * vx2) + (vy1 * vy1) - (vy2 * ((2 * vy1) - vy2))); if(temp == 0) { // Check to prevent division by zero return (0); } model_param[0] = (((vx1 * ((-vx2 * x1) + (vx1 * x2) - (vx2 * x2) + (vy2 * y1) - (vy2 * y2))) + (vy1 * ((-vy2 * x1) + (vy1 * x2) - (vy2 * x2) - (vx2 * y1) + (vx2 * y2))) + (x1 * ((vy2 * vy2) + (vx2 * vx2)))) / temp); model_param[1] = (((vx2 * ((vy1 * x1) - (vy1 * x2) - (vx1 * y1) + (vx2 * y1) - (vx1 * y2))) + (vy2 * ((-vx1 * x1) + (vx1 * x2) - (vy1 * y1) + (vy2 * y1) - (vy1 * y2))) + (y2 * ((vx1 * vx1) + (vy1 * vy1)))) / temp); temp = (float)((x1 * (x1 - (2 * x2))) + (x2 * x2) + (y1 * (y1 - (2 * y2))) + (y2 * y2)); if(temp == 0) { // Check to prevent division by zero return (0); } model_param[2] = ((((x1 - x2) * (vx1 - vx2)) + ((y1 - y2) * (vy1 - vy2))) / temp); model_param[3] = ((((x1 - x2) * (vy1 - vy2)) + ((y2 - y1) * (vx1 - vx2))) / temp); return (1); } __global__ void RANSAC_kernel_block(int flowvector_count, int max_iter, int error_threshold, float convergence_threshold, int n_tasks, float alpha, float *model_param_local, flowvector *flowvectors, int *random_numbers, int *model_candidate, int *outliers_candidate, int *g_out_id #ifdef CUDA_8_0 , int *worklist #endif ) { extern __shared__ int l_mem[]; int* outlier_block_count = l_mem; #ifdef CUDA_8_0 int* l_tmp = &outlier_block_count[1]; #endif #ifdef CUDA_8_0 Partitioner p = partitioner_create(n_tasks, alpha, worklist, l_tmp); #else Partitioner p = partitioner_create(n_tasks, alpha); #endif const int tx = threadIdx.x; const int bx = blockIdx.x; const int num_blocks = gridDim.x; float vx_error, vy_error; int outlier_local_count = 0; // Each block performs one iteration for(int iter = gpu_first(&p); gpu_more(&p); iter = gpu_next(&p)) { float *model_param = &model_param_local [4 * iter]; // xc=model_param_sh[0], yc=model_param_sh[1], D=model_param_sh[2], R=model_param_sh[3] // Thread 0 computes F-o-F model (SISD phase) if(tx == 0) { outlier_block_count[0] = 0; // Select two random flow vectors int rand_num = random_numbers[iter * 2 + 0]; flowvector fv[2]; fv[0] = flowvectors[rand_num]; rand_num = random_numbers[iter * 2 + 1]; fv[1] = flowvectors[rand_num]; int ret = 0; int vx1 = fv[0].vx - fv[0].x; int vy1 = fv[0].vy - fv[0].y; int vx2 = fv[1].vx - fv[1].x; int vy2 = fv[1].vy - fv[1].y; // Function to generate model parameters according to F-o-F (xc, yc, D and R) ret = gen_model_paramGPU(fv[0].x, fv[0].y, vx1, vy1, fv[1].x, fv[1].y, vx2, vy2, model_param); if(ret == 0) model_param[0] = -2011; } __syncthreads(); if(model_param[0] == -2011) continue; // SIMD phase // Reset local outlier counter outlier_local_count = 0; // Compute number of outliers for(int i = tx; i < flowvector_count; i += blockDim.x) { flowvector fvreg = flowvectors[i]; // x, y, vx, vy vx_error = fvreg.x + ((int)((fvreg.x - model_param[0]) * model_param[2]) - (int)((fvreg.y - model_param[1]) * model_param[3])) - fvreg.vx; vy_error = fvreg.y + ((int)((fvreg.y - model_param[1]) * model_param[2]) + (int)((fvreg.x - model_param[0]) * model_param[3])) - fvreg.vy; 
if((fabs(vx_error) >= error_threshold) || (fabs(vy_error) >= error_threshold)) { outlier_local_count++; } } #ifdef CUDA_8_0 atomicAdd_system(&outlier_block_count[0], outlier_local_count); #else atomicAdd(&outlier_block_count[0], outlier_local_count); #endif __syncthreads(); if(tx == 0) { // Compare to threshold #ifdef CUDA_8_0 if(atomicAdd_system(&outlier_block_count[0], 0) < flowvector_count * convergence_threshold) { int index = atomicAdd_system(g_out_id, 1); outliers_candidate[index] = atomicAdd_system(&outlier_block_count[0], 0); model_candidate[index] = iter; } #else if(outlier_block_count[0] < flowvector_count * convergence_threshold) { int index = atomicAdd(g_out_id, 1); outliers_candidate[index] = outlier_block_count[0]; model_candidate[index] = iter; } #endif } } } hipError_t call_RANSAC_kernel_block(int blocks, int threads, int flowvector_count, int max_iter, int error_threshold, float convergence_threshold, int n_tasks, float alpha, float *model_param_local, flowvector *flowvectors, int *random_numbers, int *model_candidate, int *outliers_candidate, int *g_out_id, int l_mem_size #ifdef CUDA_8_0 , int *worklist #endif ){ dim3 dimGrid(blocks); dim3 dimBlock(threads); #ifdef ENABLE_CURD allocateReadWriteSets(dimGrid, dimBlock); #endif hipLaunchKernelGGL(( RANSAC_kernel_block), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, flowvector_count, max_iter, error_threshold, convergence_threshold, n_tasks, alpha, model_param_local, flowvectors, random_numbers, model_candidate, outliers_candidate, g_out_id #ifdef CUDA_8_0 , worklist #endif ); #ifdef ENABLE_CURD freeReadWriteSets(dimGrid, dimBlock); #endif hipError_t err = hipGetLastError(); return err; }
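/*
 * Editor's note: a hedged sketch of how the wrapper above might be called. The dynamic
 * shared memory (l_mem_size) has to cover the per-block outlier counter (one int) plus,
 * when the code is built with -DCUDA_8_0, the extra int the partitioner keeps in shared
 * memory (a worklist pointer is then also required as the last argument). All device
 * pointer names below (d_model_param, d_flowvectors, ...) are placeholders; in the
 * benchmark they come from the setup code under support/.
 *
 *     int blocks = 64, threads = 256;
 *     int l_mem_size = sizeof(int);            // + sizeof(int) when CUDA_8_0 is defined
 *     hipError_t err = call_RANSAC_kernel_block(
 *             blocks, threads, flowvector_count, max_iter, error_threshold,
 *             convergence_threshold, n_tasks, alpha,
 *             d_model_param, d_flowvectors, d_random_numbers,
 *             d_model_candidate, d_outliers_candidate, d_g_out_id, l_mem_size);
 *     if (err != hipSuccess) printf("%s\n", hipGetErrorString(err));
 */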
kernel_hip.cuh
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2010-2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Consecutive reduction upsweep reduction kernel ******************************************************************************/ #pragma once #include <b40c/util/cta_work_distribution.cuh> #include <b40c/consecutive_reduction/upsweep/cta.cuh> namespace b40c { namespace consecutive_reduction { namespace upsweep { /** * Consecutive reduction upsweep reduction pass */ template <typename KernelPolicy> __device__ __forceinline__ void UpsweepPass( typename KernelPolicy::KeyType *d_in_keys, typename KernelPolicy::ValueType *d_in_values, typename KernelPolicy::ValueType *d_spine_partials, typename KernelPolicy::SizeT *d_spine_flags, typename KernelPolicy::ReductionOp reduction_op, typename KernelPolicy::EqualityOp equality_op, util::CtaWorkDistribution<typename KernelPolicy::SizeT> &work_decomposition, typename KernelPolicy::SmemStorage &smem_storage) { typedef Cta<KernelPolicy> Cta; typedef typename KernelPolicy::SizeT SizeT; typedef typename KernelPolicy::SoaScanOperator SoaScanOperator; // CTA processing abstraction Cta cta( smem_storage, d_in_keys, d_in_values, d_spine_partials, d_spine_flags, SoaScanOperator(reduction_op), equality_op); // Determine our threadblock's work range util::CtaWorkLimits<SizeT> work_limits; work_decomposition.template GetCtaWorkLimits< KernelPolicy::LOG_TILE_ELEMENTS, KernelPolicy::LOG_SCHEDULE_GRANULARITY>(work_limits); // Quit if we're the last threadblock (no need for it in upsweep). 
if (work_limits.last_block) { return; } cta.ProcessWorkRange(work_limits); } /** * Consecutive reduction upsweep reduction kernel entry point */ template <typename KernelPolicy> __launch_bounds__ (KernelPolicy::THREADS, KernelPolicy::MIN_CTA_OCCUPANCY) __global__ void Kernel( typename KernelPolicy::KeyType *d_in_keys, typename KernelPolicy::ValueType *d_in_values, typename KernelPolicy::ValueType *d_spine_partials, typename KernelPolicy::SizeT *d_spine_flags, typename KernelPolicy::ReductionOp reduction_op, typename KernelPolicy::EqualityOp equality_op, util::CtaWorkDistribution<typename KernelPolicy::SizeT> work_decomposition) { // Shared storage for the kernel __shared__ typename KernelPolicy::SmemStorage smem_storage; UpsweepPass<KernelPolicy>( d_in_keys, d_in_values, d_spine_partials, d_spine_flags, reduction_op, equality_op, work_decomposition, smem_storage); } } // namespace upsweep } // namespace consecutive_reduction } // namespace b40c
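For readers unfamiliar with the upsweep/spine structure that the templated kernel above implements, here is a minimal standalone sketch of the same idea without the b40c policy and CTA machinery: each block reduces its slice of the input and writes one partial to a spine array that a later pass consumes. This is an illustrative simplification (a plain sum rather than b40c's key-segmented consecutive reduction); all names are hypothetical.

#include <hip/hip_runtime.h>

// Assumes blockDim.x is a power of two and at most 256.
__global__ void upsweep_partial_sum(const int *d_in, int *d_spine, int n) {
    __shared__ int smem[256];
    const int tid = threadIdx.x;
    const int stride = blockDim.x * gridDim.x;

    // Grid-stride accumulation into a per-thread register.
    int sum = 0;
    for (int i = blockIdx.x * blockDim.x + tid; i < n; i += stride)
        sum += d_in[i];

    // Block-level tree reduction in shared memory.
    smem[tid] = sum;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) smem[tid] += smem[tid + s];
        __syncthreads();
    }

    // One partial per block lands in the spine; a later spine/downsweep pass finishes the job.
    if (tid == 0) d_spine[blockIdx.x] = smem[0];
}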
82cbe4c52c4e04f714116fd5e366b88830bbabf3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <gtest/gtest.h> #include <ATen/ATen.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/MemoryAccess.cuh> #include <ATen/hip/HIPContext.h> using namespace at::native; using namespace at::native::memory; __managed__ double4 buffer1[1024]; __managed__ double4 buffer2[1024]; void reset_buffers() { for (int i = 0; i < 1024; i++) { buffer1[i].x = i; buffer1[i].y = i + 0.1; buffer1[i].z = i + 0.2; buffer1[i].w = i + 0.3; buffer2[2].x = -i; buffer2[2].y = -(i + 0.1); buffer2[2].z = -(i + 0.2); buffer2[2].w = -(i + 0.3); } } #ifdef __HIP_PLATFORM_HCC__ TEST(TestLoops, HasSameArgTypes) { // This is a compile-time unit test. If this file compiles without error, // then the test passes and during runtime, we just need to return. using namespace at::native::modern::detail; using func1_t = int (*)(float, float); using func2_t = int (*)(bool, float, float); using func3_t = int (*)(float); using func4_t = int (*)(); static_assert(has_same_arg_types<func1_t>::value, "func1_t has the same argument types"); static_assert(!has_same_arg_types<func2_t>::value, "func2_t does not have the same argument types"); static_assert(has_same_arg_types<func3_t>::value, "func3_t has the same argument types"); static_assert(has_same_arg_types<func4_t>::value, "func4_t has the same argument types"); return; } #endif TEST(TestVectorizedMemoryAccess, CanVectorizeUpTo) { char *ptr = reinterpret_cast<char *>(buffer1); ASSERT_EQ(can_vectorize_up_to<bool>(ptr), 4); ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr), 4); ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr), 4); ASSERT_EQ(can_vectorize_up_to<int>(ptr), 4); ASSERT_EQ(can_vectorize_up_to<int64_t>(ptr), 4); ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 1), 1); ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 1), 1); ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 2), 2); ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 2), 2); ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 2), 1); ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 4), 4); ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 4), 4); ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 4), 2); ASSERT_EQ(can_vectorize_up_to<int>(ptr + 4), 1); ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 8), 4); ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 8), 4); ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 8), 4); ASSERT_EQ(can_vectorize_up_to<int>(ptr + 8), 2); ASSERT_EQ(can_vectorize_up_to<int64_t>(ptr + 8), 1); } // The following kernel copy values by using vectorized policies // defined in `ATen/native/cuda/MemoryAccess.cuh` template <typename scalar_t, int vec_size> __global__ void vectorized_copy(scalar_t *dst, scalar_t *src) { using vectorized = policies::vectorized<vec_size>; auto policy = vectorized(); scalar_t buf[thread_work_size]; auto accessor = [&](int index) -> scalar_t & { return buf[index]; }; policy.load(accessor, src + 256 * blockIdx.x); policy.store(accessor, dst + 256 * blockIdx.x); } TEST(TestVectorizedMemoryAccess, CopyKernel) { if (!at::cuda::is_available()) { return; } double *b1 = reinterpret_cast<double *>(buffer1); double *b2 = reinterpret_cast<double *>(buffer2); // vec4 copy reset_buffers(); hipDeviceSynchronize(); hipLaunchKernelGGL(( vectorized_copy<double, 4>), dim3(16), dim3(64), 0, 0, b2, b1); hipDeviceSynchronize(); ASSERT_EQ(hipGetLastError(), hipSuccess); for (int i = 0; i < 1024; i++) { ASSERT_EQ(buffer1[i].x, buffer2[i].x); ASSERT_EQ(buffer1[i].y, buffer2[i].y); ASSERT_EQ(buffer1[i].z, buffer2[i].z); ASSERT_EQ(buffer1[i].w, buffer2[i].w); 
} // vec2 copy reset_buffers(); hipDeviceSynchronize(); hipLaunchKernelGGL(( vectorized_copy<double, 2>), dim3(16), dim3(64), 0, 0, b2, b1); hipDeviceSynchronize(); ASSERT_EQ(hipGetLastError(), hipSuccess); for (int i = 0; i < 1024; i++) { ASSERT_EQ(buffer1[i].x, buffer2[i].x); ASSERT_EQ(buffer1[i].y, buffer2[i].y); ASSERT_EQ(buffer1[i].z, buffer2[i].z); ASSERT_EQ(buffer1[i].w, buffer2[i].w); } // vec1 copy reset_buffers(); hipDeviceSynchronize(); hipLaunchKernelGGL(( vectorized_copy<double, 1>), dim3(16), dim3(64), 0, 0, b2, b1); hipDeviceSynchronize(); ASSERT_EQ(hipGetLastError(), hipSuccess); for (int i = 0; i < 1024; i++) { ASSERT_EQ(buffer1[i].x, buffer2[i].x); ASSERT_EQ(buffer1[i].y, buffer2[i].y); ASSERT_EQ(buffer1[i].z, buffer2[i].z); ASSERT_EQ(buffer1[i].w, buffer2[i].w); } // unaligned for (int i = 0; i < 16; i++) { for (int j = 0; j < 16; j++) { b1 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer1) + i); b2 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer2) + j); hipGetLastError(); hipDeviceSynchronize(); hipLaunchKernelGGL(( vectorized_copy<double, 4>), dim3(1), dim3(64), 0, 0, b2, b1); hipDeviceSynchronize(); auto err = hipGetLastError(); if (i % 16 == 0 && j % 16 == 0) { ASSERT_EQ(err, hipSuccess); } else { ASSERT_EQ(err, hipErrorMisalignedAddress); } } } }
82cbe4c52c4e04f714116fd5e366b88830bbabf3.cu
#include <gtest/gtest.h> #include <ATen/ATen.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/MemoryAccess.cuh> #include <ATen/cuda/CUDAContext.h> using namespace at::native; using namespace at::native::memory; __managed__ double4 buffer1[1024]; __managed__ double4 buffer2[1024]; void reset_buffers() { for (int i = 0; i < 1024; i++) { buffer1[i].x = i; buffer1[i].y = i + 0.1; buffer1[i].z = i + 0.2; buffer1[i].w = i + 0.3; buffer2[2].x = -i; buffer2[2].y = -(i + 0.1); buffer2[2].z = -(i + 0.2); buffer2[2].w = -(i + 0.3); } } #ifdef __HIP_PLATFORM_HCC__ TEST(TestLoops, HasSameArgTypes) { // This is a compile-time unit test. If this file compiles without error, // then the test passes and during runtime, we just need to return. using namespace at::native::modern::detail; using func1_t = int (*)(float, float); using func2_t = int (*)(bool, float, float); using func3_t = int (*)(float); using func4_t = int (*)(); static_assert(has_same_arg_types<func1_t>::value, "func1_t has the same argument types"); static_assert(!has_same_arg_types<func2_t>::value, "func2_t does not have the same argument types"); static_assert(has_same_arg_types<func3_t>::value, "func3_t has the same argument types"); static_assert(has_same_arg_types<func4_t>::value, "func4_t has the same argument types"); return; } #endif TEST(TestVectorizedMemoryAccess, CanVectorizeUpTo) { char *ptr = reinterpret_cast<char *>(buffer1); ASSERT_EQ(can_vectorize_up_to<bool>(ptr), 4); ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr), 4); ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr), 4); ASSERT_EQ(can_vectorize_up_to<int>(ptr), 4); ASSERT_EQ(can_vectorize_up_to<int64_t>(ptr), 4); ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 1), 1); ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 1), 1); ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 2), 2); ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 2), 2); ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 2), 1); ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 4), 4); ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 4), 4); ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 4), 2); ASSERT_EQ(can_vectorize_up_to<int>(ptr + 4), 1); ASSERT_EQ(can_vectorize_up_to<bool>(ptr + 8), 4); ASSERT_EQ(can_vectorize_up_to<int8_t>(ptr + 8), 4); ASSERT_EQ(can_vectorize_up_to<int16_t>(ptr + 8), 4); ASSERT_EQ(can_vectorize_up_to<int>(ptr + 8), 2); ASSERT_EQ(can_vectorize_up_to<int64_t>(ptr + 8), 1); } // The following kernel copy values by using vectorized policies // defined in `ATen/native/cuda/MemoryAccess.cuh` template <typename scalar_t, int vec_size> __global__ void vectorized_copy(scalar_t *dst, scalar_t *src) { using vectorized = policies::vectorized<vec_size>; auto policy = vectorized(); scalar_t buf[thread_work_size]; auto accessor = [&](int index) -> scalar_t & { return buf[index]; }; policy.load(accessor, src + 256 * blockIdx.x); policy.store(accessor, dst + 256 * blockIdx.x); } TEST(TestVectorizedMemoryAccess, CopyKernel) { if (!at::cuda::is_available()) { return; } double *b1 = reinterpret_cast<double *>(buffer1); double *b2 = reinterpret_cast<double *>(buffer2); // vec4 copy reset_buffers(); cudaDeviceSynchronize(); vectorized_copy<double, 4><<<16, 64>>>(b2, b1); cudaDeviceSynchronize(); ASSERT_EQ(cudaGetLastError(), cudaSuccess); for (int i = 0; i < 1024; i++) { ASSERT_EQ(buffer1[i].x, buffer2[i].x); ASSERT_EQ(buffer1[i].y, buffer2[i].y); ASSERT_EQ(buffer1[i].z, buffer2[i].z); ASSERT_EQ(buffer1[i].w, buffer2[i].w); } // vec2 copy reset_buffers(); cudaDeviceSynchronize(); vectorized_copy<double, 2><<<16, 64>>>(b2, b1); 
cudaDeviceSynchronize(); ASSERT_EQ(cudaGetLastError(), cudaSuccess); for (int i = 0; i < 1024; i++) { ASSERT_EQ(buffer1[i].x, buffer2[i].x); ASSERT_EQ(buffer1[i].y, buffer2[i].y); ASSERT_EQ(buffer1[i].z, buffer2[i].z); ASSERT_EQ(buffer1[i].w, buffer2[i].w); } // vec1 copy reset_buffers(); cudaDeviceSynchronize(); vectorized_copy<double, 1><<<16, 64>>>(b2, b1); cudaDeviceSynchronize(); ASSERT_EQ(cudaGetLastError(), cudaSuccess); for (int i = 0; i < 1024; i++) { ASSERT_EQ(buffer1[i].x, buffer2[i].x); ASSERT_EQ(buffer1[i].y, buffer2[i].y); ASSERT_EQ(buffer1[i].z, buffer2[i].z); ASSERT_EQ(buffer1[i].w, buffer2[i].w); } // unaligned for (int i = 0; i < 16; i++) { for (int j = 0; j < 16; j++) { b1 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer1) + i); b2 = reinterpret_cast<double *>(reinterpret_cast<char *>(buffer2) + j); cudaGetLastError(); cudaDeviceSynchronize(); vectorized_copy<double, 4><<<1, 64>>>(b2, b1); cudaDeviceSynchronize(); auto err = cudaGetLastError(); if (i % 16 == 0 && j % 16 == 0) { ASSERT_EQ(err, cudaSuccess); } else { ASSERT_EQ(err, cudaErrorMisalignedAddress); } } } }
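The expectations in TestVectorizedMemoryAccess.CanVectorizeUpTo follow a simple alignment rule. Below is a simplified, standalone illustration of that rule (not the actual at::native::memory::can_vectorize_up_to implementation): the widest usable vector access is determined by whether the address is a multiple of 4x, 2x, or 1x sizeof(scalar_t).

#include <cstdint>
#include <cstdio>

template <typename scalar_t>
int max_vec_size(const void *ptr) {
    const uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
    if (addr % (4 * sizeof(scalar_t)) == 0) return 4;  // e.g. an int4/float4-style access
    if (addr % (2 * sizeof(scalar_t)) == 0) return 2;
    return 1;
}

int main() {
    alignas(16) char buf[64];
    printf("%d\n", max_vec_size<int>(buf));      // 4: 16-byte aligned
    printf("%d\n", max_vec_size<int>(buf + 4));  // 1: only 4-byte aligned
    printf("%d\n", max_vec_size<int>(buf + 8));  // 2: 8-byte aligned
    return 0;
}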
a518a3b1ae1a87c4765011834af9a5025378ab21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Implementing matrix multiplication in CUDA // @Jiangyan Feng, [email protected] #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here int Row = blockIdx.y*blockDim.y+threadIdx.y; int Col = blockIdx.x*blockDim.x+threadIdx.x; float sum = 0.0; if ((Row < numARows) && (Col < numCColumns)){ for (int k = 0; k < numAColumns; ++k){ sum += A[Row*numAColumns+k]*B[k*numBColumns+Col]; } C[Row*numBColumns+Col]=sum; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set // this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *)malloc(numCRows*numCColumns*sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here int sizeA=numARows*numAColumns*sizeof(float); int sizeB=numBRows*numBColumns*sizeof(float); int sizeC=numCRows*numCColumns*sizeof(float); hipMalloc((void **) &deviceA, sizeA); hipMalloc((void **) &deviceB, sizeB); hipMalloc((void **) &deviceC, sizeC); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here hipMemcpy(deviceA, hostA, sizeA, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB, sizeB, hipMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here int BLOCK_WIDTH = 16; dim3 DimGrid(ceil(numCColumns/(BLOCK_WIDTH*1.0)), ceil(numCRows/(BLOCK_WIDTH*1.0)), 1); dim3 DimBlock(BLOCK_WIDTH, BLOCK_WIDTH, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( matrixMultiply), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here hipMemcpy(hostC, deviceC, sizeC, hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU 
memory here hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
a518a3b1ae1a87c4765011834af9a5025378ab21.cu
// Implementing matrix multiplication in CUDA // @Jiangyan Feng, [email protected] #include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here int Row = blockIdx.y*blockDim.y+threadIdx.y; int Col = blockIdx.x*blockDim.x+threadIdx.x; float sum = 0.0; if ((Row < numARows) && (Col < numCColumns)){ for (int k = 0; k < numAColumns; ++k){ sum += A[Row*numAColumns+k]*B[k*numBColumns+Col]; } C[Row*numBColumns+Col]=sum; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set // this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *)malloc(numCRows*numCColumns*sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here int sizeA=numARows*numAColumns*sizeof(float); int sizeB=numBRows*numBColumns*sizeof(float); int sizeC=numCRows*numCColumns*sizeof(float); cudaMalloc((void **) &deviceA, sizeA); cudaMalloc((void **) &deviceB, sizeB); cudaMalloc((void **) &deviceC, sizeC); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here cudaMemcpy(deviceA, hostA, sizeA, cudaMemcpyHostToDevice); cudaMemcpy(deviceB, hostB, sizeB, cudaMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here int BLOCK_WIDTH = 16; dim3 DimGrid(ceil(numCColumns/(BLOCK_WIDTH*1.0)), ceil(numCRows/(BLOCK_WIDTH*1.0)), 1); dim3 DimBlock(BLOCK_WIDTH, BLOCK_WIDTH, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiply<<<DimGrid, DimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here cudaMemcpy(hostC, deviceC, sizeC, cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); 
wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
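Both versions of this assignment rely on wbSolution for checking; a small CPU reference check like the hedged sketch below can also verify the copied-back result directly. The tolerance and function name are illustrative and not part of the original code.

#include <cmath>
#include <cstdio>

// Recompute C = A * B on the host and compare element-wise against the GPU result.
bool verify_matmul(const float *A, const float *B, const float *C,
                   int numARows, int numAColumns, int numBColumns,
                   float tol = 1e-3f) {
    for (int row = 0; row < numARows; ++row) {
        for (int col = 0; col < numBColumns; ++col) {
            float sum = 0.0f;
            for (int k = 0; k < numAColumns; ++k)
                sum += A[row * numAColumns + k] * B[k * numBColumns + col];
            if (std::fabs(sum - C[row * numBColumns + col]) > tol) {
                printf("mismatch at (%d,%d): expected %f, got %f\n",
                       row, col, sum, C[row * numBColumns + col]);
                return false;
            }
        }
    }
    return true;
}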
e49097a86c3d4461874cd6f91c193805589feb98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <color_spinor_field.h> #include <clover_field.h> // Do we need this now? // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_WILSON_PACK_SPINOR #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <sys/time.h> #include <blas_quda.h> #include <inline_ptx.h> namespace quda { namespace pack { #include <dslash_constants.h> #include <dslash_textures.h> } // end namespace pack using namespace pack; #ifdef MULTI_GPU static int commDim[QUDA_MAX_DIM]; // Whether to do comms or not void setPackComms(const int *comm_dim) { for (int i=0; i<QUDA_MAX_DIM; i++) commDim[i] = comm_dim[i]; } #else void setPackComms(const int *comm_dim) { ; } #endif #include <dslash_index.cuh> // routines for packing the ghost zones (multi-GPU only) #ifdef MULTI_GPU template <typename FloatN> struct PackParam { FloatN *out[2*4]; float *outNorm[2*4]; FloatN *in; float *inNorm; int threads; // total number of threads // offsets which determine thread mapping to dimension int threadDimMapLower[4]; // lowest thread which maps to dim int threadDimMapUpper[4]; // greatest thread + 1 which maps to dim int parity; #ifdef USE_TEXTURE_OBJECTS hipTextureObject_t inTex; hipTextureObject_t inTexNorm; #endif int dim; int face_num; int X[QUDA_MAX_DIM]; // lattice dimensions int ghostFace[4]; int sp_stride; }; template<typename FloatN> std::ostream& operator<<(std::ostream& output, const PackParam<FloatN>& param) { output << "threads = " << param.threads << std::endl; output << "threadDimMapLower = {" << param.threadDimMapLower[0] << "," << param.threadDimMapLower[1] << "," << param.threadDimMapLower[2] << "," << param.threadDimMapLower[3] << "}" << std::endl; output << "threadDimMapUpper = {" << param.threadDimMapUpper[0] << "," << param.threadDimMapUpper[1] << "," << param.threadDimMapUpper[2] << "," << param.threadDimMapUpper[3] << "}" << std::endl; output << "parity = " << param.parity << std::endl; output << "dim = " << param.dim << std::endl; output << "face_num = " << param.face_num << std::endl; output << "X = {" << param.X[0] << ","<< param.X[1] << "," << param.X[2] << "," << param.X[3] << "}" << std::endl; output << "ghostFace = {" << param.ghostFace[0] << ","<< param.ghostFace[1] << "," << param.ghostFace[2] << "," << param.ghostFace[3] << "}" << std::endl; output << "sp_stride = " << param.sp_stride << std::endl; return output; } // Extend the PackParam class to PackExtendedParam template<typename Float> struct PackExtendedParam : public PackParam<Float> { PackExtendedParam(){} PackExtendedParam(const PackParam<Float>& base) : PackParam<Float>(base) {} int R[QUDA_MAX_DIM]; // boundary dimensions }; /** * Determines which face a given thread is computing. Also rescale * face_idx so that is relative to a given dimension. 
*/ /* template <typename Param> __device__ inline int dimFromFaceIndex (int &face_idx, const Param &param) { if (face_idx < param.threadDimMapUpper[0]) { return 0; } else if (face_idx < param.threadDimMapUpper[1]) { face_idx -= param.threadDimMapLower[1]; return 1; } else if (face_idx < param.threadDimMapUpper[2]) { face_idx -= param.threadDimMapLower[2]; return 2; } else { // this is only called if we use T kernel packing face_idx -= param.threadDimMapLower[3]; return 3; } } */ #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) // double precision #if (defined DIRECT_ACCESS_WILSON_PACK_SPINOR) || (defined FERMI_NO_DBLE_TEX) #define READ_SPINOR READ_SPINOR_DOUBLE #define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP #define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN #define SPINORTEX in #else #define READ_SPINOR READ_SPINOR_DOUBLE_TEX #define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexDouble #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_DOUBLE2 #define SPINOR_DOUBLE template <int dim, int dagger, int face_num> static inline __device__ void packFaceWilsonCore(double2 *out, float *outNorm, const double2 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, PackParam<double2> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } template <int dim, int dagger, int face_num> static inline __device__ void unpackFaceWilsonCore(double2 *out, float *outNorm, const double2 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, PackParam<double2> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR #undef SPINOR_DOUBLE // single precision #ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR #define READ_SPINOR READ_SPINOR_SINGLE #define READ_SPINOR_UP READ_SPINOR_SINGLE_UP #define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN #define SPINORTEX in #else #define READ_SPINOR READ_SPINOR_SINGLE_TEX #define READ_SPINOR_UP READ_SPINOR_SINGLE_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexSingle #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_FLOAT4 template <int dim, int dagger, int face_num> static inline __device__ void packFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, const PackParam<float4> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } template <int dim, int dagger, int face_num> static inline __device__ void unpackFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, const PackParam<float4> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR // half precision #ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR #define READ_SPINOR READ_SPINOR_HALF #define READ_SPINOR_UP READ_SPINOR_HALF_UP #define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN #define 
SPINORTEX in #else #define READ_SPINOR READ_SPINOR_HALF_TEX #define READ_SPINOR_UP READ_SPINOR_HALF_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexHalf #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_SHORT4 template <int dim, int dagger, int face_num> static inline __device__ void packFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, const PackParam<short4> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } template <int dim, int dagger, int face_num> static inline __device__ void unpackFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, const PackParam<short4> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR template <int dagger, typename FloatN> __global__ void packFaceWilsonKernel(PackParam<FloatN> param) { const int nFace = 1; // 1 face for Wilson int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (face_idx >= nFace*param.ghostFace[0]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndex<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } else { const int idx = indexFromFaceIndex<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } } else if (dim == 1) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (face_idx >= nFace*param.ghostFace[1]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndex<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } else { const int idx = indexFromFaceIndex<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } } else if (dim == 2) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (face_idx >= nFace*param.ghostFace[2]) ? 
1 : 0; face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndex<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } else { const int idx = indexFromFaceIndex<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } } else { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (face_idx >= nFace*param.ghostFace[3]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndex<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } else { const int idx = indexFromFaceIndex<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } } } template <int dagger, typename FloatN, int nFace> __global__ void packFaceExtendedWilsonKernel(PackParam<FloatN> param) { int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end // if param.face_num==2 pack both the start and the end, otherwise pack the region of the lattice // specified by param.face_num const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[0]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } else { const int idx = indexFromFaceIndexExtended<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[1]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } else { const int idx = indexFromFaceIndexExtended<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[2]) ? 
1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } else { const int idx = indexFromFaceIndexExtended<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } } else { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[3]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } else { const int idx = indexFromFaceIndexExtended<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } } } template <int dagger, typename FloatN, int nFace> __global__ void unpackFaceExtendedWilsonKernel(PackParam<FloatN> param) { int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end // if param.face_num==2 pack both the start and the end, otherwise pack the region of the lattice // specified by param.face_num const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[0]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); unpackFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } else { const int idx = indexFromFaceIndexExtended<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); unpackFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[1]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); unpackFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } else { const int idx = indexFromFaceIndexExtended<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); unpackFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (param.face_num==2) ? 
((face_idx >= nFace*param.ghostFace[2]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); unpackFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } else { const int idx = indexFromFaceIndexExtended<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); unpackFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } } else { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[3]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); unpackFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } else { const int idx = indexFromFaceIndexExtended<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); unpackFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } } } #endif // GPU_WILSON_DIRAC || GPU_DOMAIN_WALL_DIRAC #if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC) #endif // GPU_WILSON_DIRAC || GPU_DOMAIN_WALL_DIRAC #if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC) // double precision #endif // GPU_WILSON_DIRAC || GPU_DOMAIN_WALL_DIRAC #if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC) // double precision #if (defined DIRECT_ACCESS_WILSON_PACK_SPINOR) || (defined FERMI_NO_DBLE_TEX) #define READ_SPINOR READ_SPINOR_DOUBLE #define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP #define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN #define SPINORTEX in #else #define READ_SPINOR READ_SPINOR_DOUBLE_TEX #define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexDouble #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_DOUBLE2 #define SPINOR_DOUBLE template <int dim, int dagger, int face_num> static inline __device__ void packTwistedFaceWilsonCore(double2 *out, float *outNorm, const double2 *in, const float *inNorm, double a, double b, const int &idx, const int &face_idx, const int &face_volume, PackParam<double2> &param) { if (dagger) { #include "wilson_pack_twisted_face_dagger_core.h" } else { #include "wilson_pack_twisted_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR #undef SPINOR_DOUBLE // single precision #ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR #define READ_SPINOR READ_SPINOR_SINGLE #define READ_SPINOR_UP READ_SPINOR_SINGLE_UP #define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN #define SPINORTEX in #else #define READ_SPINOR READ_SPINOR_SINGLE_TEX #define READ_SPINOR_UP READ_SPINOR_SINGLE_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexSingle #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_FLOAT4 template <int dim, int dagger, int face_num> static inline __device__ void packTwistedFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float 
*inNorm, float a, float b, const int &idx, const int &face_idx, const int &face_volume, const PackParam<float4> &param) { if (dagger) { #include "wilson_pack_twisted_face_dagger_core.h" } else { #include "wilson_pack_twisted_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR // half precision #ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR #define READ_SPINOR READ_SPINOR_HALF #define READ_SPINOR_UP READ_SPINOR_HALF_UP #define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN #define SPINORTEX in #else #define READ_SPINOR READ_SPINOR_HALF_TEX #define READ_SPINOR_UP READ_SPINOR_HALF_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexHalf #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_SHORT4 template <int dim, int dagger, int face_num> static inline __device__ void packTwistedFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm, float a, float b, const int &idx, const int &face_idx, const int &face_volume, const PackParam<short4> &param) { if (dagger) { #include "wilson_pack_twisted_face_dagger_core.h" } else { #include "wilson_pack_twisted_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR template <int dagger, typename FloatN, typename Float> __global__ void packTwistedFaceWilsonKernel(Float a, Float b, PackParam<FloatN> param) { const int nFace = 1; // 1 face for Wilson int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (face_idx >= nFace*param.ghostFace[0]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndex<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X); packTwistedFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[0], param); } else { const int idx = indexFromFaceIndex<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X); packTwistedFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (face_idx >= nFace*param.ghostFace[1]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndex<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X); packTwistedFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[1], param); } else { const int idx = indexFromFaceIndex<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X); packTwistedFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (face_idx >= nFace*param.ghostFace[2]) ? 
1 : 0; face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndex<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X); packTwistedFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[2], param); } else { const int idx = indexFromFaceIndex<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X); packTwistedFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[2], param); } } else { const int face_num = (face_idx >= nFace*param.ghostFace[3]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndex<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X); packTwistedFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm, a, b,idx, face_idx, param.ghostFace[3], param); } else { const int idx = indexFromFaceIndex<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X); packTwistedFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[3], param); } } } #endif // GPU_TWISTED_MASS_DIRAC template <typename FloatN, typename Float> class PackFace : public Tunable { protected: FloatN *faces; const cudaColorSpinorField *in; const int dagger; const int parity; const int nFace; const int dim; const int face_num; // compute how many threads we need in total for the face packing unsigned int threads() const { unsigned int threads = 0; if(dim < 0){ // if dim is negative, pack all dimensions for (int i=0; i<4; i++) { if (!commDim[i]) continue; if ((i==3 && !(getKernelPackT() || getTwistPack()))) continue; threads += 2*nFace*in->GhostFace()[i]; // 2 for forwards and backwards faces } }else{ // pack only in dim dimension if(commDim[dim] && dim!=3 || (getKernelPackT() || getTwistPack())){ threads = nFace*in->GhostFace()[dim]; if(face_num==2) threads *= 2; // sending data forwards and backwards } } return threads; } virtual int inputPerSite() const = 0; virtual int outputPerSite() const = 0; // prepare the param struct with kernel arguments PackParam<FloatN> prepareParam(int dim=-1, int face_num=2) { PackParam<FloatN> param; param.in = (FloatN*)in->V(); param.inNorm = (float*)in->Norm(); param.dim = dim; param.face_num = face_num; param.parity = parity; for(int d=0; d<QUDA_MAX_DIM; d++) param.X[d] = in->X()[d]; param.X[0] *= 2; #ifdef USE_TEXTURE_OBJECTS param.inTex = in->Tex(); param.inTexNorm = in->TexNorm(); #endif param.threads = threads(); param.sp_stride = in->Stride(); int prev = -1; // previous dimension that was partitioned for (int i=0; i<4; i++) { param.threadDimMapLower[i] = 0; param.threadDimMapUpper[i] = 0; if (!commDim[i]) continue; param.threadDimMapLower[i] = (prev>=0 ? 
param.threadDimMapUpper[prev] : 0); param.threadDimMapUpper[i] = param.threadDimMapLower[i] + 2*nFace*in->GhostFace()[i]; size_t faceBytes = nFace*outputPerSite()*in->GhostFace()[i]*sizeof(faces->x); if (typeid(FloatN) == typeid(short4) || typeid(FloatN) == typeid(short2)) { faceBytes += nFace*in->GhostFace()[i]*sizeof(float); param.out[2*i] = (FloatN*)((char*)faces + (outputPerSite()*sizeof(faces->x) + sizeof(float))*param.threadDimMapLower[i]); param.outNorm[2*i] = (float*)((char*)param.out[2*i] + nFace*outputPerSite()*in->GhostFace()[i]*sizeof(faces->x)); } else { param.out[2*i] = (FloatN*)((char*)faces+outputPerSite()*sizeof(faces->x)*param.threadDimMapLower[i]); } param.out[2*i+1] = (FloatN*)((char*)param.out[2*i] + faceBytes); param.outNorm[2*i+1] = (float*)((char*)param.outNorm[2*i] + faceBytes); prev=i; } param.ghostFace[0] = param.X[1]*param.X[2]*param.X[3]/2; param.ghostFace[1] = param.X[0]*param.X[2]*param.X[3]/2; param.ghostFace[2] = param.X[0]*param.X[1]*param.X[3]/2; param.ghostFace[3] = param.X[0]*param.X[1]*param.X[2]/2; return param; } unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return threads(); } void fillAux() { strcpy(aux, in->AuxString()); char comm[5]; comm[0] = (commDim[0] ? '1' : '0'); comm[1] = (commDim[1] ? '1' : '0'); comm[2] = (commDim[2] ? '1' : '0'); comm[3] = (commDim[3] ? '1' : '0'); comm[4] = '\0'; strcat(aux,",comm="); strcat(aux,comm); if (getKernelPackT() || getTwistPack()) { strcat(aux,",kernelPackT"); } switch (nFace) { case 1: strcat(aux,",nFace=1"); break; case 3: strcat(aux,",nFace=3"); break; default: errorQuda("Number of faces not supported"); } } public: PackFace(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity, const int nFace, const int dim=-1, const int face_num=2) : faces(faces), in(in), dagger(dagger), parity(parity), nFace(nFace), dim(dim), face_num(face_num) { fillAux(); bindSpinorTex<FloatN>(in); } virtual ~PackFace() { unbindSpinorTex<FloatN>(in); } virtual int tuningIter() const { return 3; } virtual TuneKey tuneKey() const { return TuneKey(in->VolString(), typeid(*this).name(), aux); } virtual void apply(const hipStream_t &stream) = 0; long long bytes() const { size_t faceBytes = (inputPerSite() + outputPerSite())*this->threads()*sizeof(((FloatN*)0)->x); if (sizeof(((FloatN*)0)->x) == QUDA_HALF_PRECISION) faceBytes += 2*this->threads()*sizeof(float); // 2 is from input and output return faceBytes; } }; template <typename FloatN, typename Float> class PackFaceWilson : public PackFace<FloatN, Float> { private: int inputPerSite() const { return 24; } // input is full spinor int outputPerSite() const { return 12; } // output is spin projected public: PackFaceWilson(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity) : PackFace<FloatN, Float>(faces, in, dagger, parity, 1) { } virtual ~PackFaceWilson() { } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_WILSON_DIRAC PackParam<FloatN> param = this->prepareParam(); if (this->dagger) { hipLaunchKernelGGL(( packFaceWilsonKernel<1>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, param); } else { hipLaunchKernelGGL(( packFaceWilsonKernel<0>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, param); } #else errorQuda("Wilson face packing kernel is not 
built"); #endif } long long flops() const { return outputPerSite()*this->threads(); } }; void packFaceWilson(void *ghost_buf, cudaColorSpinorField &in, const int dagger, const int parity, const hipStream_t &stream) { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceWilson<double2, double> pack((double2*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceWilson<float4, float> pack((float4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceWilson<short4, float> pack((short4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; } } template <typename FloatN, typename Float> class PackFaceTwisted : public PackFace<FloatN, Float> { private: int inputPerSite() const { return 24; } // input is full spinor int outputPerSite() const { return 12; } // output is spin projected Float a; Float b; public: PackFaceTwisted(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity, Float a, Float b) : PackFace<FloatN, Float>(faces, in, dagger, parity, 1), a(a), b(b) { } virtual ~PackFaceTwisted() { } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_TWISTED_MASS_DIRAC PackParam<FloatN> param = this->prepareParam(); if (this->dagger) { hipLaunchKernelGGL(( packTwistedFaceWilsonKernel<1>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, a, b, param); } else { hipLaunchKernelGGL(( packTwistedFaceWilsonKernel<0>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, a, b, param); } #else errorQuda("Twisted face packing kernel is not built"); #endif } long long flops() const { return outputPerSite()*this->threads(); } }; //! void packTwistedFaceWilson(void *ghost_buf, cudaColorSpinorField &in, const int dagger, const int parity, const double a, const double b, const hipStream_t &stream) { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceTwisted<double2, double> pack((double2*)ghost_buf, &in, dagger, parity, a, b); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceTwisted<float4, float> pack((float4*)ghost_buf, &in, dagger, parity, (float)a, (float)b); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceTwisted<short4, float> pack((short4*)ghost_buf, &in, dagger, parity, (float)a, (float)b); pack.apply(stream); } break; } } #ifdef GPU_STAGGERED_DIRAC #ifdef USE_TEXTURE_OBJECTS #define SPINORTEXDOUBLE param.inTex #define SPINORTEXSINGLE param.inTex #define SPINORTEXHALF param.inTex #define SPINORTEXHALFNORM param.inTexNorm #else #define SPINORTEXDOUBLE spinorTexDouble #define SPINORTEXSINGLE spinorTexSingle2 #define SPINORTEXHALF spinorTexHalf2 #define SPINORTEXHALFNORM spinorTexHalf2Norm #endif template <typename Float2> __device__ void packFaceStaggeredCore(Float2 *out, float *outNorm, const int out_idx, const int out_stride, const Float2 *in, const float *inNorm, const int in_idx, const int in_stride) { out[out_idx + 0*out_stride] = in[in_idx + 0*in_stride]; out[out_idx + 1*out_stride] = in[in_idx + 1*in_stride]; out[out_idx + 2*out_stride] = in[in_idx + 2*in_stride]; } template<> __device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx, const int out_stride, const short2 *in, const float *inNorm, const int in_idx, const int in_stride) { out[out_idx + 0*out_stride] = in[in_idx + 0*in_stride]; out[out_idx + 1*out_stride] = in[in_idx + 1*in_stride]; out[out_idx + 2*out_stride] = in[in_idx + 2*in_stride]; outNorm[out_idx] = 
inNorm[in_idx]; } #if (defined DIRECT_ACCESS_PACK) || (defined FERMI_NO_DBLE_TEX) template <typename Float2> __device__ void packFaceStaggeredCore(Float2 *out, float *outNorm, const int out_idx, const int out_stride, const Float2 *in, const float *inNorm, const int in_idx, const PackParam<double2> &param) { out[out_idx + 0*out_stride] = in[in_idx + 0*param.sp_stride]; out[out_idx + 1*out_stride] = in[in_idx + 1*param.sp_stride]; out[out_idx + 2*out_stride] = in[in_idx + 2*param.sp_stride]; } template<> __device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx, const int out_stride, const short2 *in, const float *inNorm, const int in_idx, const PackParam<double2> &param) { out[out_idx + 0*out_stride] = in[in_idx + 0*param.sp_stride]; out[out_idx + 1*out_stride] = in[in_idx + 1*param.sp_stride]; out[out_idx + 2*out_stride] = in[in_idx + 2*param.sp_stride]; outNorm[out_idx] = inNorm[in_idx]; } #else __device__ void packFaceStaggeredCore(double2 *out, float *outNorm, const int out_idx, const int out_stride, const double2 *in, const float *inNorm, const int in_idx, const PackParam<double2> &param) { out[out_idx + 0*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 0*param.sp_stride); out[out_idx + 1*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 1*param.sp_stride); out[out_idx + 2*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 2*param.sp_stride); } __device__ void packFaceStaggeredCore(float2 *out, float *outNorm, const int out_idx, const int out_stride, const float2 *in, const float *inNorm, const int in_idx, const PackParam<float2> &param) { out[out_idx + 0*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 0*param.sp_stride); out[out_idx + 1*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 1*param.sp_stride); out[out_idx + 2*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 2*param.sp_stride); } // this is rather dumb: undoing the texture load because cudaNormalizedReadMode is used // should really bind to an appropriate texture instead of reusing static inline __device__ short2 float22short2(float c, float2 a) { return make_short2((short)(a.x*c*MAX_SHORT), (short)(a.y*c*MAX_SHORT)); } __device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx, const int out_stride, const short2 *in, const float *inNorm, const int in_idx, const PackParam<short2> &param) { out[out_idx + 0*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+0*param.sp_stride)); out[out_idx + 1*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+1*param.sp_stride)); out[out_idx + 2*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+2*param.sp_stride)); outNorm[out_idx] = TEX1DFETCH(float, SPINORTEXHALFNORM, in_idx); } #endif template <typename FloatN, int nFace> __global__ void packFaceStaggeredKernel(PackParam<FloatN> param) { int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor and write to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[0]) ? 
1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndexStaggered<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X); packFaceStaggeredCore(param.out[0], param.outNorm[0], face_idx, nFace*param.ghostFace[0], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexStaggered<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X); packFaceStaggeredCore(param.out[1], param.outNorm[1], face_idx, nFace*param.ghostFace[0], param.in, param.inNorm, idx, param); } } else if (dim == 1) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[1]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndexStaggered<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X); packFaceStaggeredCore(param.out[2], param.outNorm[2], face_idx, nFace*param.ghostFace[1], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexStaggered<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X); packFaceStaggeredCore(param.out[3], param.outNorm[3], face_idx, nFace*param.ghostFace[1], param.in, param.inNorm, idx, param); } } else if (dim == 2) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[2]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndexStaggered<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X); packFaceStaggeredCore(param.out[4], param.outNorm[4], face_idx, nFace*param.ghostFace[2], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexStaggered<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X); packFaceStaggeredCore(param.out[5], param.outNorm[5], face_idx, nFace*param.ghostFace[2], param.in, param.inNorm, idx, param); } } else { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[3]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndexStaggered<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X); packFaceStaggeredCore(param.out[6], param.outNorm[6], face_idx, nFace*param.ghostFace[3], param.in, param.inNorm,idx, param); } else { const int idx = indexFromFaceIndexStaggered<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X); packFaceStaggeredCore(param.out[7], param.outNorm[7], face_idx, nFace*param.ghostFace[3], param.in, param.inNorm, idx, param); } } } template <typename FloatN, int nFace> __global__ void packFaceExtendedStaggeredKernel(PackExtendedParam<FloatN> param) { int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end // if param.face_num==2 pack both the start and the end, otherwise pack the region of the // lattice specified by param.face_num const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[0]) ? 
1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[0], param.outNorm[0], face_idx, nFace*param.ghostFace[0], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[1], param.outNorm[1], face_idx, nFace*param.ghostFace[0], param.in, param.inNorm, idx, param); } } else if (dim == 1) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[1]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[2], param.outNorm[2], face_idx, nFace*param.ghostFace[1], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[3], param.outNorm[3], face_idx, nFace*param.ghostFace[1], param.in, param.inNorm, idx, param); } } else if (dim == 2) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[2]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[4], param.outNorm[4], face_idx, nFace*param.ghostFace[2], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[5], param.outNorm[5], face_idx, nFace*param.ghostFace[2], param.in, param.inNorm, idx, param); } } else { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[3]) ? 
1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[6], param.outNorm[6], face_idx, nFace*param.ghostFace[3], param.in, param.inNorm,idx, param); } else { const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[7], param.outNorm[7], face_idx, nFace*param.ghostFace[3], param.in, param.inNorm, idx, param); } } } template <typename FloatN, int nFace> __global__ void unpackFaceExtendedStaggeredKernel(PackExtendedParam<FloatN> param) { int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end // if param.face_num==2 pack both the start and the end, otherwist pack the region of the // lattice specified by param.face_num const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[0]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[0], param.outNorm[0], face_idx, nFace*param.ghostFace[0]); } else { const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[1], param.outNorm[1], face_idx, nFace*param.ghostFace[0]); } } else if (dim == 1) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[1]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[2], param.outNorm[2], face_idx, nFace*param.ghostFace[1]); } else { const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[3], param.outNorm[3], face_idx, nFace*param.ghostFace[1]); } } else if (dim == 2) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[2]) ? 
1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[4], param.outNorm[4], face_idx, nFace*param.ghostFace[2]); } else { const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[5], param.outNorm[5], face_idx, nFace*param.ghostFace[2]); } } else { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[3]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[6], param.outNorm[6], face_idx, nFace*param.ghostFace[3]); } else { const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[7], param.outNorm[7], face_idx, nFace*param.ghostFace[3]); } } } #undef SPINORTEXDOUBLE #undef SPINORTEXSINGLE #undef SPINORTEXHALF #endif // GPU_STAGGERED_DIRAC template <typename FloatN, typename Float> class PackFaceStaggered : public PackFace<FloatN, Float> { private: const int* R; // boundary dimensions for extended field const bool unpack; int inputPerSite() const { return 6; } // input is full spinor int outputPerSite() const { return 6; } // output is full spinor public: PackFaceStaggered(FloatN *faces, const cudaColorSpinorField *in, const int nFace, const int dagger, const int parity, const int dim, const int face_num, const int* R=NULL, const bool unpack=false) : PackFace<FloatN, Float>(faces, in, dagger, parity, nFace, dim, face_num), R(R), unpack(unpack) { } virtual ~PackFaceStaggered() { } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_STAGGERED_DIRAC PackParam<FloatN> param = this->prepareParam(this->dim, this->face_num); if(!R){ if (PackFace<FloatN,Float>::nFace==1) { hipLaunchKernelGGL(( packFaceStaggeredKernel<FloatN, 1>) , dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, param); } else { hipLaunchKernelGGL(( packFaceStaggeredKernel<FloatN, 3>) , dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, param); } }else{ // R!=NULL => this is an extended field PackExtendedParam<FloatN> extendedParam(param); if(!unpack){ for(int d=0; d<QUDA_MAX_DIM; ++d) extendedParam.R[d] = R[d]; switch(PackFace<FloatN,Float>::nFace){ case 1: hipLaunchKernelGGL(( packFaceExtendedStaggeredKernel<FloatN,1>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam); break; case 2: hipLaunchKernelGGL(( packFaceExtendedStaggeredKernel<FloatN,2>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam); break; case 3: hipLaunchKernelGGL(( packFaceExtendedStaggeredKernel<FloatN,3>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam); break; case 4: hipLaunchKernelGGL(( packFaceExtendedStaggeredKernel<FloatN,4>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam); break; default: errorQuda("Unsupported boundary width"); break; } }else{ // extended field unpack 
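        // Note on the unpack path below (descriptive comment, not original code):
        // unpackFaceExtendedStaggeredKernel reuses packFaceStaggeredCore with the
        // argument order reversed, so the ghost buffers param.out[]/param.outNorm[]
        // are the source and the bulk spinor param.in/param.inNorm is the
        // destination, written at the extended-boundary sites selected by
        // indexFromFaceIndexExtendedStaggered.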
switch(PackFace<FloatN,Float>::nFace){ case 1: hipLaunchKernelGGL(( unpackFaceExtendedStaggeredKernel<FloatN,1>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam); break; case 2: hipLaunchKernelGGL(( unpackFaceExtendedStaggeredKernel<FloatN,2>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam); break; case 3: hipLaunchKernelGGL(( unpackFaceExtendedStaggeredKernel<FloatN,3>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam); break; case 4: hipLaunchKernelGGL(( unpackFaceExtendedStaggeredKernel<FloatN,4>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam); break; default: errorQuda("Unsupported boundary width"); break; } } } #else errorQuda("Staggered face packing kernel is not built"); #endif } long long flops() const { return 0; } }; void packFaceStaggered(void *ghost_buf, cudaColorSpinorField &in, int nFace, int dagger, int parity, const int dim, const int face_num, const hipStream_t &stream) { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceStaggered<double2, double> pack((double2*)ghost_buf, &in, nFace, dagger, parity, dim, face_num); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceStaggered<float2, float> pack((float2*)ghost_buf, &in, nFace, dagger, parity, dim, face_num); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceStaggered<short2, float> pack((short2*)ghost_buf, &in, nFace, dagger, parity, dim, face_num); pack.apply(stream); } break; } } void packFaceExtendedStaggered(void *buffer, cudaColorSpinorField &field, const int nFace, const int R[], int dagger, int parity, const int dim, const int face_num, const hipStream_t &stream, bool unpack=false) { switch(field.Precision()){ case QUDA_DOUBLE_PRECISION: { PackFaceStaggered<double2,double> pack(static_cast<double2*>(buffer), &field, nFace, dagger, parity, dim, face_num, R, unpack); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceStaggered<float2,float> pack(static_cast<float2*>(buffer), &field, nFace, dagger, parity, dim, face_num, R, unpack); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceStaggered<short2,float> pack(static_cast<short2*>(buffer), &field, nFace, dagger, parity, dim, face_num, R, unpack); pack.apply(stream); } break; } // switch(field.Precision()) } #ifdef GPU_DOMAIN_WALL_DIRAC template <int dagger, typename FloatN> __global__ void packFaceDWKernel(PackParam<FloatN> param) { const int nFace = 1; // 1 face for dwf int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); const int Ls = param.X[4]; // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = beginning, 1 = end // FIXME these param.ghostFace constants do not incude the Ls dimension const int face_num = (face_idx >= nFace*Ls*param.ghostFace[0]) ? 
1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromDWFaceIndex<0,nFace,0>(face_idx,Ls*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[0], param); } else { const int idx = indexFromDWFaceIndex<0,nFace,1>(face_idx,Ls*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[1]) ? 1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromDWFaceIndex<1,nFace,0>(face_idx,Ls*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[1], param); } else { const int idx = indexFromDWFaceIndex<1,nFace,1>(face_idx,Ls*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[2]) ? 1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromDWFaceIndex<2,nFace,0>(face_idx,Ls*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[2], param); } else { const int idx = indexFromDWFaceIndex<2,nFace,1>(face_idx,Ls*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[2], param); } } else { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[3]) ? 1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromDWFaceIndex<3,nFace,0>(face_idx,Ls*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[3], param); } else { const int idx = indexFromDWFaceIndex<3,nFace,1>(face_idx,Ls*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[3], param); } } } template <int dagger, typename FloatN> __global__ void packFaceDW4DKernel(PackParam<FloatN> param) { const int nFace = 1; // 1 face for Wilson int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; const int Ls = param.X[4]; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = beginning, 1 = end // FIXME these param.ghostFace constants do not incude the Ls dimension const int face_num = (face_idx >= nFace*Ls*param.ghostFace[0]) ? 
1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromDW4DFaceIndex<0,nFace,0>(face_idx,Ls*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[0], param); } else { const int idx = indexFromDW4DFaceIndex<0,nFace,1>(face_idx,Ls*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[1]) ? 1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromDW4DFaceIndex<1,nFace,0>(face_idx,Ls*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[1], param); } else { const int idx = indexFromDW4DFaceIndex<1,nFace,1>(face_idx,Ls*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[2]) ? 1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromDW4DFaceIndex<2,nFace,0>(face_idx,Ls*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[2], param); } else { const int idx = indexFromDW4DFaceIndex<2,nFace,1>(face_idx,Ls*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[2], param); } } else { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[3]) ? 
1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromDW4DFaceIndex<3,nFace,0>(face_idx,Ls*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[3], param); } else { const int idx = indexFromDW4DFaceIndex<3,nFace,1>(face_idx,Ls*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[3], param); } } } #endif template <typename FloatN, typename Float> class PackFaceDW : public PackFace<FloatN, Float> { private: int inputPerSite() const { return 24; } // input is full spinor int outputPerSite() const { return 12; } // output is spin projected public: PackFaceDW(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity) : PackFace<FloatN, Float>(faces, in, dagger, parity, 1) { } virtual ~PackFaceDW() { } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_DOMAIN_WALL_DIRAC PackParam<FloatN> param = this->prepareParam(); if (this->dagger) { hipLaunchKernelGGL(( packFaceDWKernel<1>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, param); } else { hipLaunchKernelGGL(( packFaceDWKernel<0>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, param); } #else errorQuda("DW face packing kernel is not built"); #endif } long long flops() const { return outputPerSite()*this->threads(); } }; template <typename FloatN, typename Float> class PackFaceDW4D : public PackFace<FloatN, Float> { private: int inputPerSite() const { return 24; } // input is full spinor int outputPerSite() const { return 12; } // output is spin projected public: PackFaceDW4D(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity) : PackFace<FloatN, Float>(faces, in, dagger, parity, 1) { } virtual ~PackFaceDW4D() { } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_DOMAIN_WALL_DIRAC PackParam<FloatN> param = this->prepareParam(); if (this->dagger) { hipLaunchKernelGGL(( packFaceDW4DKernel<1>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, param); } else { hipLaunchKernelGGL(( packFaceDW4DKernel<0>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, param); } #else errorQuda("4D preconditioned DW face packing kernel is not built"); #endif } long long flops() const { return outputPerSite()*this->threads(); } }; void packFaceDW(void *ghost_buf, cudaColorSpinorField &in, const int dagger, const int parity, const hipStream_t &stream) { if(in.DWFPCtype() == QUDA_4D_PC) { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceDW4D<double2, double> pack((double2*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceDW4D<float4, float> pack((float4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceDW4D<short4, float> pack((short4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; } } else { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceDW<double2, double> pack((double2*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceDW<float4, float> pack((float4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceDW<short4, float> pack((short4*)ghost_buf, &in, 
dagger, parity); pack.apply(stream); } break; } } } #ifdef GPU_NDEG_TWISTED_MASS_DIRAC template <int dagger, typename FloatN> __global__ void packFaceNdegTMKernel(PackParam<FloatN> param) { const int nFace = 1; // 1 face for Wilson const int Nf = 2; int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = beginning, 1 = end // FIXME these param.ghostFace constants do not include the Nf dimension const int face_num = (face_idx >= nFace*Nf*param.ghostFace[0]) ? 1 : 0; face_idx -= face_num*nFace*Nf*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromNdegTMFaceIndex<0,nFace,0>(face_idx,Nf*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[0], param); } else { const int idx = indexFromNdegTMFaceIndex<0,nFace,1>(face_idx,Nf*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (face_idx >= nFace*Nf*param.ghostFace[1]) ? 1 : 0; face_idx -= face_num*nFace*Nf*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromNdegTMFaceIndex<1,nFace,0>(face_idx,Nf*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[1], param); } else { const int idx = indexFromNdegTMFaceIndex<1,nFace,1>(face_idx,Nf*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (face_idx >= nFace*Nf*param.ghostFace[2]) ? 1 : 0; face_idx -= face_num*nFace*Nf*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromNdegTMFaceIndex<2,nFace,0>(face_idx,Nf*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[2], param); } else { const int idx = indexFromNdegTMFaceIndex<2,nFace,1>(face_idx,Nf*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[2], param); } } else { const int face_num = (face_idx >= nFace*Nf*param.ghostFace[3]) ? 
1 : 0; face_idx -= face_num*nFace*Nf*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromNdegTMFaceIndex<3,nFace,0>(face_idx,Nf*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[3], param); } else { const int idx = indexFromNdegTMFaceIndex<3,nFace,1>(face_idx,Nf*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[3], param); } } } #endif template <typename FloatN, typename Float> class PackFaceNdegTM : public PackFace<FloatN, Float> { private: int inputPerSite() const { return 24; } // input is full spinor int outputPerSite() const { return 12; } // output is spin projected public: PackFaceNdegTM(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity) : PackFace<FloatN, Float>(faces, in, dagger, parity, 1) { } virtual ~PackFaceNdegTM() { } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_NDEG_TWISTED_MASS_DIRAC PackParam<FloatN> param = this->prepareParam(); if (this->dagger) { hipLaunchKernelGGL(( packFaceNdegTMKernel<1>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, param); } else { hipLaunchKernelGGL(( packFaceNdegTMKernel<0>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, param); } #else errorQuda("Non-degenerate twisted mass face packing kernel is not built"); #endif } long long flops() const { return outputPerSite()*this->threads(); } }; void packFaceNdegTM(void *ghost_buf, cudaColorSpinorField &in, const int dagger, const int parity, const hipStream_t &stream) { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceNdegTM<double2, double> pack((double2*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceNdegTM<float4, float> pack((float4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceNdegTM<short4, float> pack((short4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; } } void packFace(void *ghost_buf, cudaColorSpinorField &in, const int nFace, const int dagger, const int parity, const int dim, const int face_num, const hipStream_t &stream, const double a, const double b) { int nDimPack = 0; if(dim < 0){ for (int d=0; d<4; d++) { if(!commDim[d]) continue; if (d != 3 || getKernelPackT() || a != 0.0 || b!= 0.0) nDimPack++; } }else{ if(commDim[dim]){ if(dim!=3 || getKernelPackT() || a!=0.0 || b != 0.0) nDimPack++; } } if (!nDimPack) return; // if zero then we have nothing to pack if (nFace != 1 && in.Nspin() != 1) errorQuda("Unsupported number of faces %d", nFace); // Need to update this logic for other multi-src dslash packing if (in.Nspin() == 1) { packFaceStaggered(ghost_buf, in, nFace, dagger, parity, dim, face_num, stream); } else if (a!=0.0 || b!=0.0) { // Need to update this logic for other multi-src dslash packing if(in.TwistFlavor() == QUDA_TWIST_PLUS || in.TwistFlavor() == QUDA_TWIST_MINUS) { packTwistedFaceWilson(ghost_buf, in, dagger, parity, a, b, stream); } else { errorQuda("Cannot perform twisted packing for the spinor."); } } else if (in.Ndim() == 5) { if(in.TwistFlavor() == QUDA_TWIST_INVALID) { packFaceDW(ghost_buf, in, dagger, parity, stream); } else { packFaceNdegTM(ghost_buf, in, dagger, parity, stream); } } else { packFaceWilson(ghost_buf, in, dagger, parity, stream); } } void 
packFaceExtended(void* buffer, cudaColorSpinorField &field, const int nFace, const int R[], const int dagger, const int parity, const int dim, const int face_num, const hipStream_t &stream, const bool unpack) { int nDimPack = 0; if(dim < 0){ for(int d=0; d<4; d++){ if(R[d]) nDimPack++; } }else{ if(R[dim]) nDimPack++; } if(!nDimPack) return; // if zero then we have nothing to pack if(field.Nspin() == 1){ packFaceExtendedStaggered(buffer, field, nFace, R, dagger, parity, dim, face_num, stream, unpack); }else{ errorQuda("Extended quark field is not supported"); } } #endif // MULTI_GPU }
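// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of the original file). Every host-side
// entry point above -- packFaceWilson, packTwistedFaceWilson, packFaceStaggered,
// packFaceDW, packFaceNdegTM -- uses the same precision-dispatch pattern:
// switch on the spinor precision, instantiate the matching PackFace-style
// functor on the ghost buffer, and launch it on the caller's stream. The
// skeleton below shows that pattern in isolation; MyPacker and myPackFace are
// hypothetical names, not QUDA API, and the functor bodies are stubbed out.
namespace quda {

  template <typename FloatN, typename Float>
  struct MyPacker {
    MyPacker(FloatN *ghost, const cudaColorSpinorField *in, int dagger, int parity) { }
    void apply(const hipStream_t &stream) { /* tune launch geometry, then launch the pack kernel */ }
  };

  void myPackFace(void *ghost_buf, cudaColorSpinorField &in, const int dagger,
                  const int parity, const hipStream_t &stream) {
    switch (in.Precision()) {
    case QUDA_DOUBLE_PRECISION:
      { MyPacker<double2, double> p((double2*)ghost_buf, &in, dagger, parity); p.apply(stream); } break;
    case QUDA_SINGLE_PRECISION:
      { MyPacker<float4, float> p((float4*)ghost_buf, &in, dagger, parity); p.apply(stream); } break;
    case QUDA_HALF_PRECISION:
      // half precision carries a separate norm array, hence the Float=float companion type
      { MyPacker<short4, float> p((short4*)ghost_buf, &in, dagger, parity); p.apply(stream); } break;
    default: break;
    }
  }

} // namespace quda
// ---------------------------------------------------------------------------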
e49097a86c3d4461874cd6f91c193805589feb98.cu
#include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <color_spinor_field.h> #include <clover_field.h> // Do we need this now? // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_WILSON_PACK_SPINOR #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <sys/time.h> #include <blas_quda.h> #include <inline_ptx.h> namespace quda { namespace pack { #include <dslash_constants.h> #include <dslash_textures.h> } // end namespace pack using namespace pack; #ifdef MULTI_GPU static int commDim[QUDA_MAX_DIM]; // Whether to do comms or not void setPackComms(const int *comm_dim) { for (int i=0; i<QUDA_MAX_DIM; i++) commDim[i] = comm_dim[i]; } #else void setPackComms(const int *comm_dim) { ; } #endif #include <dslash_index.cuh> // routines for packing the ghost zones (multi-GPU only) #ifdef MULTI_GPU template <typename FloatN> struct PackParam { FloatN *out[2*4]; float *outNorm[2*4]; FloatN *in; float *inNorm; int threads; // total number of threads // offsets which determine thread mapping to dimension int threadDimMapLower[4]; // lowest thread which maps to dim int threadDimMapUpper[4]; // greatest thread + 1 which maps to dim int parity; #ifdef USE_TEXTURE_OBJECTS cudaTextureObject_t inTex; cudaTextureObject_t inTexNorm; #endif int dim; int face_num; int X[QUDA_MAX_DIM]; // lattice dimensions int ghostFace[4]; int sp_stride; }; template<typename FloatN> std::ostream& operator<<(std::ostream& output, const PackParam<FloatN>& param) { output << "threads = " << param.threads << std::endl; output << "threadDimMapLower = {" << param.threadDimMapLower[0] << "," << param.threadDimMapLower[1] << "," << param.threadDimMapLower[2] << "," << param.threadDimMapLower[3] << "}" << std::endl; output << "threadDimMapUpper = {" << param.threadDimMapUpper[0] << "," << param.threadDimMapUpper[1] << "," << param.threadDimMapUpper[2] << "," << param.threadDimMapUpper[3] << "}" << std::endl; output << "parity = " << param.parity << std::endl; output << "dim = " << param.dim << std::endl; output << "face_num = " << param.face_num << std::endl; output << "X = {" << param.X[0] << ","<< param.X[1] << "," << param.X[2] << "," << param.X[3] << "}" << std::endl; output << "ghostFace = {" << param.ghostFace[0] << ","<< param.ghostFace[1] << "," << param.ghostFace[2] << "," << param.ghostFace[3] << "}" << std::endl; output << "sp_stride = " << param.sp_stride << std::endl; return output; } // Extend the PackParam class to PackExtendedParam template<typename Float> struct PackExtendedParam : public PackParam<Float> { PackExtendedParam(){} PackExtendedParam(const PackParam<Float>& base) : PackParam<Float>(base) {} int R[QUDA_MAX_DIM]; // boundary dimensions }; /** * Determines which face a given thread is computing. Also rescale * face_idx so that is relative to a given dimension. 
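   * (A thread whose face_idx lies in [threadDimMapLower[d], threadDimMapUpper[d])
   * is assigned to dimension d; on return face_idx has been shifted down by
   * threadDimMapLower[d], making it a face-local index.)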
*/ /* template <typename Param> __device__ inline int dimFromFaceIndex (int &face_idx, const Param &param) { if (face_idx < param.threadDimMapUpper[0]) { return 0; } else if (face_idx < param.threadDimMapUpper[1]) { face_idx -= param.threadDimMapLower[1]; return 1; } else if (face_idx < param.threadDimMapUpper[2]) { face_idx -= param.threadDimMapLower[2]; return 2; } else { // this is only called if we use T kernel packing face_idx -= param.threadDimMapLower[3]; return 3; } } */ #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) // double precision #if (defined DIRECT_ACCESS_WILSON_PACK_SPINOR) || (defined FERMI_NO_DBLE_TEX) #define READ_SPINOR READ_SPINOR_DOUBLE #define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP #define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN #define SPINORTEX in #else #define READ_SPINOR READ_SPINOR_DOUBLE_TEX #define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexDouble #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_DOUBLE2 #define SPINOR_DOUBLE template <int dim, int dagger, int face_num> static inline __device__ void packFaceWilsonCore(double2 *out, float *outNorm, const double2 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, PackParam<double2> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } template <int dim, int dagger, int face_num> static inline __device__ void unpackFaceWilsonCore(double2 *out, float *outNorm, const double2 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, PackParam<double2> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR #undef SPINOR_DOUBLE // single precision #ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR #define READ_SPINOR READ_SPINOR_SINGLE #define READ_SPINOR_UP READ_SPINOR_SINGLE_UP #define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN #define SPINORTEX in #else #define READ_SPINOR READ_SPINOR_SINGLE_TEX #define READ_SPINOR_UP READ_SPINOR_SINGLE_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexSingle #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_FLOAT4 template <int dim, int dagger, int face_num> static inline __device__ void packFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, const PackParam<float4> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } template <int dim, int dagger, int face_num> static inline __device__ void unpackFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, const PackParam<float4> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR // half precision #ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR #define READ_SPINOR READ_SPINOR_HALF #define READ_SPINOR_UP READ_SPINOR_HALF_UP #define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN #define 
SPINORTEX in #else #define READ_SPINOR READ_SPINOR_HALF_TEX #define READ_SPINOR_UP READ_SPINOR_HALF_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexHalf #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_SHORT4 template <int dim, int dagger, int face_num> static inline __device__ void packFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, const PackParam<short4> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } template <int dim, int dagger, int face_num> static inline __device__ void unpackFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm, const int &idx, const int &face_idx, const int &face_volume, const PackParam<short4> &param) { if (dagger) { #include "wilson_pack_face_dagger_core.h" } else { #include "wilson_pack_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR template <int dagger, typename FloatN> __global__ void packFaceWilsonKernel(PackParam<FloatN> param) { const int nFace = 1; // 1 face for Wilson int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (face_idx >= nFace*param.ghostFace[0]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndex<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } else { const int idx = indexFromFaceIndex<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } } else if (dim == 1) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (face_idx >= nFace*param.ghostFace[1]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndex<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } else { const int idx = indexFromFaceIndex<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } } else if (dim == 2) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (face_idx >= nFace*param.ghostFace[2]) ? 
1 : 0; face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndex<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } else { const int idx = indexFromFaceIndex<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } } else { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (face_idx >= nFace*param.ghostFace[3]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndex<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } else { const int idx = indexFromFaceIndex<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } } } template <int dagger, typename FloatN, int nFace> __global__ void packFaceExtendedWilsonKernel(PackParam<FloatN> param) { int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end // if param.face_num==2 pack both the start and the end, otherwise pack the region of the lattice // specified by param.face_num const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[0]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } else { const int idx = indexFromFaceIndexExtended<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[1]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } else { const int idx = indexFromFaceIndexExtended<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[2]) ? 
1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } else { const int idx = indexFromFaceIndexExtended<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } } else { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[3]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } else { const int idx = indexFromFaceIndexExtended<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } } } template <int dagger, typename FloatN, int nFace> __global__ void unpackFaceExtendedWilsonKernel(PackParam<FloatN> param) { int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end // if param.face_num==2 pack both the start and the end, otherwise pack the region of the lattice // specified by param.face_num const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[0]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); unpackFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } else { const int idx = indexFromFaceIndexExtended<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); unpackFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm,idx, face_idx, param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[1]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); unpackFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } else { const int idx = indexFromFaceIndexExtended<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); unpackFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm,idx, face_idx, param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (param.face_num==2) ? 
((face_idx >= nFace*param.ghostFace[2]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); unpackFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } else { const int idx = indexFromFaceIndexExtended<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); unpackFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm,idx, face_idx, param.ghostFace[2], param); } } else { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[3]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndexExtended<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); unpackFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } else { const int idx = indexFromFaceIndexExtended<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); unpackFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm,idx, face_idx, param.ghostFace[3], param); } } } #endif // GPU_WILSON_DIRAC || GPU_DOMAIN_WALL_DIRAC #if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC) #endif // GPU_WILSON_DIRAC || GPU_DOMAIN_WALL_DIRAC #if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC) // double precision #endif // GPU_WILSON_DIRAC || GPU_DOMAIN_WALL_DIRAC #if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC) // double precision #if (defined DIRECT_ACCESS_WILSON_PACK_SPINOR) || (defined FERMI_NO_DBLE_TEX) #define READ_SPINOR READ_SPINOR_DOUBLE #define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP #define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN #define SPINORTEX in #else #define READ_SPINOR READ_SPINOR_DOUBLE_TEX #define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexDouble #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_DOUBLE2 #define SPINOR_DOUBLE template <int dim, int dagger, int face_num> static inline __device__ void packTwistedFaceWilsonCore(double2 *out, float *outNorm, const double2 *in, const float *inNorm, double a, double b, const int &idx, const int &face_idx, const int &face_volume, PackParam<double2> &param) { if (dagger) { #include "wilson_pack_twisted_face_dagger_core.h" } else { #include "wilson_pack_twisted_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR #undef SPINOR_DOUBLE // single precision #ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR #define READ_SPINOR READ_SPINOR_SINGLE #define READ_SPINOR_UP READ_SPINOR_SINGLE_UP #define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN #define SPINORTEX in #else #define READ_SPINOR READ_SPINOR_SINGLE_TEX #define READ_SPINOR_UP READ_SPINOR_SINGLE_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexSingle #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_FLOAT4 template <int dim, int dagger, int face_num> static inline __device__ void packTwistedFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float 
*inNorm, float a, float b, const int &idx, const int &face_idx, const int &face_volume, const PackParam<float4> &param) { if (dagger) { #include "wilson_pack_twisted_face_dagger_core.h" } else { #include "wilson_pack_twisted_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR // half precision #ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR #define READ_SPINOR READ_SPINOR_HALF #define READ_SPINOR_UP READ_SPINOR_HALF_UP #define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN #define SPINORTEX in #else #define READ_SPINOR READ_SPINOR_HALF_TEX #define READ_SPINOR_UP READ_SPINOR_HALF_UP_TEX #define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN_TEX #ifdef USE_TEXTURE_OBJECTS #define SPINORTEX param.inTex #else #define SPINORTEX spinorTexHalf #endif #endif #define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_SHORT4 template <int dim, int dagger, int face_num> static inline __device__ void packTwistedFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm, float a, float b, const int &idx, const int &face_idx, const int &face_volume, const PackParam<short4> &param) { if (dagger) { #include "wilson_pack_twisted_face_dagger_core.h" } else { #include "wilson_pack_twisted_face_core.h" } } #undef READ_SPINOR #undef READ_SPINOR_UP #undef READ_SPINOR_DOWN #undef SPINORTEX #undef WRITE_HALF_SPINOR template <int dagger, typename FloatN, typename Float> __global__ void packTwistedFaceWilsonKernel(Float a, Float b, PackParam<FloatN> param) { const int nFace = 1; // 1 face for Wilson int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (face_idx >= nFace*param.ghostFace[0]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndex<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X); packTwistedFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[0], param); } else { const int idx = indexFromFaceIndex<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X); packTwistedFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (face_idx >= nFace*param.ghostFace[1]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndex<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X); packTwistedFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[1], param); } else { const int idx = indexFromFaceIndex<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X); packTwistedFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (face_idx >= nFace*param.ghostFace[2]) ? 
1 : 0; face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndex<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X); packTwistedFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[2], param); } else { const int idx = indexFromFaceIndex<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X); packTwistedFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[2], param); } } else { const int face_num = (face_idx >= nFace*param.ghostFace[3]) ? 1 : 0; face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndex<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X); packTwistedFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm, a, b,idx, face_idx, param.ghostFace[3], param); } else { const int idx = indexFromFaceIndex<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X); packTwistedFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm, a, b, idx, face_idx, param.ghostFace[3], param); } } } #endif // GPU_TWISTED_MASS_DIRAC template <typename FloatN, typename Float> class PackFace : public Tunable { protected: FloatN *faces; const cudaColorSpinorField *in; const int dagger; const int parity; const int nFace; const int dim; const int face_num; // compute how many threads we need in total for the face packing unsigned int threads() const { unsigned int threads = 0; if(dim < 0){ // if dim is negative, pack all dimensions for (int i=0; i<4; i++) { if (!commDim[i]) continue; if ((i==3 && !(getKernelPackT() || getTwistPack()))) continue; threads += 2*nFace*in->GhostFace()[i]; // 2 for forwards and backwards faces } }else{ // pack only in dim dimension if(commDim[dim] && dim!=3 || (getKernelPackT() || getTwistPack())){ threads = nFace*in->GhostFace()[dim]; if(face_num==2) threads *= 2; // sending data forwards and backwards } } return threads; } virtual int inputPerSite() const = 0; virtual int outputPerSite() const = 0; // prepare the param struct with kernel arguments PackParam<FloatN> prepareParam(int dim=-1, int face_num=2) { PackParam<FloatN> param; param.in = (FloatN*)in->V(); param.inNorm = (float*)in->Norm(); param.dim = dim; param.face_num = face_num; param.parity = parity; for(int d=0; d<QUDA_MAX_DIM; d++) param.X[d] = in->X()[d]; param.X[0] *= 2; #ifdef USE_TEXTURE_OBJECTS param.inTex = in->Tex(); param.inTexNorm = in->TexNorm(); #endif param.threads = threads(); param.sp_stride = in->Stride(); int prev = -1; // previous dimension that was partitioned for (int i=0; i<4; i++) { param.threadDimMapLower[i] = 0; param.threadDimMapUpper[i] = 0; if (!commDim[i]) continue; param.threadDimMapLower[i] = (prev>=0 ? 
param.threadDimMapUpper[prev] : 0); param.threadDimMapUpper[i] = param.threadDimMapLower[i] + 2*nFace*in->GhostFace()[i]; size_t faceBytes = nFace*outputPerSite()*in->GhostFace()[i]*sizeof(faces->x); if (typeid(FloatN) == typeid(short4) || typeid(FloatN) == typeid(short2)) { faceBytes += nFace*in->GhostFace()[i]*sizeof(float); param.out[2*i] = (FloatN*)((char*)faces + (outputPerSite()*sizeof(faces->x) + sizeof(float))*param.threadDimMapLower[i]); param.outNorm[2*i] = (float*)((char*)param.out[2*i] + nFace*outputPerSite()*in->GhostFace()[i]*sizeof(faces->x)); } else { param.out[2*i] = (FloatN*)((char*)faces+outputPerSite()*sizeof(faces->x)*param.threadDimMapLower[i]); } param.out[2*i+1] = (FloatN*)((char*)param.out[2*i] + faceBytes); param.outNorm[2*i+1] = (float*)((char*)param.outNorm[2*i] + faceBytes); prev=i; } param.ghostFace[0] = param.X[1]*param.X[2]*param.X[3]/2; param.ghostFace[1] = param.X[0]*param.X[2]*param.X[3]/2; param.ghostFace[2] = param.X[0]*param.X[1]*param.X[3]/2; param.ghostFace[3] = param.X[0]*param.X[1]*param.X[2]/2; return param; } unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return threads(); } void fillAux() { strcpy(aux, in->AuxString()); char comm[5]; comm[0] = (commDim[0] ? '1' : '0'); comm[1] = (commDim[1] ? '1' : '0'); comm[2] = (commDim[2] ? '1' : '0'); comm[3] = (commDim[3] ? '1' : '0'); comm[4] = '\0'; strcat(aux,",comm="); strcat(aux,comm); if (getKernelPackT() || getTwistPack()) { strcat(aux,",kernelPackT"); } switch (nFace) { case 1: strcat(aux,",nFace=1"); break; case 3: strcat(aux,",nFace=3"); break; default: errorQuda("Number of faces not supported"); } } public: PackFace(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity, const int nFace, const int dim=-1, const int face_num=2) : faces(faces), in(in), dagger(dagger), parity(parity), nFace(nFace), dim(dim), face_num(face_num) { fillAux(); bindSpinorTex<FloatN>(in); } virtual ~PackFace() { unbindSpinorTex<FloatN>(in); } virtual int tuningIter() const { return 3; } virtual TuneKey tuneKey() const { return TuneKey(in->VolString(), typeid(*this).name(), aux); } virtual void apply(const cudaStream_t &stream) = 0; long long bytes() const { size_t faceBytes = (inputPerSite() + outputPerSite())*this->threads()*sizeof(((FloatN*)0)->x); if (sizeof(((FloatN*)0)->x) == QUDA_HALF_PRECISION) faceBytes += 2*this->threads()*sizeof(float); // 2 is from input and output return faceBytes; } }; template <typename FloatN, typename Float> class PackFaceWilson : public PackFace<FloatN, Float> { private: int inputPerSite() const { return 24; } // input is full spinor int outputPerSite() const { return 12; } // output is spin projected public: PackFaceWilson(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity) : PackFace<FloatN, Float>(faces, in, dagger, parity, 1) { } virtual ~PackFaceWilson() { } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_WILSON_DIRAC PackParam<FloatN> param = this->prepareParam(); if (this->dagger) { packFaceWilsonKernel<1><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(param); } else { packFaceWilsonKernel<0><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(param); } #else errorQuda("Wilson face packing kernel is not built"); #endif } long long flops() const { return 
outputPerSite()*this->threads(); } }; void packFaceWilson(void *ghost_buf, cudaColorSpinorField &in, const int dagger, const int parity, const cudaStream_t &stream) { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceWilson<double2, double> pack((double2*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceWilson<float4, float> pack((float4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceWilson<short4, float> pack((short4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; } } template <typename FloatN, typename Float> class PackFaceTwisted : public PackFace<FloatN, Float> { private: int inputPerSite() const { return 24; } // input is full spinor int outputPerSite() const { return 12; } // output is spin projected Float a; Float b; public: PackFaceTwisted(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity, Float a, Float b) : PackFace<FloatN, Float>(faces, in, dagger, parity, 1), a(a), b(b) { } virtual ~PackFaceTwisted() { } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_TWISTED_MASS_DIRAC PackParam<FloatN> param = this->prepareParam(); if (this->dagger) { packTwistedFaceWilsonKernel<1><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(a, b, param); } else { packTwistedFaceWilsonKernel<0><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(a, b, param); } #else errorQuda("Twisted face packing kernel is not built"); #endif } long long flops() const { return outputPerSite()*this->threads(); } }; //! void packTwistedFaceWilson(void *ghost_buf, cudaColorSpinorField &in, const int dagger, const int parity, const double a, const double b, const cudaStream_t &stream) { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceTwisted<double2, double> pack((double2*)ghost_buf, &in, dagger, parity, a, b); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceTwisted<float4, float> pack((float4*)ghost_buf, &in, dagger, parity, (float)a, (float)b); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceTwisted<short4, float> pack((short4*)ghost_buf, &in, dagger, parity, (float)a, (float)b); pack.apply(stream); } break; } } #ifdef GPU_STAGGERED_DIRAC #ifdef USE_TEXTURE_OBJECTS #define SPINORTEXDOUBLE param.inTex #define SPINORTEXSINGLE param.inTex #define SPINORTEXHALF param.inTex #define SPINORTEXHALFNORM param.inTexNorm #else #define SPINORTEXDOUBLE spinorTexDouble #define SPINORTEXSINGLE spinorTexSingle2 #define SPINORTEXHALF spinorTexHalf2 #define SPINORTEXHALFNORM spinorTexHalf2Norm #endif template <typename Float2> __device__ void packFaceStaggeredCore(Float2 *out, float *outNorm, const int out_idx, const int out_stride, const Float2 *in, const float *inNorm, const int in_idx, const int in_stride) { out[out_idx + 0*out_stride] = in[in_idx + 0*in_stride]; out[out_idx + 1*out_stride] = in[in_idx + 1*in_stride]; out[out_idx + 2*out_stride] = in[in_idx + 2*in_stride]; } template<> __device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx, const int out_stride, const short2 *in, const float *inNorm, const int in_idx, const int in_stride) { out[out_idx + 0*out_stride] = in[in_idx + 0*in_stride]; out[out_idx + 1*out_stride] = in[in_idx + 1*in_stride]; out[out_idx + 2*out_stride] = in[in_idx + 2*in_stride]; outNorm[out_idx] = inNorm[in_idx]; } #if (defined DIRECT_ACCESS_PACK) || (defined FERMI_NO_DBLE_TEX) template <typename Float2> 
__device__ void packFaceStaggeredCore(Float2 *out, float *outNorm, const int out_idx, const int out_stride, const Float2 *in, const float *inNorm, const int in_idx, const PackParam<double2> &param) { out[out_idx + 0*out_stride] = in[in_idx + 0*param.sp_stride]; out[out_idx + 1*out_stride] = in[in_idx + 1*param.sp_stride]; out[out_idx + 2*out_stride] = in[in_idx + 2*param.sp_stride]; } template<> __device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx, const int out_stride, const short2 *in, const float *inNorm, const int in_idx, const PackParam<double2> &param) { out[out_idx + 0*out_stride] = in[in_idx + 0*param.sp_stride]; out[out_idx + 1*out_stride] = in[in_idx + 1*param.sp_stride]; out[out_idx + 2*out_stride] = in[in_idx + 2*param.sp_stride]; outNorm[out_idx] = inNorm[in_idx]; } #else __device__ void packFaceStaggeredCore(double2 *out, float *outNorm, const int out_idx, const int out_stride, const double2 *in, const float *inNorm, const int in_idx, const PackParam<double2> &param) { out[out_idx + 0*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 0*param.sp_stride); out[out_idx + 1*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 1*param.sp_stride); out[out_idx + 2*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 2*param.sp_stride); } __device__ void packFaceStaggeredCore(float2 *out, float *outNorm, const int out_idx, const int out_stride, const float2 *in, const float *inNorm, const int in_idx, const PackParam<float2> &param) { out[out_idx + 0*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 0*param.sp_stride); out[out_idx + 1*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 1*param.sp_stride); out[out_idx + 2*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 2*param.sp_stride); } // this is rather dumb: undoing the texture load because cudaNormalizedReadMode is used // should really bind to an appropriate texture instead of reusing static inline __device__ short2 float22short2(float c, float2 a) { return make_short2((short)(a.x*c*MAX_SHORT), (short)(a.y*c*MAX_SHORT)); } __device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx, const int out_stride, const short2 *in, const float *inNorm, const int in_idx, const PackParam<short2> &param) { out[out_idx + 0*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+0*param.sp_stride)); out[out_idx + 1*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+1*param.sp_stride)); out[out_idx + 2*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+2*param.sp_stride)); outNorm[out_idx] = TEX1DFETCH(float, SPINORTEXHALFNORM, in_idx); } #endif template <typename FloatN, int nFace> __global__ void packFaceStaggeredKernel(PackParam<FloatN> param) { int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor and write to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[0]) ? 
1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndexStaggered<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X); packFaceStaggeredCore(param.out[0], param.outNorm[0], face_idx, nFace*param.ghostFace[0], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexStaggered<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X); packFaceStaggeredCore(param.out[1], param.outNorm[1], face_idx, nFace*param.ghostFace[0], param.in, param.inNorm, idx, param); } } else if (dim == 1) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[1]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndexStaggered<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X); packFaceStaggeredCore(param.out[2], param.outNorm[2], face_idx, nFace*param.ghostFace[1], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexStaggered<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X); packFaceStaggeredCore(param.out[3], param.outNorm[3], face_idx, nFace*param.ghostFace[1], param.in, param.inNorm, idx, param); } } else if (dim == 2) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[2]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndexStaggered<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X); packFaceStaggeredCore(param.out[4], param.outNorm[4], face_idx, nFace*param.ghostFace[2], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexStaggered<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X); packFaceStaggeredCore(param.out[5], param.outNorm[5], face_idx, nFace*param.ghostFace[2], param.in, param.inNorm, idx, param); } } else { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[3]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndexStaggered<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X); packFaceStaggeredCore(param.out[6], param.outNorm[6], face_idx, nFace*param.ghostFace[3], param.in, param.inNorm,idx, param); } else { const int idx = indexFromFaceIndexStaggered<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X); packFaceStaggeredCore(param.out[7], param.outNorm[7], face_idx, nFace*param.ghostFace[3], param.in, param.inNorm, idx, param); } } } template <typename FloatN, int nFace> __global__ void packFaceExtendedStaggeredKernel(PackExtendedParam<FloatN> param) { int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end // if param.face_num==2 pack both the start and the end, otherwise pack the region of the // lattice specified by param.face_num const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[0]) ? 
1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[0], param.outNorm[0], face_idx, nFace*param.ghostFace[0], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[1], param.outNorm[1], face_idx, nFace*param.ghostFace[0], param.in, param.inNorm, idx, param); } } else if (dim == 1) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[1]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[2], param.outNorm[2], face_idx, nFace*param.ghostFace[1], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[3], param.outNorm[3], face_idx, nFace*param.ghostFace[1], param.in, param.inNorm, idx, param); } } else if (dim == 2) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[2]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[4], param.outNorm[4], face_idx, nFace*param.ghostFace[2], param.in, param.inNorm, idx, param); } else { const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[5], param.outNorm[5], face_idx, nFace*param.ghostFace[2], param.in, param.inNorm, idx, param); } } else { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[3]) ? 
1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[6], param.outNorm[6], face_idx, nFace*param.ghostFace[3], param.in, param.inNorm,idx, param); } else { const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceStaggeredCore(param.out[7], param.outNorm[7], face_idx, nFace*param.ghostFace[3], param.in, param.inNorm, idx, param); } } } template <typename FloatN, int nFace> __global__ void unpackFaceExtendedStaggeredKernel(PackExtendedParam<FloatN> param) { int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = start, 1 = end // if param.face_num==2 pack both the start and the end, otherwist pack the region of the // lattice specified by param.face_num const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[0]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,0>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[0], param.outNorm[0], face_idx, nFace*param.ghostFace[0]); } else { const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,1>(face_idx,param.ghostFace[0],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[1], param.outNorm[1], face_idx, nFace*param.ghostFace[0]); } } else if (dim == 1) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[1]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,0>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[2], param.outNorm[2], face_idx, nFace*param.ghostFace[1]); } else { const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,1>(face_idx,param.ghostFace[1],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[3], param.outNorm[3], face_idx, nFace*param.ghostFace[1]); } } else if (dim == 2) { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[2]) ? 
1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,0>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[4], param.outNorm[4], face_idx, nFace*param.ghostFace[2]); } else { const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,1>(face_idx,param.ghostFace[2],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[5], param.outNorm[5], face_idx, nFace*param.ghostFace[2]); } } else { const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.ghostFace[3]) ? 1 : 0) : param.face_num; if(param.face_num==2) face_idx -= face_num*nFace*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,0>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[6], param.outNorm[6], face_idx, nFace*param.ghostFace[3]); } else { const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,1>(face_idx,param.ghostFace[3],param.parity,param.X,param.R); packFaceStaggeredCore(param.in, param.inNorm, idx, param.sp_stride, param.out[7], param.outNorm[7], face_idx, nFace*param.ghostFace[3]); } } } #undef SPINORTEXDOUBLE #undef SPINORTEXSINGLE #undef SPINORTEXHALF #endif // GPU_STAGGERED_DIRAC template <typename FloatN, typename Float> class PackFaceStaggered : public PackFace<FloatN, Float> { private: const int* R; // boundary dimensions for extended field const bool unpack; int inputPerSite() const { return 6; } // input is full spinor int outputPerSite() const { return 6; } // output is full spinor public: PackFaceStaggered(FloatN *faces, const cudaColorSpinorField *in, const int nFace, const int dagger, const int parity, const int dim, const int face_num, const int* R=NULL, const bool unpack=false) : PackFace<FloatN, Float>(faces, in, dagger, parity, nFace, dim, face_num), R(R), unpack(unpack) { } virtual ~PackFaceStaggered() { } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_STAGGERED_DIRAC PackParam<FloatN> param = this->prepareParam(this->dim, this->face_num); if(!R){ if (PackFace<FloatN,Float>::nFace==1) { packFaceStaggeredKernel<FloatN, 1> <<<tp.grid, tp.block, tp.shared_bytes, stream>>>(param); } else { packFaceStaggeredKernel<FloatN, 3> <<<tp.grid, tp.block, tp.shared_bytes, stream>>>(param); } }else{ // R!=NULL => this is an extended field PackExtendedParam<FloatN> extendedParam(param); if(!unpack){ for(int d=0; d<QUDA_MAX_DIM; ++d) extendedParam.R[d] = R[d]; switch(PackFace<FloatN,Float>::nFace){ case 1: packFaceExtendedStaggeredKernel<FloatN,1><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam); break; case 2: packFaceExtendedStaggeredKernel<FloatN,2><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam); break; case 3: packFaceExtendedStaggeredKernel<FloatN,3><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam); break; case 4: packFaceExtendedStaggeredKernel<FloatN,4><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam); break; default: errorQuda("Unsupported boundary width"); break; } }else{ // extended field unpack switch(PackFace<FloatN,Float>::nFace){ case 1: unpackFaceExtendedStaggeredKernel<FloatN,1><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam); break; case 2: 
unpackFaceExtendedStaggeredKernel<FloatN,2><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam); break; case 3: unpackFaceExtendedStaggeredKernel<FloatN,3><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam); break; case 4: unpackFaceExtendedStaggeredKernel<FloatN,4><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam); break; default: errorQuda("Unsupported boundary width"); break; } } } #else errorQuda("Staggered face packing kernel is not built"); #endif } long long flops() const { return 0; } }; void packFaceStaggered(void *ghost_buf, cudaColorSpinorField &in, int nFace, int dagger, int parity, const int dim, const int face_num, const cudaStream_t &stream) { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceStaggered<double2, double> pack((double2*)ghost_buf, &in, nFace, dagger, parity, dim, face_num); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceStaggered<float2, float> pack((float2*)ghost_buf, &in, nFace, dagger, parity, dim, face_num); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceStaggered<short2, float> pack((short2*)ghost_buf, &in, nFace, dagger, parity, dim, face_num); pack.apply(stream); } break; } } void packFaceExtendedStaggered(void *buffer, cudaColorSpinorField &field, const int nFace, const int R[], int dagger, int parity, const int dim, const int face_num, const cudaStream_t &stream, bool unpack=false) { switch(field.Precision()){ case QUDA_DOUBLE_PRECISION: { PackFaceStaggered<double2,double> pack(static_cast<double2*>(buffer), &field, nFace, dagger, parity, dim, face_num, R, unpack); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceStaggered<float2,float> pack(static_cast<float2*>(buffer), &field, nFace, dagger, parity, dim, face_num, R, unpack); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceStaggered<short2,float> pack(static_cast<short2*>(buffer), &field, nFace, dagger, parity, dim, face_num, R, unpack); pack.apply(stream); } break; } // switch(field.Precision()) } #ifdef GPU_DOMAIN_WALL_DIRAC template <int dagger, typename FloatN> __global__ void packFaceDWKernel(PackParam<FloatN> param) { const int nFace = 1; // 1 face for dwf int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); const int Ls = param.X[4]; // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = beginning, 1 = end // FIXME these param.ghostFace constants do not incude the Ls dimension const int face_num = (face_idx >= nFace*Ls*param.ghostFace[0]) ? 1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromDWFaceIndex<0,nFace,0>(face_idx,Ls*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[0], param); } else { const int idx = indexFromDWFaceIndex<0,nFace,1>(face_idx,Ls*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[1]) ? 
1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromDWFaceIndex<1,nFace,0>(face_idx,Ls*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[1], param); } else { const int idx = indexFromDWFaceIndex<1,nFace,1>(face_idx,Ls*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[2]) ? 1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromDWFaceIndex<2,nFace,0>(face_idx,Ls*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[2], param); } else { const int idx = indexFromDWFaceIndex<2,nFace,1>(face_idx,Ls*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[2], param); } } else { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[3]) ? 1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromDWFaceIndex<3,nFace,0>(face_idx,Ls*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[3], param); } else { const int idx = indexFromDWFaceIndex<3,nFace,1>(face_idx,Ls*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[3], param); } } } template <int dagger, typename FloatN> __global__ void packFaceDW4DKernel(PackParam<FloatN> param) { const int nFace = 1; // 1 face for Wilson int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; const int Ls = param.X[4]; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = beginning, 1 = end // FIXME these param.ghostFace constants do not incude the Ls dimension const int face_num = (face_idx >= nFace*Ls*param.ghostFace[0]) ? 1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromDW4DFaceIndex<0,nFace,0>(face_idx,Ls*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[0], param); } else { const int idx = indexFromDW4DFaceIndex<0,nFace,1>(face_idx,Ls*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[1]) ? 
1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromDW4DFaceIndex<1,nFace,0>(face_idx,Ls*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[1], param); } else { const int idx = indexFromDW4DFaceIndex<1,nFace,1>(face_idx,Ls*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[2]) ? 1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromDW4DFaceIndex<2,nFace,0>(face_idx,Ls*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[2], param); } else { const int idx = indexFromDW4DFaceIndex<2,nFace,1>(face_idx,Ls*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[2], param); } } else { const int face_num = (face_idx >= nFace*Ls*param.ghostFace[3]) ? 1 : 0; face_idx -= face_num*nFace*Ls*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromDW4DFaceIndex<3,nFace,0>(face_idx,Ls*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[3], param); } else { const int idx = indexFromDW4DFaceIndex<3,nFace,1>(face_idx,Ls*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm, idx, face_idx, Ls*param.ghostFace[3], param); } } } #endif template <typename FloatN, typename Float> class PackFaceDW : public PackFace<FloatN, Float> { private: int inputPerSite() const { return 24; } // input is full spinor int outputPerSite() const { return 12; } // output is spin projected public: PackFaceDW(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity) : PackFace<FloatN, Float>(faces, in, dagger, parity, 1) { } virtual ~PackFaceDW() { } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_DOMAIN_WALL_DIRAC PackParam<FloatN> param = this->prepareParam(); if (this->dagger) { packFaceDWKernel<1><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(param); } else { packFaceDWKernel<0><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(param); } #else errorQuda("DW face packing kernel is not built"); #endif } long long flops() const { return outputPerSite()*this->threads(); } }; template <typename FloatN, typename Float> class PackFaceDW4D : public PackFace<FloatN, Float> { private: int inputPerSite() const { return 24; } // input is full spinor int outputPerSite() const { return 12; } // output is spin projected public: PackFaceDW4D(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity) : PackFace<FloatN, Float>(faces, in, dagger, parity, 1) { } virtual ~PackFaceDW4D() { } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_DOMAIN_WALL_DIRAC PackParam<FloatN> param = this->prepareParam(); if (this->dagger) { packFaceDW4DKernel<1><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(param); } else { packFaceDW4DKernel<0><<<tp.grid, tp.block, 
tp.shared_bytes, stream>>>(param); } #else errorQuda("4D preconditioned DW face packing kernel is not built"); #endif } long long flops() const { return outputPerSite()*this->threads(); } }; void packFaceDW(void *ghost_buf, cudaColorSpinorField &in, const int dagger, const int parity, const cudaStream_t &stream) { if(in.DWFPCtype() == QUDA_4D_PC) { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceDW4D<double2, double> pack((double2*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceDW4D<float4, float> pack((float4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceDW4D<short4, float> pack((short4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; } } else { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceDW<double2, double> pack((double2*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceDW<float4, float> pack((float4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceDW<short4, float> pack((short4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; } } } #ifdef GPU_NDEG_TWISTED_MASS_DIRAC template <int dagger, typename FloatN> __global__ void packFaceNdegTMKernel(PackParam<FloatN> param) { const int nFace = 1; // 1 face for Wilson const int Nf = 2; int face_idx = blockIdx.x*blockDim.x + threadIdx.x; if (face_idx >= param.threads) return; // determine which dimension we are packing const int dim = dimFromFaceIndex(face_idx, param); // compute where the output is located // compute an index into the local volume from the index into the face // read spinor, spin-project, and write half spinor to face if (dim == 0) { // face_num determines which end of the lattice we are packing: 0 = beginning, 1 = end // FIXME these param.ghostFace constants do not include the Nf dimension const int face_num = (face_idx >= nFace*Nf*param.ghostFace[0]) ? 1 : 0; face_idx -= face_num*nFace*Nf*param.ghostFace[0]; if (face_num == 0) { const int idx = indexFromNdegTMFaceIndex<0,nFace,0>(face_idx,Nf*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[0], param); } else { const int idx = indexFromNdegTMFaceIndex<0,nFace,1>(face_idx,Nf*param.ghostFace[0],param.parity,param.X); packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[0], param); } } else if (dim == 1) { const int face_num = (face_idx >= nFace*Nf*param.ghostFace[1]) ? 1 : 0; face_idx -= face_num*nFace*Nf*param.ghostFace[1]; if (face_num == 0) { const int idx = indexFromNdegTMFaceIndex<1,nFace,0>(face_idx,Nf*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[1], param); } else { const int idx = indexFromNdegTMFaceIndex<1,nFace,1>(face_idx,Nf*param.ghostFace[1],param.parity,param.X); packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[1], param); } } else if (dim == 2) { const int face_num = (face_idx >= nFace*Nf*param.ghostFace[2]) ? 
1 : 0; face_idx -= face_num*nFace*Nf*param.ghostFace[2]; if (face_num == 0) { const int idx = indexFromNdegTMFaceIndex<2,nFace,0>(face_idx,Nf*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[2], param); } else { const int idx = indexFromNdegTMFaceIndex<2,nFace,1>(face_idx,Nf*param.ghostFace[2],param.parity,param.X); packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[2], param); } } else { const int face_num = (face_idx >= nFace*Nf*param.ghostFace[3]) ? 1 : 0; face_idx -= face_num*nFace*Nf*param.ghostFace[3]; if (face_num == 0) { const int idx = indexFromNdegTMFaceIndex<3,nFace,0>(face_idx,Nf*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[3], param); } else { const int idx = indexFromNdegTMFaceIndex<3,nFace,1>(face_idx,Nf*param.ghostFace[3],param.parity,param.X); packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in, param.inNorm, idx, face_idx, Nf*param.ghostFace[3], param); } } } #endif template <typename FloatN, typename Float> class PackFaceNdegTM : public PackFace<FloatN, Float> { private: int inputPerSite() const { return 24; } // input is full spinor int outputPerSite() const { return 12; } // output is spin projected public: PackFaceNdegTM(FloatN *faces, const cudaColorSpinorField *in, const int dagger, const int parity) : PackFace<FloatN, Float>(faces, in, dagger, parity, 1) { } virtual ~PackFaceNdegTM() { } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); #ifdef GPU_NDEG_TWISTED_MASS_DIRAC PackParam<FloatN> param = this->prepareParam(); if (this->dagger) { packFaceNdegTMKernel<1><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(param); } else { packFaceNdegTMKernel<0><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(param); } #else errorQuda("Non-degenerate twisted mass face packing kernel is not built"); #endif } long long flops() const { return outputPerSite()*this->threads(); } }; void packFaceNdegTM(void *ghost_buf, cudaColorSpinorField &in, const int dagger, const int parity, const cudaStream_t &stream) { switch(in.Precision()) { case QUDA_DOUBLE_PRECISION: { PackFaceNdegTM<double2, double> pack((double2*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_SINGLE_PRECISION: { PackFaceNdegTM<float4, float> pack((float4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; case QUDA_HALF_PRECISION: { PackFaceNdegTM<short4, float> pack((short4*)ghost_buf, &in, dagger, parity); pack.apply(stream); } break; } } void packFace(void *ghost_buf, cudaColorSpinorField &in, const int nFace, const int dagger, const int parity, const int dim, const int face_num, const cudaStream_t &stream, const double a, const double b) { int nDimPack = 0; if(dim < 0){ for (int d=0; d<4; d++) { if(!commDim[d]) continue; if (d != 3 || getKernelPackT() || a != 0.0 || b!= 0.0) nDimPack++; } }else{ if(commDim[dim]){ if(dim!=3 || getKernelPackT() || a!=0.0 || b != 0.0) nDimPack++; } } if (!nDimPack) return; // if zero then we have nothing to pack if (nFace != 1 && in.Nspin() != 1) errorQuda("Unsupported number of faces %d", nFace); // Need to update this logic for other multi-src dslash packing if (in.Nspin() == 1) { packFaceStaggered(ghost_buf, in, nFace, dagger, parity, dim, face_num, stream); } else if (a!=0.0 
|| b!=0.0) { // Need to update this logic for other multi-src dslash packing if(in.TwistFlavor() == QUDA_TWIST_PLUS || in.TwistFlavor() == QUDA_TWIST_MINUS) { packTwistedFaceWilson(ghost_buf, in, dagger, parity, a, b, stream); } else { errorQuda("Cannot perform twisted packing for the spinor."); } } else if (in.Ndim() == 5) { if(in.TwistFlavor() == QUDA_TWIST_INVALID) { packFaceDW(ghost_buf, in, dagger, parity, stream); } else { packFaceNdegTM(ghost_buf, in, dagger, parity, stream); } } else { packFaceWilson(ghost_buf, in, dagger, parity, stream); } } void packFaceExtended(void* buffer, cudaColorSpinorField &field, const int nFace, const int R[], const int dagger, const int parity, const int dim, const int face_num, const cudaStream_t &stream, const bool unpack) { int nDimPack = 0; if(dim < 0){ for(int d=0; d<4; d++){ if(R[d]) nDimPack++; } }else{ if(R[dim]) nDimPack++; } if(!nDimPack) return; // if zero then we have nothing to pack if(field.Nspin() == 1){ packFaceExtendedStaggered(buffer, field, nFace, R, dagger, parity, dim, face_num, stream, unpack); }else{ errorQuda("Extended quark field is not supported"); } } #endif // MULTI_GPU }
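// Illustrative sketch (hypothetical helper, not the QUDA implementation): the packing
// kernels above recover which dimension a 1-D face index belongs to via dimFromFaceIndex(),
// using the threadDimMapLower/threadDimMapUpper table that prepareParam() fills with
// cumulative face sizes of the partitioned dimensions. A minimal host-side equivalent,
// assuming a 4-entry lower/upper table laid out the same way, could look like this:
static inline int dimFromFaceIndexHost(int face_idx, const int lower[4], const int upper[4]) {
  for (int d = 0; d < 4; d++)                        // dimensions are laid out back to back
    if (face_idx >= lower[d] && face_idx < upper[d]) // non-partitioned dims have lower == upper
      return d;
  return -1;                                         // index outside every packed dimension
}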
67a9465e2be09193cdaaa4091c37dd6810bb02a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/binary_search.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/scan.h> #include <thrust/unique.h> #include "cuda_utils.h" #include "parameters.h" #include "seed_filter.h" #include "seed_filter_interface.h" #include "store.h" #include "store_gpu.h" // Each segmentPair is 16B // With 64MB for the HSPs array per 1GB GPU memory // With higher GPU memory, the size just linearly increases #define MAX_HITS_PER_GB 4194304 int MAX_SEEDS; int MAX_HITS; uint32_t seed_size; int **d_sub_mat; int xdrop; int hspthresh; bool noentropy; char** d_query_seq; char** d_query_rc_seq; uint32_t query_length[BUFFER_DEPTH]; uint64_t** d_seed_offsets; uint32_t** d_hit_num_array; std::vector<thrust::device_vector<uint32_t> > d_hit_num_vec; uint32_t** d_done; std::vector<thrust::device_vector<uint32_t> > d_done_vec; segmentPair** d_hsp; std::vector<thrust::device_vector<segmentPair> > d_hsp_vec; segmentPair** d_hsp_reduced; std::vector<thrust::device_vector<segmentPair> > d_hsp_reduced_vec; struct hspEqual{ __host__ __device__ bool operator()(segmentPair x, segmentPair y){ return ( ( (x.ref_start - x.query_start) == (y.ref_start - y.query_start) ) && ( ( (x.ref_start >= y.ref_start) && ( (x.ref_start + x.len) <= (y.ref_start + y.len) ) ) || ( ( y.ref_start >= x.ref_start ) && ( (y.ref_start + y.len) <= (x.ref_start + x.len) ) ) ) ); } }; struct hspComp{ __host__ __device__ bool operator()(segmentPair x, segmentPair y){ if((x.ref_start - x.query_start) < (y.ref_start - y.query_start)) return true; else if((x.ref_start - x.query_start) == (y.ref_start - y.query_start)){ if(x.ref_start < y.ref_start) return true; else if(x.ref_start == y.ref_start){ if(x.len < y.len) return true; else if(x.len == y.len){ if(x.score > y.score) return true; else return false; } else return false; } else return false; } else return false; } }; __global__ void compress_string_rev_comp (uint32_t len, char* src_seq, char* dst_seq, char* dst_seq_rc){ int thread_id = threadIdx.x; int block_dim = blockDim.x; int grid_dim = gridDim.x; int block_id = blockIdx.x; int stride = block_dim * grid_dim; uint32_t start = block_dim * block_id + thread_id; for (uint32_t i = start; i < len; i += stride) { char ch = src_seq[i]; char dst = X_NT; char dst_rc = X_NT; if (ch == 'A'){ dst = A_NT; dst_rc = T_NT; } else if (ch == 'C'){ dst = C_NT; dst_rc = G_NT; } else if (ch == 'G'){ dst = G_NT; dst_rc = C_NT; } else if (ch == 'T'){ dst = T_NT; dst_rc = A_NT; } else if ((ch == 'a') || (ch == 'c') || (ch == 'g') || (ch == 't')){ dst = L_NT; dst_rc = L_NT; } else if ((ch == 'n') || (ch == 'N')){ dst = N_NT; dst_rc = N_NT; } else if (ch == '&'){ dst = E_NT; dst_rc = E_NT; } dst_seq[i] = dst; dst_seq_rc[len -1 -i] = dst_rc; } } __global__ void find_num_hits (int num_seeds, const uint32_t* __restrict__ d_index_table, uint64_t* seed_offsets, uint32_t* seed_hit_num){ int thread_id = threadIdx.x; int block_dim = blockDim.x; int grid_dim = gridDim.x; int block_id = blockIdx.x; int stride = block_dim * grid_dim; uint32_t start = block_dim * block_id + thread_id; uint32_t num_seed_hit; uint32_t seed; for (uint32_t id = start; id < num_seeds; id += stride) { seed = (seed_offsets[id] >> 32); // start and end from the seed block_id table num_seed_hit = d_index_table[seed]; if (seed > 0){ num_seed_hit -= d_index_table[seed-1]; } seed_hit_num[id] = num_seed_hit; } } __global__ void find_hits 
(const uint32_t* __restrict__ d_index_table, const uint32_t* __restrict__ d_pos_table, uint64_t* d_seed_offsets, uint32_t seed_size, uint32_t* seed_hit_num, int num_hits, segmentPair* d_hsp, uint32_t start_seed_index, uint32_t start_hit_index){ int thread_id = threadIdx.x; int block_id = blockIdx.x; int warp_size = warpSize; int lane_id = thread_id%warp_size; int warp_id = (thread_id-lane_id)/warp_size; __shared__ uint32_t start, end; __shared__ uint32_t seed; __shared__ uint64_t seed_offset; __shared__ uint32_t ref_loc[NUM_WARPS]; __shared__ uint32_t query_loc; __shared__ uint32_t seed_hit_prefix; if(thread_id == 0){ seed_offset = d_seed_offsets[block_id+start_seed_index]; seed = (seed_offset >> 32); query_loc = ((seed_offset << 32) >> 32) + seed_size; // start and end from the seed block_id table end = d_index_table[seed]; start = 0; if (seed > 0){ start = d_index_table[seed-1]; } seed_hit_prefix = seed_hit_num[block_id+start_seed_index]; } __syncthreads(); for (int id1 = start; id1 < end; id1 += NUM_WARPS) { if(id1+warp_id < end){ if(lane_id == 0){ ref_loc[warp_id] = d_pos_table[id1+warp_id] + seed_size; int dram_address = seed_hit_prefix -id1 - warp_id+start-1-start_hit_index; d_hsp[dram_address].ref_start = ref_loc[warp_id]; d_hsp[dram_address].query_start = query_loc; d_hsp[dram_address].len = 0; d_hsp[dram_address].score = 0; } } } } __global__ void find_hsps (const char* __restrict__ d_ref_seq, const char* __restrict__ d_query_seq, uint32_t ref_len, uint32_t query_len, int *d_sub_mat, bool noentropy, int xdrop, int hspthresh, int num_hits, segmentPair* d_hsp, uint32_t* d_done){ int thread_id = threadIdx.x; int block_id = blockIdx.x; int num_blocks = gridDim.x; int warp_size = warpSize; int lane_id = thread_id%warp_size; int warp_id = (thread_id-lane_id)/warp_size; __shared__ uint32_t ref_loc[NUM_WARPS]; __shared__ uint32_t query_loc[NUM_WARPS]; __shared__ int total_score[NUM_WARPS]; __shared__ int prev_score[NUM_WARPS]; __shared__ int prev_max_score[NUM_WARPS]; __shared__ int prev_max_pos[NUM_WARPS]; __shared__ bool edge_found[NUM_WARPS]; __shared__ bool xdrop_found[NUM_WARPS]; __shared__ bool new_max_found[NUM_WARPS]; __shared__ uint32_t left_extent[NUM_WARPS]; __shared__ int extent[NUM_WARPS]; __shared__ uint32_t tile[NUM_WARPS]; __shared__ double entropy[NUM_WARPS]; int thread_score; int max_thread_score; int max_pos; int temp_pos; bool xdrop_done; bool temp_xdrop_done; int temp; short count[4]; short count_del[4]; char r_chr; char q_chr; uint32_t ref_pos; uint32_t query_pos; int pos_offset; __shared__ int sub_mat[NUC2]; if(thread_id < NUC2){ sub_mat[thread_id] = d_sub_mat[thread_id]; } __syncthreads(); for(int hid0 = block_id*NUM_WARPS; hid0 < num_hits; hid0 += NUM_WARPS*num_blocks){ int hid = hid0 + warp_id; if(hid < num_hits){ if(lane_id == 0){ ref_loc[warp_id] = d_hsp[hid].ref_start; query_loc[warp_id] = d_hsp[hid].query_start; total_score[warp_id] = 0; } } else{ if(lane_id == 0){ ref_loc[warp_id] = d_hsp[hid0].ref_start; query_loc[warp_id] = d_hsp[hid0].query_start; total_score[warp_id] = 0; } } __syncwarp(); ////////////////////////////////////////////////////////////////// //Right extension if(lane_id ==0){ tile[warp_id] = 0; xdrop_found[warp_id] = false; edge_found[warp_id] = false; new_max_found[warp_id] = false; entropy[warp_id] = 1.0f; prev_score[warp_id] = 0; prev_max_score[warp_id] = 0; prev_max_pos[warp_id] = -1; extent[warp_id] = 0; } count[0] = 0; count[1] = 0; count[2] = 0; count[3] = 0; count_del[0] = 0; count_del[1] = 0; count_del[2] = 0; count_del[3] = 0; 
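// Right-extension loop below: each pass scores one warp-wide tile (one lane per candidate
// position, scored against sub_mat), then a series of __shfl_up_sync inclusive scans builds
// the running score, the running maximum score with its position, and an accumulated X-drop
// flag; lane warp_size-1 either terminates the extension (X-drop hit or sequence edge) or
// advances tile[warp_id] by warp_size. The count[]/count_del[] tallies record matched
// nucleotides inside/beyond the current maximum and feed the entropy factor computed later.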
max_pos = 0; __syncwarp(); while(!xdrop_found[warp_id] && !edge_found[warp_id]){ pos_offset = lane_id + tile[warp_id]; ref_pos = ref_loc[warp_id] + pos_offset; query_pos = query_loc[warp_id] + pos_offset; thread_score = 0; if(ref_pos < ref_len && query_pos < query_len){ r_chr = d_ref_seq[ref_pos]; q_chr = d_query_seq[query_pos]; thread_score = sub_mat[r_chr*NUC+q_chr]; } __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp = __shfl_up_sync(0xFFFFFFFF, thread_score, offset); if(lane_id >= offset){ thread_score += temp; } } thread_score += prev_score[warp_id]; if(thread_score > prev_max_score[warp_id]){ max_thread_score = thread_score; max_pos = pos_offset; } else{ max_thread_score = prev_max_score[warp_id]; max_pos = prev_max_pos[warp_id]; } __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset); temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset); if(lane_id >= offset){ if(temp >= max_thread_score){ max_thread_score = temp; max_pos = temp_pos; } } } xdrop_done = ((max_thread_score-thread_score) > xdrop); __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset); if(lane_id >= offset){ xdrop_done |= temp_xdrop_done; } } if(xdrop_done == 1){ max_thread_score = prev_max_score[warp_id]; max_pos = prev_max_pos[warp_id]; } __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset); temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset); if(lane_id >= offset){ if(temp >= max_thread_score){ max_thread_score = temp; max_pos = temp_pos; } } } __syncwarp(); if(lane_id == warp_size-1){ if(max_pos > prev_max_pos[warp_id]) new_max_found[warp_id] = true; else new_max_found[warp_id] = false; if(xdrop_done){ total_score[warp_id] += max_thread_score; xdrop_found[warp_id] = true; extent[warp_id] = max_pos; prev_max_pos[warp_id] = max_pos; tile[warp_id] = max_pos; } else if(ref_pos >= ref_len || query_pos >= query_len){ total_score[warp_id] += max_thread_score; edge_found[warp_id] = true; extent[warp_id] = max_pos; prev_max_pos[warp_id] = max_pos; tile[warp_id] = max_pos; } else{ prev_score[warp_id] = thread_score; prev_max_score[warp_id] = max_thread_score; prev_max_pos[warp_id] = max_pos; tile[warp_id]+= warp_size; } } __syncwarp(); if(new_max_found[warp_id]){ for(int i = 0; i < 4; i++){ count[i] = count[i] + count_del[i]; count_del[i] = 0; } } __syncwarp(); if(r_chr == q_chr){ if(pos_offset <= prev_max_pos[warp_id]){ count[r_chr] += 1; } else{ count_del[r_chr] += 1; } } __syncwarp(); } __syncwarp(); //////////////////////////////////////////////////////////////// //Left extension if(lane_id ==0){ tile[warp_id] = 0; xdrop_found[warp_id] = false; edge_found[warp_id] = false; new_max_found[warp_id] = false; prev_score[warp_id] = 0; prev_max_score[warp_id] = 0; prev_max_pos[warp_id] = 0; left_extent[warp_id] = 0; } count_del[0] = 0; count_del[1] = 0; count_del[2] = 0; count_del[3] = 0; max_pos = 0; __syncwarp(); while(!xdrop_found[warp_id] && !edge_found[warp_id]){ pos_offset = lane_id+1+tile[warp_id]; thread_score = 0; if(ref_loc[warp_id] >= pos_offset && query_loc[warp_id] >= pos_offset){ ref_pos = ref_loc[warp_id] - pos_offset; query_pos = query_loc[warp_id] - pos_offset; r_chr = d_ref_seq[ref_pos]; q_chr = d_query_seq[query_pos]; thread_score = sub_mat[r_chr*NUC+q_chr]; } 
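// The left extension reuses the same warp-level scan recipe as the right extension above,
// with pos_offset now counted downward from (ref_loc, query_loc) and the edge test replaced
// by an underflow check against the start of the reference/query sequences.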
#pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp = __shfl_up_sync(0xFFFFFFFF, thread_score, offset); if(lane_id >= offset){ thread_score += temp; } } thread_score += prev_score[warp_id]; if(thread_score > prev_max_score[warp_id]){ max_thread_score = thread_score; max_pos = pos_offset; } else{ max_thread_score = prev_max_score[warp_id]; max_pos = prev_max_pos[warp_id]; } __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset); temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset); if(lane_id >= offset){ if(temp >= max_thread_score){ max_thread_score = temp; max_pos = temp_pos; } } } xdrop_done = ((max_thread_score-thread_score) > xdrop); __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset); if(lane_id >= offset){ xdrop_done |= temp_xdrop_done; } } if(xdrop_done){ max_thread_score = prev_max_score[warp_id]; max_pos = prev_max_pos[warp_id]; } __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset); temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset); if(lane_id >= offset){ if(temp >= max_thread_score){ max_thread_score = temp; max_pos = temp_pos; } } } __syncwarp(); if(lane_id == warp_size-1){ if(max_pos > prev_max_pos[warp_id]) new_max_found[warp_id] = true; else new_max_found[warp_id] = false; if(xdrop_done){ total_score[warp_id]+=max_thread_score; xdrop_found[warp_id] = true; left_extent[warp_id] = max_pos; extent[warp_id] += left_extent[warp_id]; prev_max_pos[warp_id] = max_pos; tile[warp_id] = max_pos; } else if(ref_loc[warp_id] < pos_offset || query_loc[warp_id] < pos_offset){ total_score[warp_id]+=max_thread_score; edge_found[warp_id] = true; left_extent[warp_id] = max_pos; extent[warp_id] += left_extent[warp_id]; prev_max_pos[warp_id] = max_pos; tile[warp_id] = max_pos; } else{ prev_score[warp_id] = thread_score; prev_max_score[warp_id] = max_thread_score; prev_max_pos[warp_id] = max_pos; tile[warp_id]+=warp_size; } } __syncwarp(); if(new_max_found[warp_id]){ for(int i = 0; i < 4; i++){ count[i] = count[i] + count_del[i]; count_del[i] = 0; } } __syncwarp(); if(r_chr == q_chr){ if(pos_offset <= prev_max_pos[warp_id]){ count[r_chr] += 1; } else{ count_del[r_chr] += 1; } } __syncwarp(); } ////////////////////////////////////////////////////////////////// if(total_score[warp_id] >= hspthresh && total_score[warp_id] <= 3*hspthresh && !noentropy){ for(int i = 0; i < 4; i++){ #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ count[i] += __shfl_up_sync(0xFFFFFFFF, count[i], offset); } } __syncwarp(); if(lane_id == warp_size-1 && ((count[0]+count[1]+count[2]+count[3]) >= 20)){ entropy[warp_id] = 0.f; for(int i = 0; i < 4; i++){ entropy[warp_id] += ((double) count[i])/((double) (extent[warp_id]+1)) * ((count[i] != 0) ? 
log(((double) count[i]) / ((double) (extent[warp_id]+1))): 0.f); } entropy[warp_id] = -entropy[warp_id]/log(4.0f); } } __syncwarp(); ////////////////////////////////////////////////////////////////// if(hid < num_hits){ if(lane_id == 0){ if( ((int) (((float) total_score[warp_id]) * entropy[warp_id])) >= hspthresh){ d_hsp[hid].ref_start = ref_loc[warp_id] - left_extent[warp_id]; d_hsp[hid].query_start = query_loc[warp_id] - left_extent[warp_id]; d_hsp[hid].len = extent[warp_id]; if(entropy[warp_id] > 0) d_hsp[hid].score = total_score[warp_id]*entropy[warp_id]; d_done[hid] = 1; } else{ d_hsp[hid].ref_start = ref_loc[warp_id]; d_hsp[hid].query_start = query_loc[warp_id]; d_hsp[hid].len = 0; d_hsp[hid].score = 0; d_done[hid] = 0; } } } __syncwarp(); } } __global__ void compress_output (uint32_t* d_done, segmentPair* d_hsp, segmentPair* d_hsp_reduced, int num_hits){ int thread_id = threadIdx.x; int block_dim = blockDim.x; int grid_dim = gridDim.x; int block_id = blockIdx.x; int stride = block_dim * grid_dim; uint32_t start = block_dim * block_id + thread_id; int index = 0; for (uint32_t id = start; id < num_hits; id += stride) { index = d_done[id]; if(id > 0){ if(index > d_done[id-1]){ d_hsp_reduced[index-1] = d_hsp[id]; } } else{ if(index == 1){ d_hsp_reduced[0] = d_hsp[0]; } } } } std::vector<segmentPair> SeedAndFilter (std::vector<uint64_t> seed_offset_vector, bool rev, uint32_t buffer){ uint32_t num_hits = 0; uint32_t total_anchors = 0; uint32_t num_seeds = seed_offset_vector.size(); assert(num_seeds <= MAX_SEEDS); uint64_t* tmp_offset = (uint64_t*) malloc(num_seeds*sizeof(uint64_t)); for (uint32_t i = 0; i < num_seeds; i++) { tmp_offset[i] = seed_offset_vector[i]; } int g; std::unique_lock<std::mutex> locker(mu); if (available_gpus.empty()) { cv.wait(locker, [](){return !available_gpus.empty();}); } g = available_gpus.back(); available_gpus.pop_back(); locker.unlock(); check_cuda_setDevice(g, "SeedAndFilter"); check_cuda_memcpy((void*)d_seed_offsets[g], (void*)tmp_offset, num_seeds*sizeof(uint64_t), hipMemcpyHostToDevice, "seed_offsets"); hipLaunchKernelGGL(( find_num_hits) , dim3(MAX_BLOCKS), dim3(MAX_THREADS), 0, 0, num_seeds, d_index_table[g], d_seed_offsets[g], d_hit_num_array[g]); thrust::inclusive_scan(d_hit_num_vec[g].begin(), d_hit_num_vec[g].begin() + num_seeds, d_hit_num_vec[g].begin()); check_cuda_memcpy((void*)&num_hits, (void*)(d_hit_num_array[g]+num_seeds-1), sizeof(uint32_t), hipMemcpyDeviceToHost, "num_hits"); int num_iter = num_hits/MAX_HITS+1; uint32_t iter_hit_limit = MAX_HITS; thrust::device_vector<uint32_t> limit_pos (num_iter); for(int i = 0; i < num_iter-1; i++){ thrust::device_vector<uint32_t>::iterator result_end = thrust::lower_bound(d_hit_num_vec[g].begin(), d_hit_num_vec[g].begin()+num_seeds, iter_hit_limit); uint32_t pos = thrust::distance(d_hit_num_vec[g].begin(), result_end)-1; iter_hit_limit = d_hit_num_vec[g][pos]+MAX_HITS; limit_pos[i] = pos; } limit_pos[num_iter-1] = num_seeds-1; segmentPair** h_hsp = (segmentPair**) malloc(num_iter*sizeof(segmentPair*)); uint32_t* num_anchors = (uint32_t*) calloc(num_iter, sizeof(uint32_t)); uint32_t start_seed_index = 0; uint32_t start_hit_val = 0; uint32_t iter_num_seeds, iter_num_hits; if(num_hits > 0){ for(int i = 0; i < num_iter; i++){ iter_num_seeds = limit_pos[i] + 1 - start_seed_index; iter_num_hits = d_hit_num_vec[g][limit_pos[i]] - start_hit_val; hipLaunchKernelGGL(( find_hits) , dim3(iter_num_seeds), dim3(BLOCK_SIZE), 0, 0, d_index_table[g], d_pos_table[g], d_seed_offsets[g], seed_size, d_hit_num_array[g], 
iter_num_hits, d_hsp[g], start_seed_index, start_hit_val); if(rev){ hipLaunchKernelGGL(( find_hsps) , dim3(1024), dim3(BLOCK_SIZE), 0, 0, d_ref_seq[g], d_query_rc_seq[buffer*NUM_DEVICES+g], ref_len, query_length[buffer], d_sub_mat[g], noentropy, xdrop, hspthresh, iter_num_hits, d_hsp[g], d_done[g]); } else{ hipLaunchKernelGGL(( find_hsps) , dim3(1024), dim3(BLOCK_SIZE), 0, 0, d_ref_seq[g], d_query_seq[buffer*NUM_DEVICES+g], ref_len, query_length[buffer], d_sub_mat[g], noentropy, xdrop, hspthresh, iter_num_hits, d_hsp[g], d_done[g]); } thrust::inclusive_scan(d_done_vec[g].begin(), d_done_vec[g].begin() + iter_num_hits, d_done_vec[g].begin()); check_cuda_memcpy((void*)&num_anchors[i], (void*)(d_done[g]+iter_num_hits-1), sizeof(uint32_t), hipMemcpyDeviceToHost, "num_anchors"); if(num_anchors[i] > 0){ hipLaunchKernelGGL(( compress_output) , dim3(MAX_BLOCKS), dim3(MAX_THREADS), 0, 0, d_done[g], d_hsp[g], d_hsp_reduced[g], iter_num_hits); thrust::stable_sort(d_hsp_reduced_vec[g].begin(), d_hsp_reduced_vec[g].begin()+num_anchors[i], hspComp()); thrust::device_vector<segmentPair>::iterator result_end = thrust::unique_copy(d_hsp_reduced_vec[g].begin(), d_hsp_reduced_vec[g].begin()+num_anchors[i], d_hsp_vec[g].begin(), hspEqual()); num_anchors[i] = thrust::distance(d_hsp_vec[g].begin(), result_end), num_anchors[i]; total_anchors += num_anchors[i]; h_hsp[i] = (segmentPair*) calloc(num_anchors[i], sizeof(segmentPair)); check_cuda_memcpy((void*)h_hsp[i], (void*)d_hsp[g], num_anchors[i]*sizeof(segmentPair), hipMemcpyDeviceToHost, "hsp_output"); } start_seed_index = limit_pos[i] + 1; start_hit_val = d_hit_num_vec[g][limit_pos[i]]; } } limit_pos.clear(); { std::unique_lock<std::mutex> locker(mu); available_gpus.push_back(g); locker.unlock(); cv.notify_one(); } std::vector<segmentPair> gpu_filter_output; segmentPair first_el; first_el.len = total_anchors; first_el.score = num_hits; gpu_filter_output.push_back(first_el); if(total_anchors > 0){ for(int it = 0; it < num_iter; it++){ for(int i = 0; i < num_anchors[it]; i++){ gpu_filter_output.push_back(h_hsp[it][i]); } if(num_anchors[it] > 0){ free(h_hsp[it]); } } } free(h_hsp); free(num_anchors); free(tmp_offset); return gpu_filter_output; } void InitializeProcessor (bool transition, uint32_t WGA_CHUNK, uint32_t input_seed_size, int* sub_mat, int input_xdrop, int input_hspthresh, bool input_noentropy){ hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); float global_mem_gb = static_cast<float>(deviceProp.totalGlobalMem / 1073741824.0f); if(transition) MAX_SEEDS = 13*WGA_CHUNK; else MAX_SEEDS = WGA_CHUNK; MAX_HITS = MAX_HITS_PER_GB*global_mem_gb; seed_size = input_seed_size; xdrop = input_xdrop; hspthresh = input_hspthresh; noentropy = input_noentropy; d_sub_mat = (int**) malloc(NUM_DEVICES*sizeof(int*)); d_query_seq = (char**) malloc(BUFFER_DEPTH*NUM_DEVICES*sizeof(char*)); d_query_rc_seq = (char**) malloc(BUFFER_DEPTH*NUM_DEVICES*sizeof(char*)); d_seed_offsets = (uint64_t**) malloc(NUM_DEVICES*sizeof(uint64_t*)); d_hit_num_array = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*)); d_hit_num_vec.reserve(NUM_DEVICES); d_done = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*)); d_done_vec.reserve(NUM_DEVICES); d_hsp = (segmentPair**) malloc(NUM_DEVICES*sizeof(segmentPair*)); d_hsp_vec.reserve(NUM_DEVICES); d_hsp_reduced = (segmentPair**) malloc(NUM_DEVICES*sizeof(segmentPair*)); d_hsp_reduced_vec.reserve(NUM_DEVICES); segmentPair zeroHsp; zeroHsp.ref_start = 0; zeroHsp.query_start = 0; zeroHsp.len = 0; zeroHsp.score = 0; for(int g = 0; g < 
NUM_DEVICES; g++){ check_cuda_setDevice(g, "InitializeProcessor"); check_cuda_malloc((void**)&d_sub_mat[g], NUC2*sizeof(int), "sub_mat"); check_cuda_memcpy((void*)d_sub_mat[g], (void*)sub_mat, NUC2*sizeof(int), hipMemcpyHostToDevice, "sub_mat"); check_cuda_malloc((void**)&d_seed_offsets[g], MAX_SEEDS*sizeof(uint64_t), "seed_offsets"); d_hit_num_vec.emplace_back(MAX_SEEDS, 0); d_hit_num_array[g] = thrust::raw_pointer_cast(d_hit_num_vec.at(g).data()); d_done_vec.emplace_back(MAX_HITS, 0); d_done[g] = thrust::raw_pointer_cast(d_done_vec.at(g).data()); d_hsp_vec.emplace_back(MAX_HITS, zeroHsp); d_hsp[g] = thrust::raw_pointer_cast(d_hsp_vec.at(g).data()); d_hsp_reduced_vec.emplace_back(MAX_HITS, zeroHsp); d_hsp_reduced[g] = thrust::raw_pointer_cast(d_hsp_reduced_vec.at(g).data()); available_gpus.push_back(g); } } void SendQueryWriteRequest (size_t start_addr, uint32_t len, uint32_t buffer){ query_length[buffer] = len; for(int g = 0; g < NUM_DEVICES; g++){ check_cuda_setDevice(g, "SendQueryWriteRequest"); char* d_query_seq_tmp; check_cuda_malloc((void**)&d_query_seq_tmp, len*sizeof(char), "tmp query_seq"); check_cuda_memcpy((void*)d_query_seq_tmp, (void*)(query_DRAM->buffer + start_addr), len*sizeof(char), hipMemcpyHostToDevice, "query_seq"); check_cuda_malloc((void**)&d_query_seq[buffer*NUM_DEVICES+g], len*sizeof(char), "query_seq"); check_cuda_malloc((void**)&d_query_rc_seq[buffer*NUM_DEVICES+g], len*sizeof(char), "query_rc_seq"); hipLaunchKernelGGL(( compress_string_rev_comp) , dim3(MAX_BLOCKS), dim3(MAX_THREADS), 0, 0, len, d_query_seq_tmp, d_query_seq[buffer*NUM_DEVICES+g], d_query_rc_seq[buffer*NUM_DEVICES+g]); check_cuda_free((void*)d_query_seq_tmp, "d_query_seq_tmp"); } } void ClearQuery(uint32_t buffer){ for(int g = 0; g < NUM_DEVICES; g++){ check_cuda_setDevice(g, "ClearQuery"); check_cuda_free((void*)d_query_seq[buffer*NUM_DEVICES+g], "d_query_seq"); check_cuda_free((void*)d_query_rc_seq[buffer*NUM_DEVICES+g], "d_query_rc_seq"); } } void ShutdownProcessor(){ d_done_vec.clear(); d_hit_num_vec.clear(); d_hsp_vec.clear(); d_hsp_reduced_vec.clear(); hipDeviceReset(); } InitializeProcessor_ptr g_InitializeProcessor = InitializeProcessor; SendQueryWriteRequest_ptr g_SendQueryWriteRequest = SendQueryWriteRequest; SeedAndFilter_ptr g_SeedAndFilter = SeedAndFilter; ClearQuery_ptr g_ClearQuery = ClearQuery; ShutdownProcessor_ptr g_ShutdownProcessor = ShutdownProcessor;
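// ---------------------------------------------------------------------------
// Standalone illustrative sketch, not part of the original seed_filter source:
// the hipLaunchKernelGGL calls above never query the launch status. This toy
// program shows one hedged way to surface launch failures with
// hipGetLastError; the kernel and helper names ("editorial_*") are invented.
// ---------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void editorial_noop_kernel(int* flag) {
    if (flag != nullptr) *flag = 1;
}

// Print (but do not abort on) the status of the most recent kernel launch.
static void editorial_check_launch(const char* what) {
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "%s: launch failed: %s\n", what, hipGetErrorString(err));
    }
}

int main() {
    int* d_flag = nullptr;
    hipMalloc((void**)&d_flag, sizeof(int));
    hipLaunchKernelGGL(editorial_noop_kernel, dim3(1), dim3(1), 0, 0, d_flag);
    editorial_check_launch("editorial_noop_kernel");
    // A synchronize afterwards reports errors that only show up at execution time.
    hipError_t sync_err = hipDeviceSynchronize();
    if (sync_err != hipSuccess) {
        fprintf(stderr, "execution failed: %s\n", hipGetErrorString(sync_err));
    }
    hipFree(d_flag);
    return 0;
}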
67a9465e2be09193cdaaa4091c37dd6810bb02a3.cu
#include <thrust/binary_search.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/scan.h> #include <thrust/unique.h> #include "cuda_utils.h" #include "parameters.h" #include "seed_filter.h" #include "seed_filter_interface.h" #include "store.h" #include "store_gpu.h" // Each segmentPair is 16B // With 64MB for the HSPs array per 1GB GPU memory // With higher GPU memory, the size just linearly increases #define MAX_HITS_PER_GB 4194304 int MAX_SEEDS; int MAX_HITS; uint32_t seed_size; int **d_sub_mat; int xdrop; int hspthresh; bool noentropy; char** d_query_seq; char** d_query_rc_seq; uint32_t query_length[BUFFER_DEPTH]; uint64_t** d_seed_offsets; uint32_t** d_hit_num_array; std::vector<thrust::device_vector<uint32_t> > d_hit_num_vec; uint32_t** d_done; std::vector<thrust::device_vector<uint32_t> > d_done_vec; segmentPair** d_hsp; std::vector<thrust::device_vector<segmentPair> > d_hsp_vec; segmentPair** d_hsp_reduced; std::vector<thrust::device_vector<segmentPair> > d_hsp_reduced_vec; struct hspEqual{ __host__ __device__ bool operator()(segmentPair x, segmentPair y){ return ( ( (x.ref_start - x.query_start) == (y.ref_start - y.query_start) ) && ( ( (x.ref_start >= y.ref_start) && ( (x.ref_start + x.len) <= (y.ref_start + y.len) ) ) || ( ( y.ref_start >= x.ref_start ) && ( (y.ref_start + y.len) <= (x.ref_start + x.len) ) ) ) ); } }; struct hspComp{ __host__ __device__ bool operator()(segmentPair x, segmentPair y){ if((x.ref_start - x.query_start) < (y.ref_start - y.query_start)) return true; else if((x.ref_start - x.query_start) == (y.ref_start - y.query_start)){ if(x.ref_start < y.ref_start) return true; else if(x.ref_start == y.ref_start){ if(x.len < y.len) return true; else if(x.len == y.len){ if(x.score > y.score) return true; else return false; } else return false; } else return false; } else return false; } }; __global__ void compress_string_rev_comp (uint32_t len, char* src_seq, char* dst_seq, char* dst_seq_rc){ int thread_id = threadIdx.x; int block_dim = blockDim.x; int grid_dim = gridDim.x; int block_id = blockIdx.x; int stride = block_dim * grid_dim; uint32_t start = block_dim * block_id + thread_id; for (uint32_t i = start; i < len; i += stride) { char ch = src_seq[i]; char dst = X_NT; char dst_rc = X_NT; if (ch == 'A'){ dst = A_NT; dst_rc = T_NT; } else if (ch == 'C'){ dst = C_NT; dst_rc = G_NT; } else if (ch == 'G'){ dst = G_NT; dst_rc = C_NT; } else if (ch == 'T'){ dst = T_NT; dst_rc = A_NT; } else if ((ch == 'a') || (ch == 'c') || (ch == 'g') || (ch == 't')){ dst = L_NT; dst_rc = L_NT; } else if ((ch == 'n') || (ch == 'N')){ dst = N_NT; dst_rc = N_NT; } else if (ch == '&'){ dst = E_NT; dst_rc = E_NT; } dst_seq[i] = dst; dst_seq_rc[len -1 -i] = dst_rc; } } __global__ void find_num_hits (int num_seeds, const uint32_t* __restrict__ d_index_table, uint64_t* seed_offsets, uint32_t* seed_hit_num){ int thread_id = threadIdx.x; int block_dim = blockDim.x; int grid_dim = gridDim.x; int block_id = blockIdx.x; int stride = block_dim * grid_dim; uint32_t start = block_dim * block_id + thread_id; uint32_t num_seed_hit; uint32_t seed; for (uint32_t id = start; id < num_seeds; id += stride) { seed = (seed_offsets[id] >> 32); // start and end from the seed block_id table num_seed_hit = d_index_table[seed]; if (seed > 0){ num_seed_hit -= d_index_table[seed-1]; } seed_hit_num[id] = num_seed_hit; } } __global__ void find_hits (const uint32_t* __restrict__ d_index_table, const uint32_t* __restrict__ d_pos_table, 
uint64_t* d_seed_offsets, uint32_t seed_size, uint32_t* seed_hit_num, int num_hits, segmentPair* d_hsp, uint32_t start_seed_index, uint32_t start_hit_index){ int thread_id = threadIdx.x; int block_id = blockIdx.x; int warp_size = warpSize; int lane_id = thread_id%warp_size; int warp_id = (thread_id-lane_id)/warp_size; __shared__ uint32_t start, end; __shared__ uint32_t seed; __shared__ uint64_t seed_offset; __shared__ uint32_t ref_loc[NUM_WARPS]; __shared__ uint32_t query_loc; __shared__ uint32_t seed_hit_prefix; if(thread_id == 0){ seed_offset = d_seed_offsets[block_id+start_seed_index]; seed = (seed_offset >> 32); query_loc = ((seed_offset << 32) >> 32) + seed_size; // start and end from the seed block_id table end = d_index_table[seed]; start = 0; if (seed > 0){ start = d_index_table[seed-1]; } seed_hit_prefix = seed_hit_num[block_id+start_seed_index]; } __syncthreads(); for (int id1 = start; id1 < end; id1 += NUM_WARPS) { if(id1+warp_id < end){ if(lane_id == 0){ ref_loc[warp_id] = d_pos_table[id1+warp_id] + seed_size; int dram_address = seed_hit_prefix -id1 - warp_id+start-1-start_hit_index; d_hsp[dram_address].ref_start = ref_loc[warp_id]; d_hsp[dram_address].query_start = query_loc; d_hsp[dram_address].len = 0; d_hsp[dram_address].score = 0; } } } } __global__ void find_hsps (const char* __restrict__ d_ref_seq, const char* __restrict__ d_query_seq, uint32_t ref_len, uint32_t query_len, int *d_sub_mat, bool noentropy, int xdrop, int hspthresh, int num_hits, segmentPair* d_hsp, uint32_t* d_done){ int thread_id = threadIdx.x; int block_id = blockIdx.x; int num_blocks = gridDim.x; int warp_size = warpSize; int lane_id = thread_id%warp_size; int warp_id = (thread_id-lane_id)/warp_size; __shared__ uint32_t ref_loc[NUM_WARPS]; __shared__ uint32_t query_loc[NUM_WARPS]; __shared__ int total_score[NUM_WARPS]; __shared__ int prev_score[NUM_WARPS]; __shared__ int prev_max_score[NUM_WARPS]; __shared__ int prev_max_pos[NUM_WARPS]; __shared__ bool edge_found[NUM_WARPS]; __shared__ bool xdrop_found[NUM_WARPS]; __shared__ bool new_max_found[NUM_WARPS]; __shared__ uint32_t left_extent[NUM_WARPS]; __shared__ int extent[NUM_WARPS]; __shared__ uint32_t tile[NUM_WARPS]; __shared__ double entropy[NUM_WARPS]; int thread_score; int max_thread_score; int max_pos; int temp_pos; bool xdrop_done; bool temp_xdrop_done; int temp; short count[4]; short count_del[4]; char r_chr; char q_chr; uint32_t ref_pos; uint32_t query_pos; int pos_offset; __shared__ int sub_mat[NUC2]; if(thread_id < NUC2){ sub_mat[thread_id] = d_sub_mat[thread_id]; } __syncthreads(); for(int hid0 = block_id*NUM_WARPS; hid0 < num_hits; hid0 += NUM_WARPS*num_blocks){ int hid = hid0 + warp_id; if(hid < num_hits){ if(lane_id == 0){ ref_loc[warp_id] = d_hsp[hid].ref_start; query_loc[warp_id] = d_hsp[hid].query_start; total_score[warp_id] = 0; } } else{ if(lane_id == 0){ ref_loc[warp_id] = d_hsp[hid0].ref_start; query_loc[warp_id] = d_hsp[hid0].query_start; total_score[warp_id] = 0; } } __syncwarp(); ////////////////////////////////////////////////////////////////// //Right extension if(lane_id ==0){ tile[warp_id] = 0; xdrop_found[warp_id] = false; edge_found[warp_id] = false; new_max_found[warp_id] = false; entropy[warp_id] = 1.0f; prev_score[warp_id] = 0; prev_max_score[warp_id] = 0; prev_max_pos[warp_id] = -1; extent[warp_id] = 0; } count[0] = 0; count[1] = 0; count[2] = 0; count[3] = 0; count_del[0] = 0; count_del[1] = 0; count_del[2] = 0; count_del[3] = 0; max_pos = 0; __syncwarp(); while(!xdrop_found[warp_id] && !edge_found[warp_id]){ 
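// Each pass of this loop scores one warp-sized tile of the right extension:
// every lane scores a single (ref, query) position, the shuffle scans below
// turn those per-lane scores into running totals and running maxima, and
// lane (warp_size-1) then applies the X-drop / sequence-edge test.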
pos_offset = lane_id + tile[warp_id]; ref_pos = ref_loc[warp_id] + pos_offset; query_pos = query_loc[warp_id] + pos_offset; thread_score = 0; if(ref_pos < ref_len && query_pos < query_len){ r_chr = d_ref_seq[ref_pos]; q_chr = d_query_seq[query_pos]; thread_score = sub_mat[r_chr*NUC+q_chr]; } __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp = __shfl_up_sync(0xFFFFFFFF, thread_score, offset); if(lane_id >= offset){ thread_score += temp; } } thread_score += prev_score[warp_id]; if(thread_score > prev_max_score[warp_id]){ max_thread_score = thread_score; max_pos = pos_offset; } else{ max_thread_score = prev_max_score[warp_id]; max_pos = prev_max_pos[warp_id]; } __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset); temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset); if(lane_id >= offset){ if(temp >= max_thread_score){ max_thread_score = temp; max_pos = temp_pos; } } } xdrop_done = ((max_thread_score-thread_score) > xdrop); __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset); if(lane_id >= offset){ xdrop_done |= temp_xdrop_done; } } if(xdrop_done == 1){ max_thread_score = prev_max_score[warp_id]; max_pos = prev_max_pos[warp_id]; } __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset); temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset); if(lane_id >= offset){ if(temp >= max_thread_score){ max_thread_score = temp; max_pos = temp_pos; } } } __syncwarp(); if(lane_id == warp_size-1){ if(max_pos > prev_max_pos[warp_id]) new_max_found[warp_id] = true; else new_max_found[warp_id] = false; if(xdrop_done){ total_score[warp_id] += max_thread_score; xdrop_found[warp_id] = true; extent[warp_id] = max_pos; prev_max_pos[warp_id] = max_pos; tile[warp_id] = max_pos; } else if(ref_pos >= ref_len || query_pos >= query_len){ total_score[warp_id] += max_thread_score; edge_found[warp_id] = true; extent[warp_id] = max_pos; prev_max_pos[warp_id] = max_pos; tile[warp_id] = max_pos; } else{ prev_score[warp_id] = thread_score; prev_max_score[warp_id] = max_thread_score; prev_max_pos[warp_id] = max_pos; tile[warp_id]+= warp_size; } } __syncwarp(); if(new_max_found[warp_id]){ for(int i = 0; i < 4; i++){ count[i] = count[i] + count_del[i]; count_del[i] = 0; } } __syncwarp(); if(r_chr == q_chr){ if(pos_offset <= prev_max_pos[warp_id]){ count[r_chr] += 1; } else{ count_del[r_chr] += 1; } } __syncwarp(); } __syncwarp(); //////////////////////////////////////////////////////////////// //Left extension if(lane_id ==0){ tile[warp_id] = 0; xdrop_found[warp_id] = false; edge_found[warp_id] = false; new_max_found[warp_id] = false; prev_score[warp_id] = 0; prev_max_score[warp_id] = 0; prev_max_pos[warp_id] = 0; left_extent[warp_id] = 0; } count_del[0] = 0; count_del[1] = 0; count_del[2] = 0; count_del[3] = 0; max_pos = 0; __syncwarp(); while(!xdrop_found[warp_id] && !edge_found[warp_id]){ pos_offset = lane_id+1+tile[warp_id]; thread_score = 0; if(ref_loc[warp_id] >= pos_offset && query_loc[warp_id] >= pos_offset){ ref_pos = ref_loc[warp_id] - pos_offset; query_pos = query_loc[warp_id] - pos_offset; r_chr = d_ref_seq[ref_pos]; q_chr = d_query_seq[query_pos]; thread_score = sub_mat[r_chr*NUC+q_chr]; } #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ 
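// Kogge-Stone style inclusive scan: at each step a lane adds the partial sum
// shuffled up from `offset` lanes below, so every lane ends up holding the
// running total of the per-lane substitution scores.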
temp = __shfl_up_sync(0xFFFFFFFF, thread_score, offset); if(lane_id >= offset){ thread_score += temp; } } thread_score += prev_score[warp_id]; if(thread_score > prev_max_score[warp_id]){ max_thread_score = thread_score; max_pos = pos_offset; } else{ max_thread_score = prev_max_score[warp_id]; max_pos = prev_max_pos[warp_id]; } __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset); temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset); if(lane_id >= offset){ if(temp >= max_thread_score){ max_thread_score = temp; max_pos = temp_pos; } } } xdrop_done = ((max_thread_score-thread_score) > xdrop); __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp_xdrop_done = __shfl_up_sync(0xFFFFFFFF, xdrop_done, offset); if(lane_id >= offset){ xdrop_done |= temp_xdrop_done; } } if(xdrop_done){ max_thread_score = prev_max_score[warp_id]; max_pos = prev_max_pos[warp_id]; } __syncwarp(); #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ temp = __shfl_up_sync(0xFFFFFFFF, max_thread_score, offset); temp_pos = __shfl_up_sync(0xFFFFFFFF, max_pos, offset); if(lane_id >= offset){ if(temp >= max_thread_score){ max_thread_score = temp; max_pos = temp_pos; } } } __syncwarp(); if(lane_id == warp_size-1){ if(max_pos > prev_max_pos[warp_id]) new_max_found[warp_id] = true; else new_max_found[warp_id] = false; if(xdrop_done){ total_score[warp_id]+=max_thread_score; xdrop_found[warp_id] = true; left_extent[warp_id] = max_pos; extent[warp_id] += left_extent[warp_id]; prev_max_pos[warp_id] = max_pos; tile[warp_id] = max_pos; } else if(ref_loc[warp_id] < pos_offset || query_loc[warp_id] < pos_offset){ total_score[warp_id]+=max_thread_score; edge_found[warp_id] = true; left_extent[warp_id] = max_pos; extent[warp_id] += left_extent[warp_id]; prev_max_pos[warp_id] = max_pos; tile[warp_id] = max_pos; } else{ prev_score[warp_id] = thread_score; prev_max_score[warp_id] = max_thread_score; prev_max_pos[warp_id] = max_pos; tile[warp_id]+=warp_size; } } __syncwarp(); if(new_max_found[warp_id]){ for(int i = 0; i < 4; i++){ count[i] = count[i] + count_del[i]; count_del[i] = 0; } } __syncwarp(); if(r_chr == q_chr){ if(pos_offset <= prev_max_pos[warp_id]){ count[r_chr] += 1; } else{ count_del[r_chr] += 1; } } __syncwarp(); } ////////////////////////////////////////////////////////////////// if(total_score[warp_id] >= hspthresh && total_score[warp_id] <= 3*hspthresh && !noentropy){ for(int i = 0; i < 4; i++){ #pragma unroll for (int offset = 1; offset < warp_size; offset = offset << 1){ count[i] += __shfl_up_sync(0xFFFFFFFF, count[i], offset); } } __syncwarp(); if(lane_id == warp_size-1 && ((count[0]+count[1]+count[2]+count[3]) >= 20)){ entropy[warp_id] = 0.f; for(int i = 0; i < 4; i++){ entropy[warp_id] += ((double) count[i])/((double) (extent[warp_id]+1)) * ((count[i] != 0) ? 
log(((double) count[i]) / ((double) (extent[warp_id]+1))): 0.f); } entropy[warp_id] = -entropy[warp_id]/log(4.0f); } } __syncwarp(); ////////////////////////////////////////////////////////////////// if(hid < num_hits){ if(lane_id == 0){ if( ((int) (((float) total_score[warp_id]) * entropy[warp_id])) >= hspthresh){ d_hsp[hid].ref_start = ref_loc[warp_id] - left_extent[warp_id]; d_hsp[hid].query_start = query_loc[warp_id] - left_extent[warp_id]; d_hsp[hid].len = extent[warp_id]; if(entropy[warp_id] > 0) d_hsp[hid].score = total_score[warp_id]*entropy[warp_id]; d_done[hid] = 1; } else{ d_hsp[hid].ref_start = ref_loc[warp_id]; d_hsp[hid].query_start = query_loc[warp_id]; d_hsp[hid].len = 0; d_hsp[hid].score = 0; d_done[hid] = 0; } } } __syncwarp(); } } __global__ void compress_output (uint32_t* d_done, segmentPair* d_hsp, segmentPair* d_hsp_reduced, int num_hits){ int thread_id = threadIdx.x; int block_dim = blockDim.x; int grid_dim = gridDim.x; int block_id = blockIdx.x; int stride = block_dim * grid_dim; uint32_t start = block_dim * block_id + thread_id; int index = 0; for (uint32_t id = start; id < num_hits; id += stride) { index = d_done[id]; if(id > 0){ if(index > d_done[id-1]){ d_hsp_reduced[index-1] = d_hsp[id]; } } else{ if(index == 1){ d_hsp_reduced[0] = d_hsp[0]; } } } } std::vector<segmentPair> SeedAndFilter (std::vector<uint64_t> seed_offset_vector, bool rev, uint32_t buffer){ uint32_t num_hits = 0; uint32_t total_anchors = 0; uint32_t num_seeds = seed_offset_vector.size(); assert(num_seeds <= MAX_SEEDS); uint64_t* tmp_offset = (uint64_t*) malloc(num_seeds*sizeof(uint64_t)); for (uint32_t i = 0; i < num_seeds; i++) { tmp_offset[i] = seed_offset_vector[i]; } int g; std::unique_lock<std::mutex> locker(mu); if (available_gpus.empty()) { cv.wait(locker, [](){return !available_gpus.empty();}); } g = available_gpus.back(); available_gpus.pop_back(); locker.unlock(); check_cuda_setDevice(g, "SeedAndFilter"); check_cuda_memcpy((void*)d_seed_offsets[g], (void*)tmp_offset, num_seeds*sizeof(uint64_t), cudaMemcpyHostToDevice, "seed_offsets"); find_num_hits <<<MAX_BLOCKS, MAX_THREADS>>> (num_seeds, d_index_table[g], d_seed_offsets[g], d_hit_num_array[g]); thrust::inclusive_scan(d_hit_num_vec[g].begin(), d_hit_num_vec[g].begin() + num_seeds, d_hit_num_vec[g].begin()); check_cuda_memcpy((void*)&num_hits, (void*)(d_hit_num_array[g]+num_seeds-1), sizeof(uint32_t), cudaMemcpyDeviceToHost, "num_hits"); int num_iter = num_hits/MAX_HITS+1; uint32_t iter_hit_limit = MAX_HITS; thrust::device_vector<uint32_t> limit_pos (num_iter); for(int i = 0; i < num_iter-1; i++){ thrust::device_vector<uint32_t>::iterator result_end = thrust::lower_bound(d_hit_num_vec[g].begin(), d_hit_num_vec[g].begin()+num_seeds, iter_hit_limit); uint32_t pos = thrust::distance(d_hit_num_vec[g].begin(), result_end)-1; iter_hit_limit = d_hit_num_vec[g][pos]+MAX_HITS; limit_pos[i] = pos; } limit_pos[num_iter-1] = num_seeds-1; segmentPair** h_hsp = (segmentPair**) malloc(num_iter*sizeof(segmentPair*)); uint32_t* num_anchors = (uint32_t*) calloc(num_iter, sizeof(uint32_t)); uint32_t start_seed_index = 0; uint32_t start_hit_val = 0; uint32_t iter_num_seeds, iter_num_hits; if(num_hits > 0){ for(int i = 0; i < num_iter; i++){ iter_num_seeds = limit_pos[i] + 1 - start_seed_index; iter_num_hits = d_hit_num_vec[g][limit_pos[i]] - start_hit_val; find_hits <<<iter_num_seeds, BLOCK_SIZE>>> (d_index_table[g], d_pos_table[g], d_seed_offsets[g], seed_size, d_hit_num_array[g], iter_num_hits, d_hsp[g], start_seed_index, start_hit_val); if(rev){ 
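// Reverse strand: extend against the reverse-complement copy of the query.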
find_hsps <<<1024, BLOCK_SIZE>>> (d_ref_seq[g], d_query_rc_seq[buffer*NUM_DEVICES+g], ref_len, query_length[buffer], d_sub_mat[g], noentropy, xdrop, hspthresh, iter_num_hits, d_hsp[g], d_done[g]); } else{ find_hsps <<<1024, BLOCK_SIZE>>> (d_ref_seq[g], d_query_seq[buffer*NUM_DEVICES+g], ref_len, query_length[buffer], d_sub_mat[g], noentropy, xdrop, hspthresh, iter_num_hits, d_hsp[g], d_done[g]); } thrust::inclusive_scan(d_done_vec[g].begin(), d_done_vec[g].begin() + iter_num_hits, d_done_vec[g].begin()); check_cuda_memcpy((void*)&num_anchors[i], (void*)(d_done[g]+iter_num_hits-1), sizeof(uint32_t), cudaMemcpyDeviceToHost, "num_anchors"); if(num_anchors[i] > 0){ compress_output <<<MAX_BLOCKS, MAX_THREADS>>>(d_done[g], d_hsp[g], d_hsp_reduced[g], iter_num_hits); thrust::stable_sort(d_hsp_reduced_vec[g].begin(), d_hsp_reduced_vec[g].begin()+num_anchors[i], hspComp()); thrust::device_vector<segmentPair>::iterator result_end = thrust::unique_copy(d_hsp_reduced_vec[g].begin(), d_hsp_reduced_vec[g].begin()+num_anchors[i], d_hsp_vec[g].begin(), hspEqual()); num_anchors[i] = thrust::distance(d_hsp_vec[g].begin(), result_end), num_anchors[i]; total_anchors += num_anchors[i]; h_hsp[i] = (segmentPair*) calloc(num_anchors[i], sizeof(segmentPair)); check_cuda_memcpy((void*)h_hsp[i], (void*)d_hsp[g], num_anchors[i]*sizeof(segmentPair), cudaMemcpyDeviceToHost, "hsp_output"); } start_seed_index = limit_pos[i] + 1; start_hit_val = d_hit_num_vec[g][limit_pos[i]]; } } limit_pos.clear(); { std::unique_lock<std::mutex> locker(mu); available_gpus.push_back(g); locker.unlock(); cv.notify_one(); } std::vector<segmentPair> gpu_filter_output; segmentPair first_el; first_el.len = total_anchors; first_el.score = num_hits; gpu_filter_output.push_back(first_el); if(total_anchors > 0){ for(int it = 0; it < num_iter; it++){ for(int i = 0; i < num_anchors[it]; i++){ gpu_filter_output.push_back(h_hsp[it][i]); } if(num_anchors[it] > 0){ free(h_hsp[it]); } } } free(h_hsp); free(num_anchors); free(tmp_offset); return gpu_filter_output; } void InitializeProcessor (bool transition, uint32_t WGA_CHUNK, uint32_t input_seed_size, int* sub_mat, int input_xdrop, int input_hspthresh, bool input_noentropy){ cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); float global_mem_gb = static_cast<float>(deviceProp.totalGlobalMem / 1073741824.0f); if(transition) MAX_SEEDS = 13*WGA_CHUNK; else MAX_SEEDS = WGA_CHUNK; MAX_HITS = MAX_HITS_PER_GB*global_mem_gb; seed_size = input_seed_size; xdrop = input_xdrop; hspthresh = input_hspthresh; noentropy = input_noentropy; d_sub_mat = (int**) malloc(NUM_DEVICES*sizeof(int*)); d_query_seq = (char**) malloc(BUFFER_DEPTH*NUM_DEVICES*sizeof(char*)); d_query_rc_seq = (char**) malloc(BUFFER_DEPTH*NUM_DEVICES*sizeof(char*)); d_seed_offsets = (uint64_t**) malloc(NUM_DEVICES*sizeof(uint64_t*)); d_hit_num_array = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*)); d_hit_num_vec.reserve(NUM_DEVICES); d_done = (uint32_t**) malloc(NUM_DEVICES*sizeof(uint32_t*)); d_done_vec.reserve(NUM_DEVICES); d_hsp = (segmentPair**) malloc(NUM_DEVICES*sizeof(segmentPair*)); d_hsp_vec.reserve(NUM_DEVICES); d_hsp_reduced = (segmentPair**) malloc(NUM_DEVICES*sizeof(segmentPair*)); d_hsp_reduced_vec.reserve(NUM_DEVICES); segmentPair zeroHsp; zeroHsp.ref_start = 0; zeroHsp.query_start = 0; zeroHsp.len = 0; zeroHsp.score = 0; for(int g = 0; g < NUM_DEVICES; g++){ check_cuda_setDevice(g, "InitializeProcessor"); check_cuda_malloc((void**)&d_sub_mat[g], NUC2*sizeof(int), "sub_mat"); check_cuda_memcpy((void*)d_sub_mat[g], 
(void*)sub_mat, NUC2*sizeof(int), cudaMemcpyHostToDevice, "sub_mat"); check_cuda_malloc((void**)&d_seed_offsets[g], MAX_SEEDS*sizeof(uint64_t), "seed_offsets"); d_hit_num_vec.emplace_back(MAX_SEEDS, 0); d_hit_num_array[g] = thrust::raw_pointer_cast(d_hit_num_vec.at(g).data()); d_done_vec.emplace_back(MAX_HITS, 0); d_done[g] = thrust::raw_pointer_cast(d_done_vec.at(g).data()); d_hsp_vec.emplace_back(MAX_HITS, zeroHsp); d_hsp[g] = thrust::raw_pointer_cast(d_hsp_vec.at(g).data()); d_hsp_reduced_vec.emplace_back(MAX_HITS, zeroHsp); d_hsp_reduced[g] = thrust::raw_pointer_cast(d_hsp_reduced_vec.at(g).data()); available_gpus.push_back(g); } } void SendQueryWriteRequest (size_t start_addr, uint32_t len, uint32_t buffer){ query_length[buffer] = len; for(int g = 0; g < NUM_DEVICES; g++){ check_cuda_setDevice(g, "SendQueryWriteRequest"); char* d_query_seq_tmp; check_cuda_malloc((void**)&d_query_seq_tmp, len*sizeof(char), "tmp query_seq"); check_cuda_memcpy((void*)d_query_seq_tmp, (void*)(query_DRAM->buffer + start_addr), len*sizeof(char), cudaMemcpyHostToDevice, "query_seq"); check_cuda_malloc((void**)&d_query_seq[buffer*NUM_DEVICES+g], len*sizeof(char), "query_seq"); check_cuda_malloc((void**)&d_query_rc_seq[buffer*NUM_DEVICES+g], len*sizeof(char), "query_rc_seq"); compress_string_rev_comp <<<MAX_BLOCKS, MAX_THREADS>>> (len, d_query_seq_tmp, d_query_seq[buffer*NUM_DEVICES+g], d_query_rc_seq[buffer*NUM_DEVICES+g]); check_cuda_free((void*)d_query_seq_tmp, "d_query_seq_tmp"); } } void ClearQuery(uint32_t buffer){ for(int g = 0; g < NUM_DEVICES; g++){ check_cuda_setDevice(g, "ClearQuery"); check_cuda_free((void*)d_query_seq[buffer*NUM_DEVICES+g], "d_query_seq"); check_cuda_free((void*)d_query_rc_seq[buffer*NUM_DEVICES+g], "d_query_rc_seq"); } } void ShutdownProcessor(){ d_done_vec.clear(); d_hit_num_vec.clear(); d_hsp_vec.clear(); d_hsp_reduced_vec.clear(); cudaDeviceReset(); } InitializeProcessor_ptr g_InitializeProcessor = InitializeProcessor; SendQueryWriteRequest_ptr g_SendQueryWriteRequest = SendQueryWriteRequest; SeedAndFilter_ptr g_SeedAndFilter = SeedAndFilter; ClearQuery_ptr g_ClearQuery = ClearQuery; ShutdownProcessor_ptr g_ShutdownProcessor = ShutdownProcessor;
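// ---------------------------------------------------------------------------
// Standalone illustrative sketch, not part of the original seed_filter source:
// the right- and left-extension loops in find_hsps build warp-wide running
// score totals out of __shfl_up_sync. The toy program below isolates that
// inclusive-scan primitive; all names and values are invented for the example.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>

__global__ void warp_inclusive_scan_demo(const int* in, int* out) {
    int lane = threadIdx.x & 31;   // lane id within the warp
    int v = in[threadIdx.x];
    // Same doubling-offset pattern as the scans in find_hsps.
    for (int offset = 1; offset < 32; offset <<= 1) {
        int t = __shfl_up_sync(0xFFFFFFFF, v, offset);
        if (lane >= offset) v += t;
    }
    out[threadIdx.x] = v;          // out[i] = in[0] + ... + in[i]
}

int main() {
    int h_in[32], h_out[32];
    for (int i = 0; i < 32; ++i) h_in[i] = 1;   // all ones -> prefix sums 1..32
    int *d_in = nullptr, *d_out = nullptr;
    cudaMalloc((void**)&d_in, sizeof(h_in));
    cudaMalloc((void**)&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    warp_inclusive_scan_demo<<<1, 32>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("last prefix sum = %d (expected 32)\n", h_out[31]);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}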
cf1b207b228195bffdbca070360bb78ba982c336.hip
// !!! This is a file automatically generated by hipify!!! #define USE_MNIST_LOADER #define MNIST_DOUBLE #include "mnist.h" #include "layer.h" #include <hip/hip_runtime.h> #include <cstdio> #include <time.h> static mnist_data *train_set, *test_set; static unsigned int train_cnt, test_cnt; // Define layers of CNN static Layer l_input = Layer(0, 0, 28*28); static Layer l_c1 = Layer(11*11, 3, 18*18*3);//convolutional layer static Layer l_c2 = Layer(7*7, 3, 12*12*3); // static Layer l_s1 = Layer(4*4, 1, 3*3*3); //pooling static Layer l_f = Layer(3*3*3, 10, 10); // static Layer l_d1 = Layer(10,10,10); //my neural network // static Layer l_d2 = Layer(3,10,10); static void learn(); static unsigned int classify(double data[28][28]); static void test(); static double forward_pass(double data[28][28]); static double back_pass(); static inline void loaddata() { mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte", &train_set, &train_cnt); mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte", &test_set, &test_cnt); } int main(int argc, const char **argv) { srand(time(NULL)); hipError_t err = hipInit(0); if (err != hipSuccess) { fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err); return 1; } loaddata(); learn(); test(); return 0; } // Forward propagation of a single row in dataset static double forward_pass(double data[28][28]) { float input[28][28]; for (int i = 0; i < 28; ++i) { for (int j = 0; j < 28; ++j) { input[i][j] = data[i][j]; } } l_input.clear(); l_c1.clear(); l_c2.clear(); l_s1.clear(); l_f.clear(); // l_d1.clear(); clock_t start, end; start = clock(); /*input data sent*/ int datasize[1] ; datasize[0] = 28; l_input.setOutput((float *)input,(int *)datasize); hipLaunchKernelGGL(( fp_preact_c1), dim3(64), dim3(64), 0, 0, (float*) l_input.output, (float*) l_c1.preact, (float*) l_c1.weight, l_c1.preactsize,l_input.preactsize,11,3); hipLaunchKernelGGL(( fp_bias_c1), dim3(64), dim3(64), 0, 0, (float*)l_c1.preact, l_c1.bias,l_c1.preactsize, 3); hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_c1.preact, l_c1.output, l_c1.O); hipLaunchKernelGGL(( fp_preact_c1), dim3(64), dim3(64), 0, 0, (float*) l_c1.output, (float*) l_c2.preact, (float*) l_c2.weight, l_c2.preactsize,l_c1.preactsize,7,l_c2.N); hipLaunchKernelGGL(( fp_bias_c1), dim3(64), dim3(64), 0, 0, (float*)l_c2.preact, l_c2.bias,l_c2.preactsize, l_c2.N); hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_c2.preact, l_c2.output, l_c2.O); hipLaunchKernelGGL(( fp_preact_s1), dim3(64), dim3(64), 0, 0, (float*)l_c2.output, (float*)l_s1.preact, (float*)l_s1.weight,l_s1.preactsize,l_c2.preactsize,l_c2.N,4); hipLaunchKernelGGL(( fp_bias_s1), dim3(64), dim3(64), 0, 0, (float*)l_s1.preact, l_s1.bias, l_s1.preactsize,l_c2.N); hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_s1.preact, l_s1.output, l_s1.O); hipLaunchKernelGGL(( fp_preact_f), dim3(64), dim3(64), 0, 0, (float* )l_s1.output, l_f.preact, (float*)l_f.weight, l_s1.preactsize, l_c2.N,l_f.O,l_f.preactsize); hipLaunchKernelGGL(( fp_bias_f), dim3(64), dim3(64), 0, 0, l_f.preact, l_f.bias,l_f.O); hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_f.preact, l_f.output, l_f.O); // fp_preact_dense<<<64, 64>>>((float*) l_f.output, (float*) l_d1.preact, (float*) l_d1.weight, l_d1.preactsize,l_f.preactsize,10,10); // fp_bias_dense<<<64, 64>>>((float*)l_d1.preact, l_d1.bias,l_d1.preactsize, 10); // apply_step_function<<<64, 64>>>(l_d1.preact, l_d1.output, l_d1.O); 
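// Note: the kernel launches above are asynchronous and nothing synchronizes
// before end = clock() below, so the measured interval may not include the
// kernels' actual execution time.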
end = clock(); return ((double) (end - start)) / CLOCKS_PER_SEC; } // Back propagation to update weights static double back_pass() { clock_t start, end; start = clock(); // bp_weight_dense<<<64, 64>>>((float*)l_d1.d_weight, (float*)l_d1.d_preact, (float*)l_f.output,l_d1.N,10,l_d1.preactsize,l_f.preactsize); // bp_bias_dense<<<64, 64>>>(l_d1.bias, (float*)l_d1.d_preact, l_d1.N, l_d1.preactsize); // // bp_output_dense<<<64, 64>>>((float*)l_f.d_output, (float*)l_d1.weight, (float*)l_d1.d_preact, l_d1.N,10,l_f.N,l_d1.preactsize,l_f.preactsize); // bp_preact_dense<<<64, 64>>>((float*)l_f.d_preact, (float*)l_f.d_output, (float*)l_f.preact,l_f.N,l_f.preactsize); hipLaunchKernelGGL(( bp_weight_f), dim3(64), dim3(64), 0, 0, (float*)l_f.d_weight, l_f.d_preact, (float*)l_s1.output, l_f.O, l_c2.N,l_s1.preactsize); hipLaunchKernelGGL(( bp_bias_f), dim3(64), dim3(64), 0, 0, l_f.bias, l_f.d_preact,l_f.O); hipLaunchKernelGGL(( bp_output_s1), dim3(64), dim3(64), 0, 0, (float*)l_s1.d_output, (float*)l_f.weight, l_f.d_preact, l_f.O,l_c2.N,l_s1.preactsize); hipLaunchKernelGGL(( bp_preact_s1), dim3(64), dim3(64), 0, 0, (float*)l_s1.d_preact, (float*)l_s1.d_output, (float*)l_s1.preact, l_c2.N,l_s1.preactsize); hipLaunchKernelGGL(( bp_weight_s1), dim3(64), dim3(64), 0, 0, (float*)l_s1.d_weight, (float*)l_s1.d_preact, (float*)l_c2.output, l_s1.N,4,l_c2.N,l_s1.preactsize,l_c2.preactsize); hipLaunchKernelGGL(( bp_bias_s1), dim3(64), dim3(64), 0, 0, l_s1.bias, (float*)l_s1.d_preact, l_c2.N, l_s1.preactsize); hipLaunchKernelGGL(( bp_output_c1), dim3(64), dim3(64), 0, 0, (float*)l_c2.d_output, (float*)l_s1.weight, (float*)l_s1.d_preact, l_s1.N,4,l_c2.N,l_s1.preactsize,l_c2.preactsize); hipLaunchKernelGGL(( bp_preact_c1), dim3(64), dim3(64), 0, 0, (float*)l_c2.d_preact, (float*)l_c2.d_output, (float*)l_c2.preact,l_c2.N,l_c2.preactsize); hipLaunchKernelGGL(( bp_weight_c1), dim3(64), dim3(64), 0, 0, (float*)l_c2.d_weight, (float*)l_c2.d_preact, (float*)l_input.output,l_c2.N,7,l_c2.preactsize,l_input.preactsize); hipLaunchKernelGGL(( bp_bias_c1), dim3(64), dim3(64), 0, 0, l_c2.bias, (float*)l_c2.d_preact, l_c2.N, l_c2.preactsize); hipLaunchKernelGGL(( bp_output_c1), dim3(64), dim3(64), 0, 0, (float*)l_c1.d_output, (float*)l_c2.weight, (float*)l_c2.d_preact, l_c2.N,7,l_c1.N,l_c2.preactsize,l_c1.preactsize); hipLaunchKernelGGL(( bp_preact_c1), dim3(64), dim3(64), 0, 0, (float*)l_c1.d_preact, (float*)l_c1.d_output, (float*)l_c1.preact,l_c1.N,l_c1.preactsize); hipLaunchKernelGGL(( bp_weight_c1), dim3(64), dim3(64), 0, 0, (float*)l_c1.d_weight, (float*)l_c1.d_preact, (float*)l_input.output,l_c1.N,11,l_c1.preactsize,l_input.preactsize); hipLaunchKernelGGL(( bp_bias_c1), dim3(64), dim3(64), 0, 0, l_c1.bias, (float*)l_c1.d_preact, l_c1.N, l_c1.preactsize); // apply_grad<<<64, 64>>>(l_d1.weight, l_d1.d_weight, l_d1.M * l_d1.N); hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_f.weight, l_f.d_weight, l_f.M * l_f.N); hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N); hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_c2.weight, l_c2.d_weight, l_c2.M * l_c2.N); hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N); end = clock(); return ((double) (end - start)) / CLOCKS_PER_SEC; } // Unfold the input layer static void unfold_input(double input[28][28], double unfolded[24*24][5*5]) { int a = 0; (void)unfold_input; for (int i = 0; i < 2; ++i) for (int j = 0; j < 2; ++j) { int b = 0; for (int x = i; x < i + 
2; ++x) for (int y = j; y < j+2; ++y) unfolded[a][b++] = input[x][y]; a++; } } static void learn() { static hipblasHandle_t blas; hipblasCreate(&blas); float err; int iter = 20; double time_taken = 0.0; fprintf(stdout ,"Learning\n"); while (iter < 0 || iter-- > 0) { err = 0.0f; for (int i = 0; i < train_cnt; ++i) { float tmp_err; time_taken += forward_pass(train_set[i].data); // l_d1.bp_clear(); l_f.bp_clear(); l_s1.bp_clear(); l_c2.bp_clear(); l_c1.bp_clear(); // Euclid distance of train_set[i] hipLaunchKernelGGL(( makeError), dim3(10), dim3(1), 0, 0, l_f.d_preact, l_f.output, train_set[i].label, 10); hipblasSnrm2(blas, 10, l_f.d_preact, 1, &tmp_err); //calculate the norm2 err += tmp_err; time_taken += back_pass(); } err /= train_cnt; fprintf(stdout, "error: %e, time_on_gpu: %lf\n", err, time_taken); if (err < threshold) { fprintf(stdout, "Training complete, error less than threshold\n\n"); break; } } fprintf(stdout, "\n Time - %lf\n", time_taken); } // Returns label of given data (0-9) static unsigned int classify(double data[28][28]) { float res[10]; forward_pass(data); unsigned int max = 0; hipMemcpy(res, l_f.output, sizeof(float) * 10, hipMemcpyDeviceToHost); for (int i = 1; i < 10; ++i) { if (res[max] < res[i]) { max = i; } } return max; } // Perform forward propagation of test data static void test() { int error = 0; for (int i = 0; i < test_cnt; ++i) { if (classify(test_set[i].data) != test_set[i].label) { ++error; } } fprintf(stdout, "Error Rate: %.2lf%%\n", double(error) / double(test_cnt) * 100.0); }
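// ---------------------------------------------------------------------------
// Standalone illustrative sketch, not part of the original file: forward_pass
// and back_pass above time GPU work with host-side clock(), but the kernel
// launches are asynchronous, so that interval may not cover device execution.
// Device-event timing with hipEvent_t is one alternative; the kernel and
// variable names below are invented for the example.
// ---------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void editorial_busy_kernel(float* data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = data[i] * 0.5f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    float* d_data = nullptr;
    hipMalloc((void**)&d_data, n * sizeof(float));
    hipMemset(d_data, 0, n * sizeof(float));

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start, 0);
    hipLaunchKernelGGL(editorial_busy_kernel, dim3((n + 255) / 256), dim3(256), 0, 0,
                       d_data, n);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);   // wait until the kernel (and stop event) finish

    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    printf("kernel time: %f ms\n", ms);

    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_data);
    return 0;
}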
cf1b207b228195bffdbca070360bb78ba982c336.cu
#define USE_MNIST_LOADER #define MNIST_DOUBLE #include "mnist.h" #include "layer.h" #include <cuda.h> #include <cstdio> #include <time.h> static mnist_data *train_set, *test_set; static unsigned int train_cnt, test_cnt; // Define layers of CNN static Layer l_input = Layer(0, 0, 28*28); static Layer l_c1 = Layer(11*11, 3, 18*18*3);//convolutional layer static Layer l_c2 = Layer(7*7, 3, 12*12*3); // static Layer l_s1 = Layer(4*4, 1, 3*3*3); //pooling static Layer l_f = Layer(3*3*3, 10, 10); // static Layer l_d1 = Layer(10,10,10); //my neural network // static Layer l_d2 = Layer(3,10,10); static void learn(); static unsigned int classify(double data[28][28]); static void test(); static double forward_pass(double data[28][28]); static double back_pass(); static inline void loaddata() { mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte", &train_set, &train_cnt); mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte", &test_set, &test_cnt); } int main(int argc, const char **argv) { srand(time(NULL)); CUresult err = cuInit(0); if (err != CUDA_SUCCESS) { fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err); return 1; } loaddata(); learn(); test(); return 0; } // Forward propagation of a single row in dataset static double forward_pass(double data[28][28]) { float input[28][28]; for (int i = 0; i < 28; ++i) { for (int j = 0; j < 28; ++j) { input[i][j] = data[i][j]; } } l_input.clear(); l_c1.clear(); l_c2.clear(); l_s1.clear(); l_f.clear(); // l_d1.clear(); clock_t start, end; start = clock(); /*input data sent*/ int datasize[1] ; datasize[0] = 28; l_input.setOutput((float *)input,(int *)datasize); fp_preact_c1<<<64, 64>>>((float*) l_input.output, (float*) l_c1.preact, (float*) l_c1.weight, l_c1.preactsize,l_input.preactsize,11,3); fp_bias_c1<<<64, 64>>>((float*)l_c1.preact, l_c1.bias,l_c1.preactsize, 3); apply_step_function<<<64, 64>>>(l_c1.preact, l_c1.output, l_c1.O); fp_preact_c1<<<64, 64>>>((float*) l_c1.output, (float*) l_c2.preact, (float*) l_c2.weight, l_c2.preactsize,l_c1.preactsize,7,l_c2.N); fp_bias_c1<<<64, 64>>>((float*)l_c2.preact, l_c2.bias,l_c2.preactsize, l_c2.N); apply_step_function<<<64, 64>>>(l_c2.preact, l_c2.output, l_c2.O); fp_preact_s1<<<64, 64>>>((float*)l_c2.output, (float*)l_s1.preact, (float*)l_s1.weight,l_s1.preactsize,l_c2.preactsize,l_c2.N,4); fp_bias_s1<<<64, 64>>>((float*)l_s1.preact, l_s1.bias, l_s1.preactsize,l_c2.N); apply_step_function<<<64, 64>>>(l_s1.preact, l_s1.output, l_s1.O); fp_preact_f<<<64, 64>>>((float* )l_s1.output, l_f.preact, (float*)l_f.weight, l_s1.preactsize, l_c2.N,l_f.O,l_f.preactsize); fp_bias_f<<<64, 64>>>(l_f.preact, l_f.bias,l_f.O); apply_step_function<<<64, 64>>>(l_f.preact, l_f.output, l_f.O); // fp_preact_dense<<<64, 64>>>((float*) l_f.output, (float*) l_d1.preact, (float*) l_d1.weight, l_d1.preactsize,l_f.preactsize,10,10); // fp_bias_dense<<<64, 64>>>((float*)l_d1.preact, l_d1.bias,l_d1.preactsize, 10); // apply_step_function<<<64, 64>>>(l_d1.preact, l_d1.output, l_d1.O); end = clock(); return ((double) (end - start)) / CLOCKS_PER_SEC; } // Back propagation to update weights static double back_pass() { clock_t start, end; start = clock(); // bp_weight_dense<<<64, 64>>>((float*)l_d1.d_weight, (float*)l_d1.d_preact, (float*)l_f.output,l_d1.N,10,l_d1.preactsize,l_f.preactsize); // bp_bias_dense<<<64, 64>>>(l_d1.bias, (float*)l_d1.d_preact, l_d1.N, l_d1.preactsize); // // bp_output_dense<<<64, 64>>>((float*)l_f.d_output, (float*)l_d1.weight, (float*)l_d1.d_preact, 
l_d1.N,10,l_f.N,l_d1.preactsize,l_f.preactsize); // bp_preact_dense<<<64, 64>>>((float*)l_f.d_preact, (float*)l_f.d_output, (float*)l_f.preact,l_f.N,l_f.preactsize); bp_weight_f<<<64, 64>>>((float*)l_f.d_weight, l_f.d_preact, (float*)l_s1.output, l_f.O, l_c2.N,l_s1.preactsize); bp_bias_f<<<64, 64>>>(l_f.bias, l_f.d_preact,l_f.O); bp_output_s1<<<64, 64>>>((float*)l_s1.d_output, (float*)l_f.weight, l_f.d_preact, l_f.O,l_c2.N,l_s1.preactsize); bp_preact_s1<<<64, 64>>>((float*)l_s1.d_preact, (float*)l_s1.d_output, (float*)l_s1.preact, l_c2.N,l_s1.preactsize); bp_weight_s1<<<64, 64>>>((float*)l_s1.d_weight, (float*)l_s1.d_preact, (float*)l_c2.output, l_s1.N,4,l_c2.N,l_s1.preactsize,l_c2.preactsize); bp_bias_s1<<<64, 64>>>(l_s1.bias, (float*)l_s1.d_preact, l_c2.N, l_s1.preactsize); bp_output_c1<<<64, 64>>>((float*)l_c2.d_output, (float*)l_s1.weight, (float*)l_s1.d_preact, l_s1.N,4,l_c2.N,l_s1.preactsize,l_c2.preactsize); bp_preact_c1<<<64, 64>>>((float*)l_c2.d_preact, (float*)l_c2.d_output, (float*)l_c2.preact,l_c2.N,l_c2.preactsize); bp_weight_c1<<<64, 64>>>((float*)l_c2.d_weight, (float*)l_c2.d_preact, (float*)l_input.output,l_c2.N,7,l_c2.preactsize,l_input.preactsize); bp_bias_c1<<<64, 64>>>(l_c2.bias, (float*)l_c2.d_preact, l_c2.N, l_c2.preactsize); bp_output_c1<<<64, 64>>>((float*)l_c1.d_output, (float*)l_c2.weight, (float*)l_c2.d_preact, l_c2.N,7,l_c1.N,l_c2.preactsize,l_c1.preactsize); bp_preact_c1<<<64, 64>>>((float*)l_c1.d_preact, (float*)l_c1.d_output, (float*)l_c1.preact,l_c1.N,l_c1.preactsize); bp_weight_c1<<<64, 64>>>((float*)l_c1.d_weight, (float*)l_c1.d_preact, (float*)l_input.output,l_c1.N,11,l_c1.preactsize,l_input.preactsize); bp_bias_c1<<<64, 64>>>(l_c1.bias, (float*)l_c1.d_preact, l_c1.N, l_c1.preactsize); // apply_grad<<<64, 64>>>(l_d1.weight, l_d1.d_weight, l_d1.M * l_d1.N); apply_grad<<<64, 64>>>(l_f.weight, l_f.d_weight, l_f.M * l_f.N); apply_grad<<<64, 64>>>(l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N); apply_grad<<<64, 64>>>(l_c2.weight, l_c2.d_weight, l_c2.M * l_c2.N); apply_grad<<<64, 64>>>(l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N); end = clock(); return ((double) (end - start)) / CLOCKS_PER_SEC; } // Unfold the input layer static void unfold_input(double input[28][28], double unfolded[24*24][5*5]) { int a = 0; (void)unfold_input; for (int i = 0; i < 2; ++i) for (int j = 0; j < 2; ++j) { int b = 0; for (int x = i; x < i + 2; ++x) for (int y = j; y < j+2; ++y) unfolded[a][b++] = input[x][y]; a++; } } static void learn() { static cublasHandle_t blas; cublasCreate(&blas); float err; int iter = 20; double time_taken = 0.0; fprintf(stdout ,"Learning\n"); while (iter < 0 || iter-- > 0) { err = 0.0f; for (int i = 0; i < train_cnt; ++i) { float tmp_err; time_taken += forward_pass(train_set[i].data); // l_d1.bp_clear(); l_f.bp_clear(); l_s1.bp_clear(); l_c2.bp_clear(); l_c1.bp_clear(); // Euclid distance of train_set[i] makeError<<<10, 1>>>(l_f.d_preact, l_f.output, train_set[i].label, 10); cublasSnrm2(blas, 10, l_f.d_preact, 1, &tmp_err); //calculate the norm2 err += tmp_err; time_taken += back_pass(); } err /= train_cnt; fprintf(stdout, "error: %e, time_on_gpu: %lf\n", err, time_taken); if (err < threshold) { fprintf(stdout, "Training complete, error less than threshold\n\n"); break; } } fprintf(stdout, "\n Time - %lf\n", time_taken); } // Returns label of given data (0-9) static unsigned int classify(double data[28][28]) { float res[10]; forward_pass(data); unsigned int max = 0; cudaMemcpy(res, l_f.output, sizeof(float) * 10, cudaMemcpyDeviceToHost); for (int i = 1; i < 
10; ++i) { if (res[max] < res[i]) { max = i; } } return max; } // Perform forward propagation of test data static void test() { int error = 0; for (int i = 0; i < test_cnt; ++i) { if (classify(test_set[i].data) != test_set[i].label) { ++error; } } fprintf(stdout, "Error Rate: %.2lf%%\n", double(error) / double(test_cnt) * 100.0); }
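// ---------------------------------------------------------------------------
// Standalone illustrative sketch, not part of the original file: main() above
// probes for a working GPU with the driver-API call cuInit. A runtime-API
// probe is a common alternative; the example below is illustrative only.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    int device_count = 0;
    cudaError_t err = cudaGetDeviceCount(&device_count);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    if (device_count == 0) {
        fprintf(stderr, "No CUDA devices found\n");
        return 1;
    }
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    printf("Found %d device(s); device 0 is %s\n", device_count, prop.name);
    return 0;
}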
841ea2a830f83f0bfb627854b1e4818593444546.hip
// !!! This is a file automatically generated by hipify!!! /* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/kernels/kernel.h" #include <cstdio> namespace nvinfer1 { namespace plugin { pluginStatus_t generateAnchors_cpu( int numRatios, float* ratios, int numScales, float* scales, int baseSize, float* anchors) { #ifdef DEBUG DEBUG_PRINTF("Generating Anchors with:\n"); DEBUG_PRINTF("Scales:"); for (int s = 0; s < numScales; ++s) { DEBUG_PRINTF("%f\t", scales[s]); } DEBUG_PRINTF("\n"); DEBUG_PRINTF("Ratios:"); for (int r = 0; r < numRatios; ++r) { DEBUG_PRINTF("%f\t", ratios[r]); } DEBUG_PRINTF("\n"); #endif if ((numScales <= 0) || (numRatios <= 0) || (baseSize <= 0)) { return STATUS_BAD_PARAM; } // Generate parameters for numRatios * numScales general anchor boxes for (int r = 0; r < numRatios; ++r) { for (int s = 0; s < numScales; ++s) { int id = r * numScales + s; float scale = scales[s]; float ratio = ratios[r]; float bs = baseSize; float ws = round(sqrt((float) (bs * bs) / ratio)); float hs = round(ws * ratio); // Width: bs / sqrt(ratio) * scale // Height: bs * sqrt(ratio) * scale ws *= scale; hs *= scale; // x_anchor_ctr /* * This value should not useful in this implementation of generating numRatios * numScales general anchor boxes. * Because the center of anchor box in the original input raw image scale will not be dependent on this. */ anchors[id * 4] = (bs - 1) / 2; // y_anchor_ctr /* * This value should not useful in this implementation of generating numRatios * numScales general anchor boxes. * Because the center of anchor box in the original input raw image scale will not be dependent on this. */ anchors[id * 4 + 1] = (bs - 1) / 2; // w_anchor anchors[id * 4 + 2] = ws; // h_anchor anchors[id * 4 + 3] = hs; } } return STATUS_SUCCESS; } pluginStatus_t generateAnchors(hipStream_t stream, int numRatios, float* ratios, int numScales, float* scales, int baseSize, float* anchors) { // Each anchor box has 4 parameters int ac = numRatios * numScales * 4; float* anchors_cpu; CSC(hipHostMalloc((void**) &anchors_cpu, sizeof(float) * ac), STATUS_FAILURE); pluginStatus_t status = generateAnchors_cpu(numRatios, ratios, numScales, scales, baseSize, anchors_cpu); CSC(hipMemcpyAsync(anchors, anchors_cpu, sizeof(float) * ac, hipMemcpyHostToDevice, stream), STATUS_FAILURE); CSC(hipHostFree(anchors_cpu), STATUS_FAILURE); return status; } } // namespace plugin } // namespace nvinfer1
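// ---------------------------------------------------------------------------
// Standalone illustrative sketch, not part of the original plugin source:
// generateAnchors above enqueues hipMemcpyAsync on the caller's stream and
// then frees the pinned staging buffer; whether that ordering is safe depends
// on the runtime's implicit synchronization. The sketch below shows the more
// conservative allocate / async-copy / synchronize / free ordering; buffer
// names and sizes are invented for the example.
// ---------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
    const int n = 16;
    float* h_pinned = nullptr;
    float* d_buf = nullptr;
    hipStream_t stream;

    hipStreamCreate(&stream);
    hipHostMalloc((void**)&h_pinned, n * sizeof(float));
    hipMalloc((void**)&d_buf, n * sizeof(float));
    for (int i = 0; i < n; ++i) h_pinned[i] = (float) i;

    // Enqueue the copy, then make sure it has finished before the staging
    // buffer is released.
    hipMemcpyAsync(d_buf, h_pinned, n * sizeof(float), hipMemcpyHostToDevice, stream);
    hipStreamSynchronize(stream);
    hipHostFree(h_pinned);

    hipFree(d_buf);
    hipStreamDestroy(stream);
    printf("async copy completed and staging buffer released\n");
    return 0;
}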
841ea2a830f83f0bfb627854b1e4818593444546.cu
/* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/kernels/kernel.h" #include <cstdio> namespace nvinfer1 { namespace plugin { pluginStatus_t generateAnchors_cpu( int numRatios, float* ratios, int numScales, float* scales, int baseSize, float* anchors) { #ifdef DEBUG DEBUG_PRINTF("Generating Anchors with:\n"); DEBUG_PRINTF("Scales:"); for (int s = 0; s < numScales; ++s) { DEBUG_PRINTF("%f\t", scales[s]); } DEBUG_PRINTF("\n"); DEBUG_PRINTF("Ratios:"); for (int r = 0; r < numRatios; ++r) { DEBUG_PRINTF("%f\t", ratios[r]); } DEBUG_PRINTF("\n"); #endif if ((numScales <= 0) || (numRatios <= 0) || (baseSize <= 0)) { return STATUS_BAD_PARAM; } // Generate parameters for numRatios * numScales general anchor boxes for (int r = 0; r < numRatios; ++r) { for (int s = 0; s < numScales; ++s) { int id = r * numScales + s; float scale = scales[s]; float ratio = ratios[r]; float bs = baseSize; float ws = round(sqrt((float) (bs * bs) / ratio)); float hs = round(ws * ratio); // Width: bs / sqrt(ratio) * scale // Height: bs * sqrt(ratio) * scale ws *= scale; hs *= scale; // x_anchor_ctr /* * This value should not useful in this implementation of generating numRatios * numScales general anchor boxes. * Because the center of anchor box in the original input raw image scale will not be dependent on this. */ anchors[id * 4] = (bs - 1) / 2; // y_anchor_ctr /* * This value should not useful in this implementation of generating numRatios * numScales general anchor boxes. * Because the center of anchor box in the original input raw image scale will not be dependent on this. */ anchors[id * 4 + 1] = (bs - 1) / 2; // w_anchor anchors[id * 4 + 2] = ws; // h_anchor anchors[id * 4 + 3] = hs; } } return STATUS_SUCCESS; } pluginStatus_t generateAnchors(cudaStream_t stream, int numRatios, float* ratios, int numScales, float* scales, int baseSize, float* anchors) { // Each anchor box has 4 parameters int ac = numRatios * numScales * 4; float* anchors_cpu; CSC(cudaMallocHost((void**) &anchors_cpu, sizeof(float) * ac), STATUS_FAILURE); pluginStatus_t status = generateAnchors_cpu(numRatios, ratios, numScales, scales, baseSize, anchors_cpu); CSC(cudaMemcpyAsync(anchors, anchors_cpu, sizeof(float) * ac, cudaMemcpyHostToDevice, stream), STATUS_FAILURE); CSC(cudaFreeHost(anchors_cpu), STATUS_FAILURE); return status; } } // namespace plugin } // namespace nvinfer1
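// ---------------------------------------------------------------------------
// Standalone host-only sketch, not part of the original plugin source: a quick
// check of the anchor width/height arithmetic used in generateAnchors_cpu
// above. The base size, ratios, and scales here are example values only, not
// the plugin's defaults.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

int main() {
    const float base_size = 16.0f;
    const float ratios[] = {0.5f, 1.0f, 2.0f};
    const float scales[] = {8.0f, 16.0f, 32.0f};

    for (float ratio : ratios) {
        for (float scale : scales) {
            // Mirrors the formula above: round before scaling.
            float ws = std::round(std::sqrt(base_size * base_size / ratio));
            float hs = std::round(ws * ratio);
            ws *= scale;
            hs *= scale;
            printf("ratio %.1f scale %.0f -> w %.0f h %.0f\n", ratio, scale, ws, hs);
        }
    }
    return 0;
}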
14d8539b21aae56c6516ddfba814d2c97178a553.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) OpenMMLab. All rights reserved #include "pytorch_cuda_helper.hpp" #include "pytorch_device_registry.hpp" #include "tin_shift_cuda_kernel.cuh" void TINShiftForwardCUDAKernelLauncher(Tensor input, Tensor shift, Tensor output) { int output_size = output.numel(); int batch_size = input.size(0); int t_size = input.size(1); int channels = input.size(2); int hw_size = input.size(3); int group_size = shift.size(1); int group_channel = channels / group_size; int num_kernels = batch_size * hw_size * channels; at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "tin_shift_forward_cuda_kernel", [&] { hipLaunchKernelGGL(( tin_shift_forward_cuda_kernel<scalar_t>) , dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, stream, output_size, input.data_ptr<scalar_t>(), shift.data_ptr<int>(), output.data_ptr<scalar_t>(), batch_size, channels, t_size, hw_size, group_size, group_channel); }); AT_CUDA_CHECK(hipGetLastError()); } void TINShiftBackwardCUDAKernelLauncher(Tensor grad_output, Tensor shift, Tensor grad_input) { int output_size = grad_output.numel(); int batch_size = grad_output.size(0); int t_size = grad_output.size(1); int channels = grad_output.size(2); int hw_size = grad_output.size(3); int group_size = shift.size(1); int group_channel = channels / group_size; int num_kernels = batch_size * hw_size * channels; at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad_output.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "tin_shift_backward_cuda_kernel", [&] { hipLaunchKernelGGL(( tin_shift_backward_cuda_kernel<scalar_t>) , dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, stream, output_size, grad_output.data_ptr<scalar_t>(), shift.data_ptr<int>(), grad_input.data_ptr<scalar_t>(), batch_size, channels, t_size, hw_size, group_size, group_channel); }); AT_CUDA_CHECK(hipGetLastError()); }
14d8539b21aae56c6516ddfba814d2c97178a553.cu
// Copyright (c) OpenMMLab. All rights reserved #include "pytorch_cuda_helper.hpp" #include "pytorch_device_registry.hpp" #include "tin_shift_cuda_kernel.cuh" void TINShiftForwardCUDAKernelLauncher(Tensor input, Tensor shift, Tensor output) { int output_size = output.numel(); int batch_size = input.size(0); int t_size = input.size(1); int channels = input.size(2); int hw_size = input.size(3); int group_size = shift.size(1); int group_channel = channels / group_size; int num_kernels = batch_size * hw_size * channels; at::cuda::CUDAGuard device_guard(input.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "tin_shift_forward_cuda_kernel", [&] { tin_shift_forward_cuda_kernel<scalar_t> <<<GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, stream>>>( output_size, input.data_ptr<scalar_t>(), shift.data_ptr<int>(), output.data_ptr<scalar_t>(), batch_size, channels, t_size, hw_size, group_size, group_channel); }); AT_CUDA_CHECK(cudaGetLastError()); } void TINShiftBackwardCUDAKernelLauncher(Tensor grad_output, Tensor shift, Tensor grad_input) { int output_size = grad_output.numel(); int batch_size = grad_output.size(0); int t_size = grad_output.size(1); int channels = grad_output.size(2); int hw_size = grad_output.size(3); int group_size = shift.size(1); int group_channel = channels / group_size; int num_kernels = batch_size * hw_size * channels; at::cuda::CUDAGuard device_guard(grad_output.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "tin_shift_backward_cuda_kernel", [&] { tin_shift_backward_cuda_kernel<scalar_t> <<<GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, stream>>>( output_size, grad_output.data_ptr<scalar_t>(), shift.data_ptr<int>(), grad_input.data_ptr<scalar_t>(), batch_size, channels, t_size, hw_size, group_size, group_channel); }); AT_CUDA_CHECK(cudaGetLastError()); }
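// ---------------------------------------------------------------------------
// Standalone illustrative sketch, not part of the original file: both
// launchers above size their grids with GET_BLOCKS(num_kernels) and
// THREADS_PER_BLOCK from the MMCV helper headers, which are not shown here.
// The sketch below shows the usual ceil-divide pattern behind that kind of
// macro; the real MMCV helper may differ (for example by clamping the block
// count), and all names here are invented.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>

constexpr int kThreadsPerBlock = 512;

// Minimal ceil-divide grid sizing.
inline int blocks_for(int n) {
    return (n + kThreadsPerBlock - 1) / kThreadsPerBlock;
}

__global__ void editorial_fill_kernel(float* out, int n, float value) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;   // bounds check handles the padded last block
}

int main() {
    const int n = 1000;          // deliberately not a multiple of the block size
    float* d_out = nullptr;
    cudaMalloc((void**)&d_out, n * sizeof(float));
    editorial_fill_kernel<<<blocks_for(n), kThreadsPerBlock>>>(d_out, n, 1.0f);
    cudaDeviceSynchronize();
    printf("launched %d block(s) of %d threads for %d elements\n",
           blocks_for(n), kThreadsPerBlock, n);
    cudaFree(d_out);
    return 0;
}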
4e07ccb5784f4ecb65a9f241da76336298d638f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "split_points.cuh" #include <library/cpp/cuda/wrappers/arch.cuh> #include <catboost/cuda/cuda_lib/cuda_base.h> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/gpu_data/partitions.h> #include <catboost/cuda/cuda_util/kernel/update_part_props.cuh> #include <catboost/cuda/cuda_util/kernel/reorder_one_bit.cuh> #include <catboost/cuda/cuda_util/kernel/reorder_one_bit_impl.cuh> #include <contrib/libs/nvidia/cub/hipcub/hipcub.hpp> namespace NKernel { template <typename T> __global__ void CopyInLeavesImpl(const ui32* leaves, const TDataPartition* parts, const T *src, T *dst, ui32 numStats, ui64 lineSize) { const ui32 leafId = leaves[blockIdx.y]; const ui32 offset = parts[leafId].Offset; const ui32 size = parts[leafId].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; src += offset; dst += offset; while (i < size) { #pragma unroll 8 for (int k = 0; k < numStats; ++k) { WriteThrough(dst + i + k * lineSize, __ldg(src + i + k * lineSize)); } i += gridDim.x * blockDim.x; } } template <ui32 Size, ui32 BlockSize = 1024> __global__ void GatherInplaceImpl(const ui32* leaf, const TDataPartition* parts, const ui32* map, float* stats, ui64 lineSize, ui32* indices) { __shared__ char4 tmp[Size]; char4* data = blockIdx.x == 0 ? (char4*)indices : (char4*)(stats + (blockIdx.x - 1) * lineSize); const ui32 leafId = leaf[blockIdx.y]; TDataPartition part = Ldg(parts + leafId); const ui32 offset = part.Offset; ui32 size = part.Size; //should be always true btw, but may help compiler const ui32 tid = threadIdx.x; map += offset; data += offset; #pragma unroll for (ui32 i = tid; i < Size; i += BlockSize) { if (i < size) { const ui32 loadIdx = __ldg(map + i); tmp[i] = __ldg(data + loadIdx); } } __syncthreads(); #pragma unroll for (ui32 i = tid; i < Size; i += BlockSize) { if (i < size) { WriteThrough(data + i, tmp[i]); } } } template <int Size> void GatherInplaceLeqSize(const ui32* leaf, ui32 leavesCount, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream) { const ui32 blockSize = 1024; dim3 numBlocks; numBlocks.x = 1 + statCount; numBlocks.y = leavesCount; numBlocks.z = 1; hipLaunchKernelGGL(( GatherInplaceImpl<Size, blockSize>) , dim3(numBlocks), dim3(blockSize), 0, stream, leaf, parts, map, stats, lineSize, indices); } template <ui32 Size, ui32 BlockSize = 1024> __global__ void GatherInplaceSingleLeafImpl(const ui32 leafId, const TDataPartition* parts, const ui32* map, float* stats, ui64 lineSize, ui32* indices) { __shared__ char4 tmp[Size]; char4* data = blockIdx.x == 0 ? 
(char4*)indices : (char4*)(stats + (blockIdx.x - 1) * lineSize); TDataPartition part = Ldg(parts + leafId); const ui32 offset = part.Offset; ui32 size = part.Size; //should be always true btw, but may help compiler const ui32 tid = threadIdx.x; data += offset; #pragma unroll for (ui32 i = tid; i < Size; i += BlockSize) { if (i < size) { const ui32 loadIdx = __ldg(map + i); tmp[i] = __ldg(data + loadIdx); } } __syncthreads(); #pragma unroll for (ui32 i = tid; i < Size; i += BlockSize) { if (i < size) { WriteThrough(data + i, tmp[i]); } } } template <int Size> void GatherInplaceSingleLeaf(const ui32 leaf, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream) { const ui32 blockSize = 1024; dim3 numBlocks; numBlocks.x = 1 + statCount; numBlocks.y = 1; numBlocks.z = 1; hipLaunchKernelGGL(( GatherInplaceSingleLeafImpl<Size, blockSize>) , dim3(numBlocks), dim3(blockSize), 0, stream, leaf, parts, map, stats, lineSize, indices); } /* this should be called before updatePartProps */ template <typename T> __global__ void GatherInLeavesImpl(const ui32* leaves, const TDataPartition* parts, const T *src, const ui32* map, T *dst, ui32 numStats, ui64 lineSize) { const ui32 leafId = leaves[blockIdx.y]; const ui32 offset = parts[leafId].Offset; const ui32 size = parts[leafId].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; map += offset; src += offset; dst += offset; while (i < size) { const ui32 loadIdx = __ldg(map + i); #pragma unroll 8 for (int k = 0; k < numStats; ++k) { WriteThrough(dst + i + k * lineSize, __ldg(src + loadIdx + k * lineSize)); } i += gridDim.x * blockDim.x; } } template <class T> void CopyInLeaves(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const T *src, T *dst, ui32 numStats, ui32 lineSize, TCudaStream stream) { const ui32 blockSize = 256; dim3 numBlocks; numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount(); numBlocks.y = leavesCount; numBlocks.z = 1; if (leavesCount) { hipLaunchKernelGGL(( CopyInLeavesImpl<T>), dim3(numBlocks), dim3(blockSize), 0, stream, leaves, parts, src, dst, numStats, lineSize); } } template <class T> void GatherInLeaves(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const T *src, const ui32* map, T *dst, ui32 numStats, ui32 lineSize, TCudaStream stream) { const ui32 blockSize = 256; dim3 numBlocks; numBlocks.x = (leavesCount > 4 ? 
2 : 4) * TArchProps::SMCount(); numBlocks.y = leavesCount; numBlocks.z = 1; if (leavesCount) { hipLaunchKernelGGL(( GatherInLeavesImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leaves, parts, src, map, dst, numStats, lineSize); } } template <typename T> __global__ void CopyLeafImpl(const ui32 leafId, const TDataPartition* parts, const T* src, T* dst, ui32 numStats, ui64 lineSize) { const ui32 offset = parts[leafId].Offset; const ui32 size = parts[leafId].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; src += offset; while (i < size) { #pragma unroll 8 for (int k = 0; k < numStats; ++k) { WriteThrough(dst + i + k * size, __ldg(src + i + k * lineSize)); } i += gridDim.x * blockDim.x; } } template <class T> void CopyLeaf(const ui32 leafId, const ui32 leafSize, const TDataPartition* parts, const T* src, T* dst, ui32 numStats, ui32 lineSize, TCudaStream stream) { const ui32 blockSize = 256; dim3 numBlocks; numBlocks.x = (leafSize + blockSize - 1) / blockSize; numBlocks.y = 1; numBlocks.z = 1; if (leafSize) { hipLaunchKernelGGL(( CopyLeafImpl<T>), dim3(numBlocks), dim3(blockSize), 0, stream, leafId, parts, src, dst, numStats, lineSize); } } /* this should be called before updatePartProps */ template <typename T> __global__ void GatherLeafImpl(const ui32 leafId, const TDataPartition* parts, const T* src, const ui32* map, T* dst, ui32 numStats, ui64 lineSize) { const ui32 offset = parts[leafId].Offset; const ui32 size = parts[leafId].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; dst += offset; while (i < size) { const ui32 loadIdx = __ldg(map + i); #pragma unroll 8 for (int k = 0; k < numStats; ++k) { WriteThrough(dst + i + k * lineSize, __ldg(src + loadIdx + k * size)); } i += gridDim.x * blockDim.x; } } template <class T> void GatherLeaf(const ui32 leafId, const ui32 leafSize, const TDataPartition* parts, const T* src, const ui32* map, T* dst, ui32 numStats, ui32 lineSize, TCudaStream stream) { const ui32 blockSize = 256; dim3 numBlocks; numBlocks.x = (leafSize + blockSize - 1) / blockSize; numBlocks.y = 1; numBlocks.z = 1; if (leafSize) { hipLaunchKernelGGL(( GatherLeafImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leafId, parts, src, map, dst, numStats, lineSize); } } __global__ void UpdatePartitionsAfterSplitImpl(const ui32* leftLeaves, const ui32* rightLeaves, ui32 leafCount, const bool* sortedFlags, TDataPartition* parts, TDataPartition* partsCpu ) { const ui32 leftLeaf = leftLeaves[blockIdx.y]; const ui32 rightLeaf = rightLeaves[blockIdx.y]; sortedFlags += parts[leftLeaf].Offset; const ui32 partSize = parts[leftLeaf].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 offset = parts[leftLeaf].Offset; while (i <= partSize) { int flag0 = i < partSize ? Ldg(sortedFlags + i) : 1; int flag1 = i ? Ldg(sortedFlags + i - 1) : 0; if (flag0 != flag1) { //we are on border TDataPartition leftPart = parts[leftLeaf]; leftPart.Size = i; parts[leftLeaf] = leftPart; partsCpu[leftLeaf] = leftPart; TDataPartition rightPart = parts[rightLeaf]; rightPart.Offset = offset + i; rightPart.Size = partSize - i; parts[rightLeaf] = rightPart; partsCpu[rightLeaf] = rightPart; break; } i += blockDim.x * gridDim.x; } } void UpdatePartitionsAfterSplit(const ui32* leftLeafs, const ui32* rightLeafs, ui32 leavesCount, const bool* sortedFlag, TDataPartition* parts, TDataPartition* partsCpu, TCudaStream stream) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.x = (leavesCount > 4 ? 
2 : 4) * TArchProps::SMCount(); numBlocks.y = leavesCount; numBlocks.z = 1; if (leavesCount) { hipLaunchKernelGGL(( UpdatePartitionsAfterSplitImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leftLeafs, rightLeafs, leavesCount, sortedFlag, parts, partsCpu); } } __global__ void UpdatePartitionAfterSplitImpl(const ui32 leftLeaf, const ui32 rightLeaf, const bool* sortedFlags, TDataPartition* parts, TDataPartition* partsCpu ) { const ui32 partSize = parts[leftLeaf].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 offset = parts[leftLeaf].Offset; while (i <= partSize) { int flag0 = i < partSize ? Ldg(sortedFlags + i) : 1; int flag1 = i ? Ldg(sortedFlags + i - 1) : 0; if (flag0 != flag1) { //we are on border TDataPartition leftPart = parts[leftLeaf]; leftPart.Size = i; partsCpu[leftLeaf] = leftPart; parts[leftLeaf] = leftPart; TDataPartition rightPart = parts[rightLeaf]; rightPart.Offset = offset + i; rightPart.Size = partSize - i; partsCpu[rightLeaf] = rightPart; parts[rightLeaf] = rightPart; break; } i += blockDim.x * gridDim.x; } } void UpdatePartitionAfterSplit(const ui32 leftLeaf, const ui32 rightLeaf, ui32 leafSize, const bool* sortedFlag, TDataPartition* parts, TDataPartition* partsCpu, TCudaStream stream) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.x = (leafSize + blockSize - 1) / blockSize; numBlocks.y = 1; numBlocks.z = 1; if (leafSize) { hipLaunchKernelGGL(( UpdatePartitionAfterSplitImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leftLeaf, rightLeaf, sortedFlag, parts, partsCpu); } } /* * blockIdx.x * gridDim.x + threadIdx.x is index in leaf * blockIdx.y is part number * this is not time critical kernel, so we make for several blocks per SM for each leaf and just skip computations if necessary */ template <int N, int BlockSize> __global__ void SplitAndMakeSequenceInLeavesImpl(const ui32* compressedIndex, const ui32* loadIndices, const TDataPartition* parts, const ui32* leafIds, const TCFeature* splitFeatures, const ui32* splitBins, bool* splitFlags, ui32* indices) { const ui32 leafId = leafIds[blockIdx.y]; TDataPartition part = Ldg(parts + leafId); const i32 size = part.Size; const i32 offset = part.Offset; loadIndices += offset; indices += offset; splitFlags += offset; int i = blockIdx.x * BlockSize * N + threadIdx.x; if (i >= size) { return; } TCFeature feature = splitFeatures[blockIdx.y]; const ui32 binIdx = splitBins[blockIdx.y]; const ui32 value = binIdx << feature.Shift; const ui32 mask = feature.Mask << feature.Shift; const bool oneHot = feature.OneHotFeature; compressedIndex += feature.Offset; while (i < size) { ui32 loadIndex[N]; #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { loadIndex[k] = loadIndices ? __ldg(loadIndices + i + k * BlockSize) : i + k * BlockSize; } } ui32 featureVal[N]; #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { featureVal[k] = __ldg(compressedIndex + loadIndex[k]) & mask; } } #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { WriteThrough(indices + i + k * BlockSize, static_cast<ui32>(i + k * BlockSize)); } } bool split[N]; #pragma unroll for (int k = 0; k < N; ++k) { split[k] = (oneHot ? 
(featureVal[k] == value) : featureVal[k] > value); } #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { WriteThrough(splitFlags + i + k * BlockSize, split[k]); } } i += N * BlockSize * gridDim.x; } } void SplitAndMakeSequenceInLeaves(const ui32* compressedIndex, const ui32* loadIndices, const TDataPartition* parts, const ui32* leafIds, ui32 leavesCount, const TCFeature* splitFeatures, const ui32* splitBins, bool* splitFlags, ui32* indices, TCudaStream stream) { if (leavesCount) { const ui32 blockSize = 512; const int N = 4; dim3 numBlocks; numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount(); numBlocks.y = leavesCount; numBlocks.z = 1; hipLaunchKernelGGL(( SplitAndMakeSequenceInLeavesImpl<N, blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, compressedIndex, loadIndices, parts, leafIds, splitFeatures, splitBins, splitFlags, indices); } } template <int N, int BlockSize> __global__ void SplitAndMakeSequenceInSingleLeafImpl(const ui32* compressedIndex, const ui32* loadIndices, const TDataPartition* parts, const ui32 leafId, const TCFeature feature, const ui32 binIdx, bool* splitFlags, ui32* indices) { TDataPartition part = Ldg(parts + leafId); const i32 size = part.Size; const i32 offset = part.Offset; loadIndices += offset; const int i = blockIdx.x * BlockSize * N + threadIdx.x; const ui32 value = binIdx << feature.Shift; const ui32 mask = feature.Mask << feature.Shift; const bool oneHot = feature.OneHotFeature; compressedIndex += feature.Offset; ui32 loadIndex[N]; #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { loadIndex[k] = __ldg(loadIndices + i + k * BlockSize); } } ui32 featureVal[N]; #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { featureVal[k] = __ldg(compressedIndex + loadIndex[k]) & mask; } } #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { WriteThrough(indices + i + k * BlockSize, static_cast<ui32>(i + k * BlockSize)); } } bool split[N]; #pragma unroll for (int k = 0; k < N; ++k) { split[k] = (oneHot ? 
(featureVal[k] == value) : featureVal[k] > value); } #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { WriteThrough(splitFlags + i + k * BlockSize, split[k]); } } } void SplitAndMakeSequenceInLeaf(const ui32* compressedIndex, const ui32* loadIndices, const TDataPartition* parts, ui32 leafId, ui32 leafSize, TCFeature splitFeature, ui32 splitBin, bool* splitFlags, ui32* indices, TCudaStream stream) { const ui32 blockSize = 256; const int N = 2; dim3 numBlocks; numBlocks.x = (leafSize + blockSize * N - 1) / (blockSize * N); numBlocks.y = 1; numBlocks.z = 1; if (numBlocks.x) { SplitAndMakeSequenceInSingleLeafImpl<N, blockSize> << < numBlocks, blockSize, 0, stream >> > (compressedIndex, loadIndices, parts, leafId, splitFeature, splitBin, splitFlags, indices); } } //TODO(noxoomo): cub sucks for this, write proper segmented version void SortByFlagsInLeaves(const ui32* leavesToSplit, const ui32 leafCount, const TDataPartition* partsCpu, TSplitPointsContext& context, TCudaStream stream) { /* * Sort leaves by flags */ for (ui32 i = 0; i < leafCount; ++i) { const ui32 leafId = leavesToSplit[i]; TDataPartition part = partsCpu[leafId]; const bool* flagsSrc = context.TempFlags.Get() + part.Offset; bool* flagsDst = context.Flags.Get() + part.Offset; const ui32* indicesSrc = context.TempIndices.Get() + part.Offset; ui32* indicesDst = context.Indices.Get() + part.Offset; if (part.Size) { hipError_t error = hipcub::DeviceRadixSort::SortPairs < bool, ui32 > ((void*) context.TempStorage.Get(), context.TempStorageSizes[i], flagsSrc, flagsDst, indicesSrc, indicesDst, (int) part.Size, 0, 1, stream); CUDA_SAFE_CALL(error); } } } // void SortWithoutCub(ui32 leafId, const TDataPartition* partsCpu, TSplitPointsContext& context, TCudaStream stream) { TDataPartition part = partsCpu[leafId]; if (part.Size) { const bool* flagsSrc = context.TempFlags.Get(); bool* flagsDst = context.Flags.Get(); const ui32* indicesSrc = context.TempIndices.Get(); ui32* indicesDst = context.Indices.Get(); char* tempStorage = context.TempStorage.Get(); const ui64 tempOffsetsSize = sizeof(int) * part.Size; { using TInput = TScanBitIterator<bool>; TInput inputIter(context.TempFlags.Get(), 0); ui64 tempStorageSize = tempStorage ? context.TempStorageSizes[0] - tempOffsetsSize : 0; auto scanTmp = tempStorage ? 
(void*)(tempStorage + tempOffsetsSize) : nullptr; hipError_t err = hipcub::DeviceScan::ExclusiveSum < TInput, int*> (scanTmp, tempStorageSize, inputIter, (int*)tempStorage, part.Size, stream); if (!tempStorage) { context.TempStorageSizes[0] = tempStorageSize + tempOffsetsSize; } CUDA_SAFE_CALL(err); } if (tempStorage) { const int blockSize = 512; const int N = 1; const int numBlocks = (part.Size + (N * blockSize) - 1) / (N * blockSize); ReorderOneBitImpl<bool, ui32, N, blockSize> << < numBlocks, blockSize, 0, stream >> > ( flagsSrc, indicesSrc, (int*) tempStorage, 0, flagsDst, indicesDst, part.Size); } } } ui32 FastSortSize() { return 500000; } void SortByFlagsInLeaf(ui32 leafId, const TDataPartition* partsCpu, TSplitPointsContext& context, TCudaStream stream) { /* * Sort leaves by flags */ TDataPartition part = partsCpu[leafId]; if (part.Size > FastSortSize()) { const bool* flagsSrc = context.TempFlags.Get(); bool* flagsDst = context.Flags.Get(); const ui32* indicesSrc = context.TempIndices.Get(); ui32* indicesDst = context.Indices.Get(); hipError_t error = hipcub::DeviceRadixSort::SortPairs < bool, ui32 > ((void*) context.TempStorage.Get(), context.TempStorageSizes[0], flagsSrc, flagsDst, indicesSrc, indicesDst, (int) part.Size, 0, 1, stream); CUDA_SAFE_CALL(error); } else { SortWithoutCub(leafId, partsCpu, context, stream); } } #define TEMPL_INST(Type)\ template void CopyInLeaves<Type>(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const Type *src, Type *dst, ui32 numCopies, ui32 lineSize, TCudaStream stream);\ template void GatherInLeaves<Type>(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const Type* src, const ui32* map, Type *dst, ui32 numStats, ui32 lineSize, TCudaStream stream);\ template void GatherLeaf<Type>(const ui32 leaf, const ui32 size, const TDataPartition* parts, const Type* src, const ui32* map, Type *dst, ui32 numStats, ui32 lineSize, TCudaStream stream);\ template void CopyLeaf<Type>(const ui32 leaf, const ui32 size, const TDataPartition* parts, const Type *src, Type *dst, ui32 numCopies, ui32 lineSize, TCudaStream stream); TEMPL_INST(ui32) TEMPL_INST(float) #undef TEMPL_INST template void GatherInplaceLeqSize<12288>(const ui32* leaf, ui32 leavesCount, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream); template void GatherInplaceLeqSize<6144>(const ui32* leaf, ui32 leavesCount, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream); template void GatherInplaceLeqSize<3072>(const ui32* leaf, ui32 leavesCount, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream); template void GatherInplaceLeqSize<1024>(const ui32* leaf, ui32 leavesCount, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream); #define INPLACE_SINGLE_LEAF(Size)\ template void GatherInplaceSingleLeaf<Size>(const ui32 leaf, \ const TDataPartition* parts,\ const ui32* map,\ float* stats, ui32 statCount,\ ui64 lineSize,\ ui32* indices,\ TCudaStream stream); INPLACE_SINGLE_LEAF(6144) INPLACE_SINGLE_LEAF(12288) INPLACE_SINGLE_LEAF(3072) INPLACE_SINGLE_LEAF(1024) }
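// A minimal host-side sketch (assumptions only, not CatBoost code) of the boundary search
// that UpdatePartitionsAfterSplitImpl performs on the GPU above: once a leaf's split flags
// are sorted (all 'false' documents first, then 'true'), the split point is the index of the
// first 'true' flag, which the kernel detects via flag0 != flag1. The struct and function
// names below are illustrative stand-ins for TDataPartition and the kernel logic.
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

struct PartitionSketch {      // stand-in for TDataPartition {Offset, Size}
    uint32_t Offset = 0;
    uint32_t Size = 0;
};

// Returns {leftChild, rightChild} of 'parent'; sortedFlags must have parent.Size entries.
static std::pair<PartitionSketch, PartitionSketch>
SplitPartition(const PartitionSketch& parent, const std::vector<bool>& sortedFlags) {
    // Index of the first 'true' flag == number of documents that stay in the left child.
    const uint32_t leftSize = static_cast<uint32_t>(
        std::lower_bound(sortedFlags.begin(), sortedFlags.end(), true) - sortedFlags.begin());
    PartitionSketch left  {parent.Offset,            leftSize};
    PartitionSketch right {parent.Offset + leftSize, parent.Size - leftSize};
    return {left, right};
}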
4e07ccb5784f4ecb65a9f241da76336298d638f3.cu
#include "split_points.cuh" #include <library/cpp/cuda/wrappers/arch.cuh> #include <catboost/cuda/cuda_lib/cuda_base.h> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/gpu_data/partitions.h> #include <catboost/cuda/cuda_util/kernel/update_part_props.cuh> #include <catboost/cuda/cuda_util/kernel/reorder_one_bit.cuh> #include <catboost/cuda/cuda_util/kernel/reorder_one_bit_impl.cuh> #include <contrib/libs/nvidia/cub/cub/device/device_radix_sort.cuh> namespace NKernel { template <typename T> __global__ void CopyInLeavesImpl(const ui32* leaves, const TDataPartition* parts, const T *src, T *dst, ui32 numStats, ui64 lineSize) { const ui32 leafId = leaves[blockIdx.y]; const ui32 offset = parts[leafId].Offset; const ui32 size = parts[leafId].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; src += offset; dst += offset; while (i < size) { #pragma unroll 8 for (int k = 0; k < numStats; ++k) { WriteThrough(dst + i + k * lineSize, __ldg(src + i + k * lineSize)); } i += gridDim.x * blockDim.x; } } template <ui32 Size, ui32 BlockSize = 1024> __global__ void GatherInplaceImpl(const ui32* leaf, const TDataPartition* parts, const ui32* map, float* stats, ui64 lineSize, ui32* indices) { __shared__ char4 tmp[Size]; char4* data = blockIdx.x == 0 ? (char4*)indices : (char4*)(stats + (blockIdx.x - 1) * lineSize); const ui32 leafId = leaf[blockIdx.y]; TDataPartition part = Ldg(parts + leafId); const ui32 offset = part.Offset; ui32 size = part.Size; //should be always true btw, but may help compiler const ui32 tid = threadIdx.x; map += offset; data += offset; #pragma unroll for (ui32 i = tid; i < Size; i += BlockSize) { if (i < size) { const ui32 loadIdx = __ldg(map + i); tmp[i] = __ldg(data + loadIdx); } } __syncthreads(); #pragma unroll for (ui32 i = tid; i < Size; i += BlockSize) { if (i < size) { WriteThrough(data + i, tmp[i]); } } } template <int Size> void GatherInplaceLeqSize(const ui32* leaf, ui32 leavesCount, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream) { const ui32 blockSize = 1024; dim3 numBlocks; numBlocks.x = 1 + statCount; numBlocks.y = leavesCount; numBlocks.z = 1; GatherInplaceImpl<Size, blockSize> <<<numBlocks, blockSize, 0, stream>>>(leaf, parts, map, stats, lineSize, indices); } template <ui32 Size, ui32 BlockSize = 1024> __global__ void GatherInplaceSingleLeafImpl(const ui32 leafId, const TDataPartition* parts, const ui32* map, float* stats, ui64 lineSize, ui32* indices) { __shared__ char4 tmp[Size]; char4* data = blockIdx.x == 0 ? 
(char4*)indices : (char4*)(stats + (blockIdx.x - 1) * lineSize); TDataPartition part = Ldg(parts + leafId); const ui32 offset = part.Offset; ui32 size = part.Size; //should be always true btw, but may help compiler const ui32 tid = threadIdx.x; data += offset; #pragma unroll for (ui32 i = tid; i < Size; i += BlockSize) { if (i < size) { const ui32 loadIdx = __ldg(map + i); tmp[i] = __ldg(data + loadIdx); } } __syncthreads(); #pragma unroll for (ui32 i = tid; i < Size; i += BlockSize) { if (i < size) { WriteThrough(data + i, tmp[i]); } } } template <int Size> void GatherInplaceSingleLeaf(const ui32 leaf, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream) { const ui32 blockSize = 1024; dim3 numBlocks; numBlocks.x = 1 + statCount; numBlocks.y = 1; numBlocks.z = 1; GatherInplaceSingleLeafImpl<Size, blockSize> <<<numBlocks, blockSize, 0, stream>>>(leaf, parts, map, stats, lineSize, indices); } /* this should be called before updatePartProps */ template <typename T> __global__ void GatherInLeavesImpl(const ui32* leaves, const TDataPartition* parts, const T *src, const ui32* map, T *dst, ui32 numStats, ui64 lineSize) { const ui32 leafId = leaves[blockIdx.y]; const ui32 offset = parts[leafId].Offset; const ui32 size = parts[leafId].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; map += offset; src += offset; dst += offset; while (i < size) { const ui32 loadIdx = __ldg(map + i); #pragma unroll 8 for (int k = 0; k < numStats; ++k) { WriteThrough(dst + i + k * lineSize, __ldg(src + loadIdx + k * lineSize)); } i += gridDim.x * blockDim.x; } } template <class T> void CopyInLeaves(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const T *src, T *dst, ui32 numStats, ui32 lineSize, TCudaStream stream) { const ui32 blockSize = 256; dim3 numBlocks; numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount(); numBlocks.y = leavesCount; numBlocks.z = 1; if (leavesCount) { CopyInLeavesImpl<T><<<numBlocks, blockSize, 0, stream>>>(leaves, parts, src, dst, numStats, lineSize); } } template <class T> void GatherInLeaves(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const T *src, const ui32* map, T *dst, ui32 numStats, ui32 lineSize, TCudaStream stream) { const ui32 blockSize = 256; dim3 numBlocks; numBlocks.x = (leavesCount > 4 ? 
2 : 4) * TArchProps::SMCount(); numBlocks.y = leavesCount; numBlocks.z = 1; if (leavesCount) { GatherInLeavesImpl<<<numBlocks, blockSize, 0, stream>>>(leaves, parts, src, map, dst, numStats, lineSize); } } template <typename T> __global__ void CopyLeafImpl(const ui32 leafId, const TDataPartition* parts, const T* src, T* dst, ui32 numStats, ui64 lineSize) { const ui32 offset = parts[leafId].Offset; const ui32 size = parts[leafId].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; src += offset; while (i < size) { #pragma unroll 8 for (int k = 0; k < numStats; ++k) { WriteThrough(dst + i + k * size, __ldg(src + i + k * lineSize)); } i += gridDim.x * blockDim.x; } } template <class T> void CopyLeaf(const ui32 leafId, const ui32 leafSize, const TDataPartition* parts, const T* src, T* dst, ui32 numStats, ui32 lineSize, TCudaStream stream) { const ui32 blockSize = 256; dim3 numBlocks; numBlocks.x = (leafSize + blockSize - 1) / blockSize; numBlocks.y = 1; numBlocks.z = 1; if (leafSize) { CopyLeafImpl<T><<<numBlocks, blockSize, 0, stream>>>(leafId, parts, src, dst, numStats, lineSize); } } /* this should be called before updatePartProps */ template <typename T> __global__ void GatherLeafImpl(const ui32 leafId, const TDataPartition* parts, const T* src, const ui32* map, T* dst, ui32 numStats, ui64 lineSize) { const ui32 offset = parts[leafId].Offset; const ui32 size = parts[leafId].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; dst += offset; while (i < size) { const ui32 loadIdx = __ldg(map + i); #pragma unroll 8 for (int k = 0; k < numStats; ++k) { WriteThrough(dst + i + k * lineSize, __ldg(src + loadIdx + k * size)); } i += gridDim.x * blockDim.x; } } template <class T> void GatherLeaf(const ui32 leafId, const ui32 leafSize, const TDataPartition* parts, const T* src, const ui32* map, T* dst, ui32 numStats, ui32 lineSize, TCudaStream stream) { const ui32 blockSize = 256; dim3 numBlocks; numBlocks.x = (leafSize + blockSize - 1) / blockSize; numBlocks.y = 1; numBlocks.z = 1; if (leafSize) { GatherLeafImpl<<<numBlocks, blockSize, 0, stream>>>(leafId, parts, src, map, dst, numStats, lineSize); } } __global__ void UpdatePartitionsAfterSplitImpl(const ui32* leftLeaves, const ui32* rightLeaves, ui32 leafCount, const bool* sortedFlags, TDataPartition* parts, TDataPartition* partsCpu ) { const ui32 leftLeaf = leftLeaves[blockIdx.y]; const ui32 rightLeaf = rightLeaves[blockIdx.y]; sortedFlags += parts[leftLeaf].Offset; const ui32 partSize = parts[leftLeaf].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 offset = parts[leftLeaf].Offset; while (i <= partSize) { int flag0 = i < partSize ? Ldg(sortedFlags + i) : 1; int flag1 = i ? Ldg(sortedFlags + i - 1) : 0; if (flag0 != flag1) { //we are on border TDataPartition leftPart = parts[leftLeaf]; leftPart.Size = i; parts[leftLeaf] = leftPart; partsCpu[leftLeaf] = leftPart; TDataPartition rightPart = parts[rightLeaf]; rightPart.Offset = offset + i; rightPart.Size = partSize - i; parts[rightLeaf] = rightPart; partsCpu[rightLeaf] = rightPart; break; } i += blockDim.x * gridDim.x; } } void UpdatePartitionsAfterSplit(const ui32* leftLeafs, const ui32* rightLeafs, ui32 leavesCount, const bool* sortedFlag, TDataPartition* parts, TDataPartition* partsCpu, TCudaStream stream) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.x = (leavesCount > 4 ? 
2 : 4) * TArchProps::SMCount(); numBlocks.y = leavesCount; numBlocks.z = 1; if (leavesCount) { UpdatePartitionsAfterSplitImpl<<<numBlocks, blockSize, 0, stream>>>(leftLeafs, rightLeafs, leavesCount, sortedFlag, parts, partsCpu); } } __global__ void UpdatePartitionAfterSplitImpl(const ui32 leftLeaf, const ui32 rightLeaf, const bool* sortedFlags, TDataPartition* parts, TDataPartition* partsCpu ) { const ui32 partSize = parts[leftLeaf].Size; ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 offset = parts[leftLeaf].Offset; while (i <= partSize) { int flag0 = i < partSize ? Ldg(sortedFlags + i) : 1; int flag1 = i ? Ldg(sortedFlags + i - 1) : 0; if (flag0 != flag1) { //we are on border TDataPartition leftPart = parts[leftLeaf]; leftPart.Size = i; partsCpu[leftLeaf] = leftPart; parts[leftLeaf] = leftPart; TDataPartition rightPart = parts[rightLeaf]; rightPart.Offset = offset + i; rightPart.Size = partSize - i; partsCpu[rightLeaf] = rightPart; parts[rightLeaf] = rightPart; break; } i += blockDim.x * gridDim.x; } } void UpdatePartitionAfterSplit(const ui32 leftLeaf, const ui32 rightLeaf, ui32 leafSize, const bool* sortedFlag, TDataPartition* parts, TDataPartition* partsCpu, TCudaStream stream) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.x = (leafSize + blockSize - 1) / blockSize; numBlocks.y = 1; numBlocks.z = 1; if (leafSize) { UpdatePartitionAfterSplitImpl<<<numBlocks, blockSize, 0, stream>>>(leftLeaf, rightLeaf, sortedFlag, parts, partsCpu); } } /* * blockIdx.x * gridDim.x + threadIdx.x is index in leaf * blockIdx.y is part number * this is not time critical kernel, so we make for several blocks per SM for each leaf and just skip computations if necessary */ template <int N, int BlockSize> __global__ void SplitAndMakeSequenceInLeavesImpl(const ui32* compressedIndex, const ui32* loadIndices, const TDataPartition* parts, const ui32* leafIds, const TCFeature* splitFeatures, const ui32* splitBins, bool* splitFlags, ui32* indices) { const ui32 leafId = leafIds[blockIdx.y]; TDataPartition part = Ldg(parts + leafId); const i32 size = part.Size; const i32 offset = part.Offset; loadIndices += offset; indices += offset; splitFlags += offset; int i = blockIdx.x * BlockSize * N + threadIdx.x; if (i >= size) { return; } TCFeature feature = splitFeatures[blockIdx.y]; const ui32 binIdx = splitBins[blockIdx.y]; const ui32 value = binIdx << feature.Shift; const ui32 mask = feature.Mask << feature.Shift; const bool oneHot = feature.OneHotFeature; compressedIndex += feature.Offset; while (i < size) { ui32 loadIndex[N]; #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { loadIndex[k] = loadIndices ? __ldg(loadIndices + i + k * BlockSize) : i + k * BlockSize; } } ui32 featureVal[N]; #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { featureVal[k] = __ldg(compressedIndex + loadIndex[k]) & mask; } } #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { WriteThrough(indices + i + k * BlockSize, static_cast<ui32>(i + k * BlockSize)); } } bool split[N]; #pragma unroll for (int k = 0; k < N; ++k) { split[k] = (oneHot ? 
(featureVal[k] == value) : featureVal[k] > value); } #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { WriteThrough(splitFlags + i + k * BlockSize, split[k]); } } i += N * BlockSize * gridDim.x; } } void SplitAndMakeSequenceInLeaves(const ui32* compressedIndex, const ui32* loadIndices, const TDataPartition* parts, const ui32* leafIds, ui32 leavesCount, const TCFeature* splitFeatures, const ui32* splitBins, bool* splitFlags, ui32* indices, TCudaStream stream) { if (leavesCount) { const ui32 blockSize = 512; const int N = 4; dim3 numBlocks; numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount(); numBlocks.y = leavesCount; numBlocks.z = 1; SplitAndMakeSequenceInLeavesImpl<N, blockSize><<<numBlocks, blockSize, 0, stream>>>(compressedIndex, loadIndices, parts, leafIds, splitFeatures, splitBins, splitFlags, indices); } } template <int N, int BlockSize> __global__ void SplitAndMakeSequenceInSingleLeafImpl(const ui32* compressedIndex, const ui32* loadIndices, const TDataPartition* parts, const ui32 leafId, const TCFeature feature, const ui32 binIdx, bool* splitFlags, ui32* indices) { TDataPartition part = Ldg(parts + leafId); const i32 size = part.Size; const i32 offset = part.Offset; loadIndices += offset; const int i = blockIdx.x * BlockSize * N + threadIdx.x; const ui32 value = binIdx << feature.Shift; const ui32 mask = feature.Mask << feature.Shift; const bool oneHot = feature.OneHotFeature; compressedIndex += feature.Offset; ui32 loadIndex[N]; #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { loadIndex[k] = __ldg(loadIndices + i + k * BlockSize); } } ui32 featureVal[N]; #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { featureVal[k] = __ldg(compressedIndex + loadIndex[k]) & mask; } } #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { WriteThrough(indices + i + k * BlockSize, static_cast<ui32>(i + k * BlockSize)); } } bool split[N]; #pragma unroll for (int k = 0; k < N; ++k) { split[k] = (oneHot ? 
(featureVal[k] == value) : featureVal[k] > value); } #pragma unroll for (int k = 0; k < N; ++k) { if (i + k * BlockSize < size) { WriteThrough(splitFlags + i + k * BlockSize, split[k]); } } } void SplitAndMakeSequenceInLeaf(const ui32* compressedIndex, const ui32* loadIndices, const TDataPartition* parts, ui32 leafId, ui32 leafSize, TCFeature splitFeature, ui32 splitBin, bool* splitFlags, ui32* indices, TCudaStream stream) { const ui32 blockSize = 256; const int N = 2; dim3 numBlocks; numBlocks.x = (leafSize + blockSize * N - 1) / (blockSize * N); numBlocks.y = 1; numBlocks.z = 1; if (numBlocks.x) { SplitAndMakeSequenceInSingleLeafImpl<N, blockSize> << < numBlocks, blockSize, 0, stream >> > (compressedIndex, loadIndices, parts, leafId, splitFeature, splitBin, splitFlags, indices); } } //TODO(noxoomo): cub sucks for this, write proper segmented version void SortByFlagsInLeaves(const ui32* leavesToSplit, const ui32 leafCount, const TDataPartition* partsCpu, TSplitPointsContext& context, TCudaStream stream) { /* * Sort leaves by flags */ for (ui32 i = 0; i < leafCount; ++i) { const ui32 leafId = leavesToSplit[i]; TDataPartition part = partsCpu[leafId]; const bool* flagsSrc = context.TempFlags.Get() + part.Offset; bool* flagsDst = context.Flags.Get() + part.Offset; const ui32* indicesSrc = context.TempIndices.Get() + part.Offset; ui32* indicesDst = context.Indices.Get() + part.Offset; if (part.Size) { cudaError_t error = cub::DeviceRadixSort::SortPairs < bool, ui32 > ((void*) context.TempStorage.Get(), context.TempStorageSizes[i], flagsSrc, flagsDst, indicesSrc, indicesDst, (int) part.Size, 0, 1, stream); CUDA_SAFE_CALL(error); } } } // void SortWithoutCub(ui32 leafId, const TDataPartition* partsCpu, TSplitPointsContext& context, TCudaStream stream) { TDataPartition part = partsCpu[leafId]; if (part.Size) { const bool* flagsSrc = context.TempFlags.Get(); bool* flagsDst = context.Flags.Get(); const ui32* indicesSrc = context.TempIndices.Get(); ui32* indicesDst = context.Indices.Get(); char* tempStorage = context.TempStorage.Get(); const ui64 tempOffsetsSize = sizeof(int) * part.Size; { using TInput = TScanBitIterator<bool>; TInput inputIter(context.TempFlags.Get(), 0); ui64 tempStorageSize = tempStorage ? context.TempStorageSizes[0] - tempOffsetsSize : 0; auto scanTmp = tempStorage ? 
(void*)(tempStorage + tempOffsetsSize) : nullptr; cudaError_t err = cub::DeviceScan::ExclusiveSum < TInput, int*> (scanTmp, tempStorageSize, inputIter, (int*)tempStorage, part.Size, stream); if (!tempStorage) { context.TempStorageSizes[0] = tempStorageSize + tempOffsetsSize; } CUDA_SAFE_CALL(err); } if (tempStorage) { const int blockSize = 512; const int N = 1; const int numBlocks = (part.Size + (N * blockSize) - 1) / (N * blockSize); ReorderOneBitImpl<bool, ui32, N, blockSize> << < numBlocks, blockSize, 0, stream >> > ( flagsSrc, indicesSrc, (int*) tempStorage, 0, flagsDst, indicesDst, part.Size); } } } ui32 FastSortSize() { return 500000; } void SortByFlagsInLeaf(ui32 leafId, const TDataPartition* partsCpu, TSplitPointsContext& context, TCudaStream stream) { /* * Sort leaves by flags */ TDataPartition part = partsCpu[leafId]; if (part.Size > FastSortSize()) { const bool* flagsSrc = context.TempFlags.Get(); bool* flagsDst = context.Flags.Get(); const ui32* indicesSrc = context.TempIndices.Get(); ui32* indicesDst = context.Indices.Get(); cudaError_t error = cub::DeviceRadixSort::SortPairs < bool, ui32 > ((void*) context.TempStorage.Get(), context.TempStorageSizes[0], flagsSrc, flagsDst, indicesSrc, indicesDst, (int) part.Size, 0, 1, stream); CUDA_SAFE_CALL(error); } else { SortWithoutCub(leafId, partsCpu, context, stream); } } #define TEMPL_INST(Type)\ template void CopyInLeaves<Type>(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const Type *src, Type *dst, ui32 numCopies, ui32 lineSize, TCudaStream stream);\ template void GatherInLeaves<Type>(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const Type* src, const ui32* map, Type *dst, ui32 numStats, ui32 lineSize, TCudaStream stream);\ template void GatherLeaf<Type>(const ui32 leaf, const ui32 size, const TDataPartition* parts, const Type* src, const ui32* map, Type *dst, ui32 numStats, ui32 lineSize, TCudaStream stream);\ template void CopyLeaf<Type>(const ui32 leaf, const ui32 size, const TDataPartition* parts, const Type *src, Type *dst, ui32 numCopies, ui32 lineSize, TCudaStream stream); TEMPL_INST(ui32) TEMPL_INST(float) #undef TEMPL_INST template void GatherInplaceLeqSize<12288>(const ui32* leaf, ui32 leavesCount, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream); template void GatherInplaceLeqSize<6144>(const ui32* leaf, ui32 leavesCount, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream); template void GatherInplaceLeqSize<3072>(const ui32* leaf, ui32 leavesCount, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream); template void GatherInplaceLeqSize<1024>(const ui32* leaf, ui32 leavesCount, const TDataPartition* parts, const ui32* map, float* stats, ui32 statCount, ui64 lineSize, ui32* indices, TCudaStream stream); #define INPLACE_SINGLE_LEAF(Size)\ template void GatherInplaceSingleLeaf<Size>(const ui32 leaf, \ const TDataPartition* parts,\ const ui32* map,\ float* stats, ui32 statCount,\ ui64 lineSize,\ ui32* indices,\ TCudaStream stream); INPLACE_SINGLE_LEAF(6144) INPLACE_SINGLE_LEAF(12288) INPLACE_SINGLE_LEAF(3072) INPLACE_SINGLE_LEAF(1024) }
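// A minimal sketch of the standard two-phase CUB calling convention behind the
// DeviceRadixSort::SortPairs / DeviceScan::ExclusiveSum calls above: the routine is first
// invoked with a null temp-storage pointer purely to query the required scratch size (which
// is how buffers like context.TempStorageSizes are typically filled), then invoked again
// with real storage. Function and buffer names here are illustrative; only one key bit is
// sorted (begin_bit=0, end_bit=1) because the keys are bools, and the radix sort is stable.
#include <cub/device/device_radix_sort.cuh>
#include <cuda_runtime.h>

cudaError_t SortFlagIndexPairs(const bool* flagsIn, bool* flagsOut,
                               const unsigned int* idxIn, unsigned int* idxOut,
                               int numItems, cudaStream_t stream) {
    void* dTemp = nullptr;
    size_t tempBytes = 0;

    // Pass 1: dTemp == nullptr, so CUB only reports the scratch size in tempBytes.
    cudaError_t err = cub::DeviceRadixSort::SortPairs(
        dTemp, tempBytes, flagsIn, flagsOut, idxIn, idxOut,
        numItems, /*begin_bit=*/0, /*end_bit=*/1, stream);
    if (err != cudaSuccess) return err;

    if ((err = cudaMalloc(&dTemp, tempBytes)) != cudaSuccess) return err;

    // Pass 2: the actual 1-bit radix sort of (flag, index) pairs.
    err = cub::DeviceRadixSort::SortPairs(
        dTemp, tempBytes, flagsIn, flagsOut, idxIn, idxOut,
        numItems, /*begin_bit=*/0, /*end_bit=*/1, stream);

    cudaFree(dTemp);
    return err;
}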
c826410cbbf39e60edbf36ac58cdd222aa2f3307.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This is a simple CUDA code that negates an array of integers. * It introduces the concepts of device memory management, and * kernel invocation. * * Training material developed by James Perry and Alan Gray * Copyright EPCC, The University of Edinburgh, 2013 */ #include <stdio.h> #include <stdlib.h> /* Forward Declaration*/ /* Utility function to check for and report CUDA errors */ void checkCUDAError(const char*); /* The actual array negation kernel (basic single block version) */ __global__ void negate(int *d_a) { int idx = threadIdx.x; d_a[idx] = -1 * d_a[idx]; } /* Multi-block version of kernel for part 2C */ __global__ void negate_multiblock(int *d_a) { /* Part 2C: negate an element of d_a, using multiple blocks this time */ int idx = threadIdx.x + blockIdx.x * blockDim.x; d_a[idx] = -1 * d_a[idx]; } /* The number of integer elements in the array */ #define ARRAY_SIZE 256 /* * The number of CUDA blocks and threads per block to use. * These should always multiply to give the array size. * For the single block kernel, NUM_BLOCKS should be 1 and * THREADS_PER_BLOCK should be the array size */ #define NUM_BLOCKS 1 #define THREADS_PER_BLOCK 256 /* Main routine */ int main(int argc, char *argv[]) { int *h_a, *h_out; int *d_a; int i; size_t sz = ARRAY_SIZE * sizeof(int); /* * allocate memory on host * h_a holds the input array, h_out holds the result */ h_a = (int *) malloc(sz); h_out = (int *) malloc(sz); /* * allocate memory on device */ /* Part 1A: allocate device memory */ hipMalloc((void**)&d_a, sz); /* initialise host arrays */ for (i = 0; i < ARRAY_SIZE; i++) { h_a[i] = i; h_out[i] = 0; } /* copy input array from host to GPU */ /* Part 1B: copy host array h_a to device array d_a */ hipMemcpy(d_a, h_a , sz, hipMemcpyHostToDevice); /* run the kernel on the GPU */ /* Part 2A: configure and launch kernel; grid times block must cover ARRAY_SIZE */ dim3 blocksPerGrid(NUM_BLOCKS, 1, 1 ); dim3 threadsPerBlock(THREADS_PER_BLOCK, 1, 1 ); hipLaunchKernelGGL(( negate), dim3(blocksPerGrid),dim3(threadsPerBlock) , 0, 0, d_a); /* wait for all threads to complete and check for errors */ hipDeviceSynchronize(); checkCUDAError("kernel invocation"); /* copy the result array back to the host */ /* Part 1C: copy device array d_a to host array h_out */ hipMemcpy(h_out, d_a, sz, hipMemcpyDeviceToHost); checkCUDAError("memcpy"); /* print out the result */ printf("Results: "); for (i = 0; i < ARRAY_SIZE; i++) { printf("%d, ", h_out[i]); } printf("\n\n"); /* free device buffer */ /* Part 1D: free d_a */ hipFree(d_a); /* free host buffers */ free(h_a); free(h_out); return 0; } /* Utility function to check for and report CUDA errors */ void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } }
c826410cbbf39e60edbf36ac58cdd222aa2f3307.cu
/* * This is a simple CUDA code that negates an array of integers. * It introduces the concepts of device memory management, and * kernel invocation. * * Training material developed by James Perry and Alan Gray * Copyright EPCC, The University of Edinburgh, 2013 */ #include <stdio.h> #include <stdlib.h> /* Forward Declaration*/ /* Utility function to check for and report CUDA errors */ void checkCUDAError(const char*); /* The actual array negation kernel (basic single block version) */ __global__ void negate(int *d_a) { int idx = threadIdx.x; d_a[idx] = -1 * d_a[idx]; } /* Multi-block version of kernel for part 2C */ __global__ void negate_multiblock(int *d_a) { /* Part 2C: negate an element of d_a, using multiple blocks this time */ int idx = threadIdx.x + blockIdx.x * blockDim.x; d_a[idx] = -1 * d_a[idx]; } /* The number of integer elements in the array */ #define ARRAY_SIZE 256 /* * The number of CUDA blocks and threads per block to use. * These should always multiply to give the array size. * For the single block kernel, NUM_BLOCKS should be 1 and * THREADS_PER_BLOCK should be the array size */ #define NUM_BLOCKS 1 #define THREADS_PER_BLOCK 256 /* Main routine */ int main(int argc, char *argv[]) { int *h_a, *h_out; int *d_a; int i; size_t sz = ARRAY_SIZE * sizeof(int); /* * allocate memory on host * h_a holds the input array, h_out holds the result */ h_a = (int *) malloc(sz); h_out = (int *) malloc(sz); /* * allocate memory on device */ /* Part 1A: allocate device memory */ cudaMalloc((void**)&d_a, sz); /* initialise host arrays */ for (i = 0; i < ARRAY_SIZE; i++) { h_a[i] = i; h_out[i] = 0; } /* copy input array from host to GPU */ /* Part 1B: copy host array h_a to device array d_a */ cudaMemcpy(d_a, h_a , sz, cudaMemcpyHostToDevice); /* run the kernel on the GPU */ /* Part 2A: configure and launch kernel; grid times block must cover ARRAY_SIZE */ dim3 blocksPerGrid(NUM_BLOCKS, 1, 1 ); dim3 threadsPerBlock(THREADS_PER_BLOCK, 1, 1 ); negate<<< blocksPerGrid,threadsPerBlock >>>(d_a); /* wait for all threads to complete and check for errors */ cudaDeviceSynchronize(); checkCUDAError("kernel invocation"); /* copy the result array back to the host */ /* Part 1C: copy device array d_a to host array h_out */ cudaMemcpy(h_out, d_a, sz, cudaMemcpyDeviceToHost); checkCUDAError("memcpy"); /* print out the result */ printf("Results: "); for (i = 0; i < ARRAY_SIZE; i++) { printf("%d, ", h_out[i]); } printf("\n\n"); /* free device buffer */ /* Part 1D: free d_a */ cudaFree(d_a); /* free host buffers */ free(h_a); free(h_out); return 0; } /* Utility function to check for and report CUDA errors */ void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } }
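/* A minimal sketch (not part of the EPCC exercise files above) of the "Part 2C" multi-block
 * configuration those files ask for: NUM_BLOCKS * THREADS_PER_BLOCK must still equal
 * ARRAY_SIZE, but the work is spread over several blocks and indexed with
 * threadIdx.x + blockIdx.x * blockDim.x. The 4 x 64 split and the helper name are
 * illustrative choices, not the exercise's reference solution. */
#include <stdio.h>
#include <cuda_runtime.h>

#define ARRAY_SIZE 256
#define NUM_BLOCKS 4
#define THREADS_PER_BLOCK 64   /* 4 * 64 == 256 == ARRAY_SIZE */

__global__ void negate_multiblock(int *d_a)
{
    /* Global index: one thread per array element across all blocks. */
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    d_a[idx] = -1 * d_a[idx];
}

/* Drop-in launch configuration for the exercise's main(): runs the multi-block kernel
 * on a device array of ARRAY_SIZE ints and reports any launch error. */
void run_negate_multiblock(int *d_a)
{
    dim3 blocksPerGrid(NUM_BLOCKS, 1, 1);
    dim3 threadsPerBlock(THREADS_PER_BLOCK, 1, 1);

    negate_multiblock<<<blocksPerGrid, threadsPerBlock>>>(d_a);

    cudaDeviceSynchronize();
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "negate_multiblock failed: %s\n", cudaGetErrorString(err));
    }
}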
b3dfe4f871bbcb43cd690134b34f911550b4c018.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <stdarg.h> #ifdef UNIX #include <stdint.h> #include <unistd.h> #endif // CUDA #include "hip/hip_runtime.h" #include "roctracer/roctx.h" __constant__ int devFluidParams[4]; #define FLUID_NX devFluidParams[0] #define FLUID_NY devFluidParams[1] #define FLUID_NZ devFluidParams[2] #define FLUID_SLABPITCH devFluidParams[3] __constant__ double dragparams[16]; __constant__ double devLambda[16]; // for gradient calculator kernels #define PI 3.141592653589793 //#define THREAD0_PRINTS_DBG /* NOTE NOTE NOTE IMPORTANT: If this is turned on to make them print, then the functions allregimeCdrag cukern_GasDustDrag_GeneralLinearCore cukern_LogTrapSolve which contain cuda kernel printf()s must all be moved UP HERE, ABOVE #include mex.h!!! mex.h wraps printf() and is fundamentally incompatible with cuda kernel printfs. */ /* * From dimensional analysis, by choosing L and T we can rescale... */ /* compute this on CPU and store in __constant__ double thevars[] */ #define VISC0 dragparams[0] #define VISCPOW dragparams[1] #define LAMPOW dragparams[2] #define ALPHA dragparams[3] #define BETA dragparams[4] #define DELTA dragparams[5] #define EPSILON dragparams[6] #define GAMMAM1 dragparams[8] #include "mex.h" #include "cudaCommon.h" #include "cudaSource2FluidDrag.h" // If defined in concert with ACCOUNT_GRADP, exponential methods will attempt to run the // action of the pressure gradient backwards in time to solve v' = -k(v) v + a on an // interval [-.5 .5] instead of [0 1]. This does not work and yields wrong dispersion // relations entirely. //#define EXPO_DOTR // This will account for the pressure gradient and solve v' = -k(v) v + a //#define ACCOUNT_GRADP // If the viscous temperature exponent is found to be 0.5 and the cross section exponent // is zero, the viscosity is hard spheres and some function calls can be simplified // for a speedup. 
typedef enum ViscosityModel { HARD_SPHERES, PCOF } ViscosityModel; //int sourcefunction_2FluidDrag(MGArray *fluidA, MGArray *fluidB, GeometryParams geo, double gam, double sigmaGas, double muGas, double sigmaDust, double muDust, double dt, int method); int sourcefunction_2FluidDrag(MGArray *fluidA, MGArray *fluidB, GeometryParams *geo, ThermoDetails *thermogas, ThermoDetails *thermodust, double dt, int method); int solveDragEMP(MGArray *gas, MGArray *dust, double dt); int solveDragRK4(MGArray *gas, MGArray *dust, double dt); int solveDragETDRK1(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt); int solveDragETDRK2(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt); int solveDragLogTrapezoid(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt, int timeOrder); int prepareForExpMethod(MGArray *gas, MGArray *dust, MGArray *tempMem, GeometryParams geom, int spaceOrder, double scalingParameter); int findMidGradP2(MGArray *gas, MGArray *tempMem, GeometryParams geom, int spaceOrder, double scalingParameter); void dbgPrint(MGArray *gas, MGArray *dust, MGArray *t, int who, int idx); template <bool ONLY_DV_INI> __global__ void cukern_GasDustDrag_GeneralAccel(double *gas, double *dust, double *tmpmem, int srcBlock, int dstBlock, int N); __global__ void cukern_GasDustDrag_EpsteinAccel(double *gas, double *dust, double *vrel, int N); template <bool resetAccumulator> __global__ void cukern_GasDustDrag_GeneralLinearTime(double *gas, double *dust, double *tmpmem, int srcBlock, int kBlock, int N); // shell call for inner loop of above kernel template <bool resetAccumulator> __device__ void cukern_GasDustDrag_GeneralLinearCore(double *gas, double *dust, double *tmpmem, int srcBlock, int kBlock, int N); __global__ void cukern_findInitialDeltaV(double *g, double *d, double *dv, unsigned long partNumel); // Functions to evaluate explicit Butcher tableaus template <bool resetAccumulator> __global__ void cukern_SolveRK_single(double *tmpmem, int d, double A, int i, double B, unsigned long partNumel); template <bool resetAccumulator> __global__ void cukern_SolveRK_double(double *tmpmem, int d, double F[2], int i[2], double B, unsigned long partNumel); template <bool resetAccumulator> __global__ void cukern_SolveRK_triple(double *tmpmem, int d, double F[3], int i[3], double B, unsigned long partNumel); __global__ void cukern_SolveRK_final(double *tmpmem, int i, double B, double W, unsigned long partNumel); __global__ void cukern_applyFinalDeltaV(double *g, double *d, double *dv_final, unsigned long partNumel); __global__ void cukern_ExpMidpoint_partA(double *gas, double *dust, double *tmpmem, double t, unsigned long partNumel); __global__ void cukern_ExpMidpoint_partB(double *gas, double *dust, double t, double *tmpmem); __global__ void cukern_ETDRK1(double *gas, double *dust, double t, double *tmpmem); template <int order> __global__ void cukern_LogTrapSolve(double *gas, double *dust, double t, double *tmpmem, int partNumel); // Accept the following drag models: // (1) full : Use full Epstein+Stokes calculation with interpolation between all 4 quadrants // (2) Epstein : Use only Epstein force calculation, valid for any speed but only small particles // (3) Linear : Compute Epstein+Stokes in low-velocity limit, valid only for |delta-v/c| << 1 (and strictly, Re < 1) // PARITY CONVENTIONS ARE AS FOLLOWS: // delta-V is defined as GAS VELOCITY MINUS DUST VELOCITY // Drag force is positive in the direction of delta-V, // i.e. 
d/dt(dust momentum) = F_drag and d/dt(gas momentum) = -F_drag // ergo d/dt(delta_V) ~ -F_drag / mass void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if ((nrhs!=3) || (nlhs != 0)) mexErrMsgTxt("Wrong number of arguments: need cudaSource2FluidDrag(FluidManager[2], geometry, [dt, solverMethod])\n"); if(CHECK_CUDA_ERROR("entering cudaSource2FluidDrag") != SUCCESSFUL) { DROP_MEX_ERROR("Failed upon entry to cudaSource2FluidDrag."); } MGArray fluidA[5]; int status = MGA_accessFluidCanister(prhs[0], 0, &fluidA[0]); if(status != SUCCESSFUL) { PRINT_FAULT_HEADER; printf("Unable to access first FluidManager.\n"); PRINT_FAULT_FOOTER; DROP_MEX_ERROR("crashing."); } const mxArray *thermostruct = derefXatNdotAdotB(prhs[0], 0, "thermoDetails", NULL); ThermoDetails thermA = accessMatlabThermoDetails(thermostruct); MGArray fluidB[5]; status = MGA_accessFluidCanister(prhs[0], 1, &fluidB[0]); if(status != SUCCESSFUL) { PRINT_FAULT_HEADER; printf("Unable to access second FluidManager.\n"); PRINT_FAULT_FOOTER; DROP_MEX_ERROR("crashing."); } thermostruct = derefXatNdotAdotB(prhs[0], 1, "thermoDetails", NULL); ThermoDetails thermB = accessMatlabThermoDetails(thermostruct); GeometryParams geo = accessMatlabGeometryClass(prhs[1]); double *params = mxGetPr(prhs[2]); size_t ne = mxGetNumberOfElements(prhs[2]); if(ne != 2) { PRINT_FAULT_HEADER; printf("3rd argument to cudaSource2FluidDrag must have 2 elements:\n[ dt (method: 0=midpt, 1=rk4, 2=exponential)]\nGiven argument has %i instead.\n", (int)ne); PRINT_FAULT_FOOTER; DROP_MEX_ERROR("Crashing."); } double dt = params[0]; int solverMethod = (int)params[1]; // For reference: //1nm iron sphere, 300K -> 56m/s thermal velocity //10nm iron ball, 300K -> 1.79m/s thermal velocity //100nm iron ball, 300K -> 56mm/s thermal velocity status = sourcefunction_2FluidDrag(&fluidA[0], &fluidB[0], &geo, &thermA, &thermB, dt, solverMethod); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { DROP_MEX_ERROR("2-fluid drag code crashed!"); } return; } /* Calculates the drag between fluids A and B where B is presumed to be dust. * geo describes the physical geometry of the grids, to which fluidA an fluidB must conform. * thermogas and thermodust provide the necessary fluid microphysics constants. * dt is the time to integrate and method selects the numeric integration scheme to employ. */ int sourcefunction_2FluidDrag(MGArray *fluidA, MGArray *fluidB, GeometryParams *geo, ThermoDetails *thermogas, ThermoDetails *thermodust, double dt, int method) { int i; int sub[6]; int hostFluidParams[4]; int statusCode = SUCCESSFUL; double hostDrag[16]; double gam = thermogas -> gamma; // Reference viscosity & viscosity temperature dependence (0.5 for hard spheres) double nu0 = thermogas->mu0; double nupow = thermogas->muTindex; // double lampow = thermogas->sigmaTindex; double ddust = sqrt(thermodust->sigma0 / 3.141592653589793); // based on sigma being a kinetic cross section = pi (2r)^2, this is correct and needn't be divided by 4 double mgas = thermogas->m; double mdust = thermodust->m; hostDrag[0] = nu0; // reference viscosity, fluidDetailModel.viscosity hostDrag[1] = -nupow; // viscosity temperature dependence, fluidDetailModel.visocity hostDrag[2] = lampow; // cross section temperature dependence, fluidDetailModel. ... 
hostDrag[3] = mgas *(gam-1.0) / (298.15*thermogas->kBolt); // alpha = mgas * (gamma-1) / (t_ref * k_b) hostDrag[4] = sqrt(2.0)*mgas/(thermogas->sigma0 * ddust); // beta =2 mgas / (sqrt(2) * sigmaGas * dustDiameter); hostDrag[5] = ddust / nu0; // delta= dustDiameter / (visc0) hostDrag[6] = thermodust->sigma0 / (1.0*mdust); // epsilon = sigmaDust / 8 mdust hostDrag[7] = dt; hostDrag[8] = (gam-1.0); hostDrag[9] = .25*thermodust->sigma0 / mdust; hostDrag[10]= 16*(gam-1.0)/3.0; #ifdef THREAD0_PRINTS_DBG printf("hostDrag[] in sourceFunction_2FluidDrag:\n"); printf("VISC0 = %le\nVISCPOW = %le\nLAMPOW = %le\nALPHA=%le\nBETA=%le\nDELTA=%le\nEPSILON=%le\n", hostDrag[0], hostDrag[1], hostDrag[2], hostDrag[3], hostDrag[4], hostDrag[5], hostDrag[6]); #endif for(i = 0; i < fluidA->nGPUs; i++) { hipSetDevice(fluidA->deviceID[i]); statusCode = CHECK_CUDA_ERROR("hipSetDevice"); if(statusCode != SUCCESSFUL) break; calcPartitionExtent(fluidA, i, &sub[0]); hostFluidParams[0] = sub[3]; hostFluidParams[1] = sub[4]; hostFluidParams[2] = sub[5]; hostFluidParams[3] = fluidA->slabPitch[i] / sizeof(double); // This is important, due to padding, is isn't just .partNumel hipMemcpyToSymbol((const void *)devFluidParams, &hostFluidParams[0], 4*sizeof(int), 0, hipMemcpyHostToDevice); statusCode = CHECK_CUDA_ERROR("memcpyToSymbol"); if(statusCode != SUCCESSFUL) break; hipMemcpyToSymbol((const void *)dragparams, &hostDrag[0], 11*sizeof(double), 0, hipMemcpyHostToDevice); statusCode = CHECK_CUDA_ERROR("memcpyToSymbol"); if(statusCode != SUCCESSFUL) break; } if(statusCode != SUCCESSFUL) return statusCode; // FIXME pick a numeric method here dynamically? switch(method) { case 0: // EMP statusCode = CHECK_IMOGEN_ERROR(solveDragEMP(fluidA, fluidB, dt)); break; case 1: // RK4 statusCode = CHECK_IMOGEN_ERROR(solveDragRK4(fluidA, fluidB, dt)); break; case 2: // ETDRK1 (exponential Euler) statusCode = CHECK_IMOGEN_ERROR(solveDragETDRK1(fluidA, fluidB, geo, gam, dt)); break; case 3: // ETDRK2 (exponential midpoint) statusCode = CHECK_IMOGEN_ERROR(solveDragETDRK2(fluidA, fluidB, geo, gam, dt)); break; case 4: // LogTrap2 method (quadratic accuracy with time-variable drag coefficient) statusCode = CHECK_IMOGEN_ERROR(solveDragLogTrapezoid(fluidA, fluidB, geo, gam, dt, 2)); break; case 5: // LogTrap3 method (cubic accuracy with time-variable drag coefficient) statusCode = CHECK_IMOGEN_ERROR(solveDragLogTrapezoid(fluidA, fluidB, geo, gam, dt, 3)); break; } return statusCode; } /* Helps track the state of the integrator when debugging w/o needing cuda-gdb * i.e. 
a slightly more sophisticated printf()-debug * gas, dust, t are the five-MGArray pointers to gas, dust and tmp storage * who: bit 1 = print about gas, 2 = about dust, 4 = about t * idx: the linear index of the cell to print about (the test suite element generates a uniform * in space fluid state) */ void dbgPrint(MGArray *gas, MGArray *dust, MGArray *t, int who, int idx) { double *hstcpy = (double *)malloc(gas->slabPitch[0]*5); int qq = gas->slabPitch[0]/8; if(who & 1) { hipMemcpy((void *)hstcpy, (const void *)gas->devicePtr[0], gas->slabPitch[0]*5, hipMemcpyDeviceToHost); printf("Gas input state: [%e %e %e %e %e]\n", hstcpy[idx+0*qq], hstcpy[idx+1*qq], hstcpy[idx+2*qq], hstcpy[idx+3*qq], hstcpy[idx+4*qq]); } if(who & 2) { hipMemcpy((void *)hstcpy, (const void *)dust->devicePtr[0], gas->slabPitch[0]*5, hipMemcpyDeviceToHost); printf("Dust input state: [%e %e %e %e %e]\n", hstcpy[idx+0*qq], hstcpy[idx+1*qq], hstcpy[idx+2*qq], hstcpy[idx+3*qq], hstcpy[idx+4*qq]); } if(who & 4) { hipMemcpy((void *)hstcpy, (const void *)t->devicePtr[0], gas->slabPitch[0]*5, hipMemcpyDeviceToHost); printf("tmp memory state: [%e %e %e %e %e]\n", hstcpy[idx+0*qq], hstcpy[idx+1*qq], hstcpy[idx+2*qq], hstcpy[idx+3*qq], hstcpy[idx+4*qq]); } free(hstcpy); } /* Solves the action of gas-dust drag for one dust using the explicit midpoint method * 2nd order in time, not A-stable (dt <~ t_stop) */ int solveDragEMP(MGArray *gas, MGArray *dust, double dt) { int n = gas->nGPUs; double *g; double *d; double *vrel; int statusCode = SUCCESSFUL; MGArray tmpArrays; statusCode = MGA_allocSlab(gas, &tmpArrays, 5); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; int i; int BS = 96; dim3 blocksize(BS, 1, 1); dim3 gridsize(32, 1, 1); for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems gridsize.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { gridsize.x = ROUNDUPTO(NE, BS)/BS; } hipSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; vrel = tmpArrays.devicePtr[i]; // compute initial delta-v hipLaunchKernelGGL(( cukern_findInitialDeltaV), dim3(gridsize), dim3(blocksize), 0, 0, g, d, vrel, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_findInitialDeltaV"); if(statusCode != SUCCESSFUL) break; // solve gas drag on y0, store in block 3: use only ini dv for u_specific hipLaunchKernelGGL(( cukern_GasDustDrag_GeneralAccel<true>), dim3(gridsize), dim3(blocksize), 0, 0, g, d, vrel, 0, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_full<false>"); if(statusCode != SUCCESSFUL) break; // compute delta-v at t=1/2; store stage at block 4 hipLaunchKernelGGL(( cukern_SolveRK_single<true>), dim3(gridsize), dim3(blocksize), 0, 0, vrel, 4, .5*dt, 3, 0, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_SolveRK_single<true>"); if(statusCode != SUCCESSFUL) break; // solve gas drag at t=1/2 using half stage, store in block 3 hipLaunchKernelGGL(( cukern_GasDustDrag_GeneralAccel<false>), dim3(gridsize), dim3(blocksize), 0, 0, g, d, vrel, 4, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_full<true>"); if(statusCode != SUCCESSFUL) break; // Apply final stage derivative to compute y(t) hipLaunchKernelGGL(( cukern_SolveRK_final), dim3(gridsize), dim3(blocksize), 0, 0, vrel, 3, 1.0, dt, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_SolveRK_final"); if(statusCode != SUCCESSFUL) break; // compute 
new gas/dust momentum and temperature arrays using analytic forms hipLaunchKernelGGL(( cukern_applyFinalDeltaV), dim3(gridsize), dim3(blocksize), 0, 0, g, d, vrel, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_applyFinalDeltaV"); if(statusCode != SUCCESSFUL) break; } MGA_delete(&tmpArrays); return CHECK_IMOGEN_ERROR(statusCode); } /* Solves the action of the gas-dust drag for one dust using the 4th order RK method of Kutta (1903) * 4th order in time, conditionally stable (dt <~ 3t_stop) */ int solveDragRK4(MGArray *gas, MGArray *dust, double dt) { int n = gas->nGPUs; double *g; double *d; // short names for gas and dust gpu memory pointers double *vrel; // temp memory short pointer name int statusCode = SUCCESSFUL; int i; MGArray tmpArrays; statusCode = MGA_allocSlab(gas, &tmpArrays, 5); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; int BS = 96; // FIXME this should determine an appropriate blocksize at runtime perhaps? dim3 blocksize(BS, 1, 1); dim3 gridsize(32, 1, 1); dim3 smallgrid(1,1,1); double bWeights[4] = { 1.0, 2.0, 2.0, 1.0 }; // classic RK4 weights double bRescale = dt / 6.0; for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems gridsize.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { gridsize.x = ROUNDUPTO(NE, BS)/BS; } hipSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; vrel = tmpArrays.devicePtr[i]; // compute initial delta-v hipLaunchKernelGGL(( cukern_findInitialDeltaV), dim3(gridsize), dim3(blocksize), 0, 0, g, d, vrel, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_findInitialDeltaV"); if(statusCode != SUCCESSFUL) break; // solve gas drag on y0, store in block 3 hipLaunchKernelGGL(( cukern_GasDustDrag_GeneralAccel<true>), dim3(gridsize), dim3(blocksize), 0, 0, g, d, vrel, 0, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_GeneralAccel<true>"); if(statusCode != SUCCESSFUL) break; // compute delta-v at t=1/2; store stage at block 4 hipLaunchKernelGGL(( cukern_SolveRK_single<true>), dim3(gridsize), dim3(blocksize), 0, 0, vrel, 4, 0.5*dt, 3, bWeights[0], NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_SolveRK_single<true>"); if(statusCode != SUCCESSFUL) break; // solve gas drag on k2, store in block 3 hipLaunchKernelGGL(( cukern_GasDustDrag_GeneralAccel<false>), dim3(gridsize), dim3(blocksize), 0, 0, g, d, vrel, 4, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_GeneralAccel<false>"); if(statusCode != SUCCESSFUL) break; // compute delta-v at t=1/2; store stage at block 4 hipLaunchKernelGGL(( cukern_SolveRK_single<false>), dim3(gridsize), dim3(blocksize), 0, 0, vrel, 4, 0.5*dt, 3, bWeights[1], NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_SolveRK_single<true>"); if(statusCode != SUCCESSFUL) break; // solve gas drag on k3, store in block 3 hipLaunchKernelGGL(( cukern_GasDustDrag_GeneralAccel<false>), dim3(gridsize), dim3(blocksize), 0, 0, g, d, vrel, 4, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_GeneralAccel<false>"); if(statusCode != SUCCESSFUL) break; // compute delta-v at t=1/2; store stage at block 4 hipLaunchKernelGGL(( cukern_SolveRK_single<false>), dim3(gridsize), dim3(blocksize), 0, 0, vrel, 4, 1.0*dt, 3, bWeights[2], NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, 
"cukern_SolveRK_single<true>"); if(statusCode != SUCCESSFUL) break; // solve gas drag on k4, store in block 3 hipLaunchKernelGGL(( cukern_GasDustDrag_GeneralAccel<false>), dim3(gridsize), dim3(blocksize), 0, 0, g, d, vrel, 4, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_GeneralAccel<false>"); if(statusCode != SUCCESSFUL) break; // add block 3 to accumulator, rescale by dt / 6.0 and add y0 to find final dv. hipLaunchKernelGGL(( cukern_SolveRK_final), dim3(gridsize), dim3(blocksize), 0, 0, vrel, 3, bWeights[3], bRescale, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_SolveRK_final"); if(statusCode != SUCCESSFUL) break; // compute new gas/dust momentum and temperature arrays hipLaunchKernelGGL(( cukern_applyFinalDeltaV), dim3(gridsize), dim3(blocksize), 0, 0, g, d, vrel, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_applyFinalDeltaV"); if(statusCode != SUCCESSFUL) break; } if(statusCode != SUCCESSFUL) { printf("RK4 was unsuccessful: Trying to free temp memory, then returning crash condition.\n"); MGA_delete(&tmpArrays); PRINT_FAULT_FOOTER; } else { statusCode = MGA_delete(&tmpArrays); } return CHECK_IMOGEN_ERROR(statusCode); } /* Solves the gas-dust drag equations using the 2nd order Explicit Exponential Runge-Kutta method * aka the exponential midpoint method: * u_stiff(hf) = u_stiff(0) exp(M_stiff(0) t/2) * u_soft(hf) = u_0 + M_soft(0)*t/2 * u_stiff(t) = u_stiff(0) exp(M_stiff(hf) t) * u_soft(t) = u_0 + M_soft(hf)*t/2 * where the stiff term (gas-dust drag) is solved by directly exponentiating its characteristic matrix * and the nonstiff terms are handled by simple explicit RK2 * * We are advantaged here that to an excellent approximation the stiff terms are truly linear * (i.e. the effect of drag heating in altering pressure gradients is neglectable) if drag is strong * enough to require calling this method. 
* formally order 2, stiff order 1, L-stable */ int solveDragETDRK1(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt) { int dbprint = 0; int n = gas->nGPUs; double *g; double *d; double *tempPtr; int statusCode = SUCCESSFUL; MGArray tmpMem; MGArray *gs = &tmpMem; int numTmpArrays, spaceOrder; #ifdef ACCOUNT_GRADP numTmpArrays = 5; spaceOrder = 2; #else numTmpArrays = 2; spaceOrder = 0; #endif statusCode = MGA_allocSlab(gas, gs, numTmpArrays); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; int i; int BS = 96; // for kernels not requiring finite differencing dim3 linblock(BS, 1, 1); dim3 lingrid(32, 1, 1); // for kernels that do need to do FD dim3 fdgrid(4, 4, 1); dim3 fdblock(16, 16, 1); // Emits [|dv_tr|, u_0, P_x, P_y, P_z] into temp memory at gs statusCode = prepareForExpMethod(gas, dust, gs, *geo, spaceOrder, fluidGamma - 1); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; if(dbprint) { dbgPrint(gas, dust, gs, 7, 6); } int velblock = 0; int kblock = 0; for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems lingrid.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { lingrid.x = ROUNDUPTO(NE, BS)/BS; } hipSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; tempPtr = tmpMem.devicePtr[i]; // Use u_0 and dv_tr to compute the drag eigenvalue at t=0 // overwrite the |dv_tr| value (block 0) with K hipLaunchKernelGGL(( cukern_GasDustDrag_GeneralLinearTime<true>), dim3(lingrid), dim3(linblock), 0, 0, g, d, tempPtr, velblock, kblock, gas->partNumel[i]); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "cukern_GasDustDrag_linearTime"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 4, 6); } // Use 1st order exponential time differencing (exponential euler) hipLaunchKernelGGL(( cukern_ETDRK1), dim3(lingrid), dim3(linblock), 0, 0, g, d, dt, tempPtr); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "cukern_ExponentialEulerHalf"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 7, 6); } } // Make sure node's internal boundaries are consistent if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) MGA_exchangeLocalHalos(gas + 1, 4); if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) MGA_exchangeLocalHalos(dust + 1, 4); if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) MGA_delete(gs); return statusCode; } // TODO /* Implement Exponential Time Differencing, 2nd order RK: * y_1 = exp(h L) y_0 + h phi_1(h L) f(t=0) * y_n+1 = exp(h L) y_0 + h (phi_1(h L) - phi_2(h L)) f(t=0) + h phi_2(h L) f(t=1) * *L = -k *-> y_1 = exp(-k t) y_0 + t (exp(-k t)-1) / (-k t) f_0 *-> y_1 = exp(-k t) y_0 + f_0 (1 - exp(-k t)) / k *-> y_1 = f_0 / k + (y_0 - f_0/k) exp(-k t) * *y_n+1 = exp(-k t) y_0 + f_0 (-(exp(-k t)-1)/k - (exp(-k t)-1-kt)/(k^2 t)) + f_1 (exp(-kt)-1-k t)/k^2t *y_n+1 = exp(-k t) y_0 + f_0/k + (f_0-f_1)/k^2t + f_0/k - f_1/k + (-f_0/k - f_0/k^2t + f_1/k^2t) exp(-k t) *y_n+1 = exp(-k t) y_0 + (2f_0-f_1)/k -f_0/k exp(-kt) + (f_0-f_1)/k^2t - (f_0 - f_1) exp(-k t)/k^2t *y_n+1 = (2f_0-f_1)/k + (y_0-f_0/k) exp(-kt) + (f_0-f_1)(1-exp(-kt))/k^2t *y_n+1 = y_0 exp(-kt) + f_0(2/k - exp(-kt)/k + 1/k^2t -exp(-kt)/k^2t) + f_1(-1/k + exp(-kt)/k^2t) */ int solveDragETDRK2(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt) { int n = gas->nGPUs; int dbprint = 0; double *g; double *d; double *tempPtr; int statusCode = SUCCESSFUL; MGArray tmpMem; MGArray *gs = &tmpMem; statusCode = MGA_allocSlab(gas, 
gs, 6); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; int i; int BS = 96; // for kernels not requiring finite differencing dim3 linblock(BS, 1, 1); dim3 lingrid(32, 1, 1); // for kernels that do need to do FD dim3 fdgrid(4, 4, 1); dim3 fdblock(16, 16, 1); // Emits [|dv_tr|, u_0, P_x, P_y, P_z] into temp memory at gs statusCode = prepareForExpMethod(gas, dust, gs, *geo, 2, fluidGamma - 1); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; if(dbprint) { dbgPrint(gas, dust, gs, 7, 6); } int velblock = 0; int kblock = 0; for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems lingrid.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { lingrid.x = ROUNDUPTO(NE, BS)/BS; } hipSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; tempPtr = tmpMem.devicePtr[i]; // Use u_0 and dv_tr to compute the drag eigenvalue at t=0 // overwrites the |dv_tr| value (block 0) with K // replace [|dv_tr|, u_0, P_x, P_y, P_z] into temp memory at gs // with [K , u_0, P_x, P_y, P_z] into temp memory at gs hipLaunchKernelGGL(( cukern_GasDustDrag_GeneralLinearTime<true>), dim3(lingrid), dim3(linblock), 0, 0, g, d, tempPtr, velblock, kblock, gas->partNumel[i]); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "cukern_GasDustDrag_linearTime"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 4, 6); } // Use the eigenvalue from t=0 to advance to t=1/2 // Output only new uint & dv values from this stage, // We do this only do re-evaluate the pressure gradient & eigenvalue at the midpoint // This reads K from register 0 and overwrites it with dv_half // overwrite [K , u_0, P_x, P_y, P_z] into temp memory at gs // with [dv_new , u_new, P_x, P_y, P_z] into temp memory at gs hipLaunchKernelGGL(( cukern_ExpMidpoint_partA), dim3(lingrid), dim3(linblock), 0, 0, g, d, tempPtr, dt, gas->partNumel[i]); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "doing cukern_ExponentialEulerIntermediate"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 4, 6); } } // Solve gradient-P again statusCode = findMidGradP2(gas, gs, *geo, 2, fluidGamma - 1); if(dbprint) { dbgPrint(gas, dust, gs, 4, 6); } for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems lingrid.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { lingrid.x = ROUNDUPTO(NE, BS)/BS; } hipSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; tempPtr = tmpMem.devicePtr[i]; // accumulates new k onto original k, such that block 0 is now (k0 + k1)... hipLaunchKernelGGL(( cukern_GasDustDrag_GeneralLinearTime<true>), dim3(lingrid), dim3(linblock), 0, 0, g, d, tempPtr, velblock, kblock, gas->partNumel[i]); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "cukern_GasDustDrag_linearTime"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 4, 6); } // Use averaged pressure gradient and k value to compute timestep. 
// we divide t by 2 since we simply summed the k values previously hipLaunchKernelGGL(( cukern_ExpMidpoint_partB), dim3(lingrid), dim3(linblock), 0, 0, g, d, dt, tempPtr); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "cukern_exponentialMidpoint"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 3, 6); } } // Make sure node's internal boundaries are consistent if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_exchangeLocalHalos(gas + 1, 4); if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_exchangeLocalHalos(dust + 1, 4); if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_delete(gs); return statusCode; } /* Second or third order method that handles variable drag coefficients * 2nd order (trapezoid): * u_0 = P(y_0) * k_0 = compute_kdrag(y_0, u_0) * (y_1, u_1) = y_0 exp(-k_0 t) * k_1 = compute_kdrag(y_1, u_1) * y_n+1 = y_0 exp(-0.5(k_0 + k_1)t) * * 3rd order: (Richardson extrapolated trapezoid) * u_0 = P(y_0) * k_0 = compute_kdrag(y_0, u_0) * (y_1, u_1) = y_0 exp(-k_0 t) * k_1 = compute_kdrag(y_1, u_1) * (y_nhf,u_nhf)= y_0 exp(-0.5 * 0.5(k_0 + k_1)t) * k_nhf = compute_kdrag(y_nhf, u_nhf) * k_integral = richardson_extrap(.25 k_0 + .5 k_nhf + .25 k1, .5k_0 + .5k_1) * y_1 = y_0 exp(-k_integral t) */ int solveDragLogTrapezoid(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt, int timeOrder) { int n = gas->nGPUs; int dbprint = 0; double *g; double *d; double *tempPtr; int statusCode = SUCCESSFUL; MGArray tmpMem; MGArray *gs = &tmpMem; #ifdef USE_NVTX roctxMark("Large alloc (3 arrays)"); #endif statusCode = MGA_allocSlab(gas, gs, 3); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; int i; int BS = 96; // for kernels not requiring finite differencing dim3 linblock(BS, 1, 1); dim3 lingrid(32, 1, 1); // for kernels that do need to do FD dim3 fdgrid(4, 4, 1); dim3 fdblock(16, 16, 1); // Emits [|dv_tr|, u_0] into temp memory at gs statusCode = prepareForExpMethod(gas, dust, gs, *geo, 0, fluidGamma - 1); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; /* These often fail wrongly in parallel because of invalid halo entries */ /* int dbcheckval = dbgfcn_CheckArrayVals(gs, 5, 1); if(CHECK_IMOGEN_ERROR(dbcheckval) != SUCCESSFUL) return dbcheckval; */ if(dbprint) { dbgPrint(gas, dust, gs, 7, 6); } for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems lingrid.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { lingrid.x = ROUNDUPTO(NE, BS)/BS; } hipSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; tempPtr = tmpMem.devicePtr[i]; switch(timeOrder) { case 2: hipLaunchKernelGGL(( cukern_LogTrapSolve<2>), dim3(lingrid), dim3(linblock), 0, 0, g, d, dt, tempPtr, gas->partNumel[i]); break; case 3: hipLaunchKernelGGL(( cukern_LogTrapSolve<3>), dim3(lingrid), dim3(linblock), 0, 0, g, d, dt, tempPtr, gas->partNumel[i]); break; default: statusCode = ERROR_INVALID_ARGS; break; } statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "doing cukern_LogTrapSolve"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 7, 6); } } // See to it that internal gpu-gpu boundaries for momentum & energy are consistent if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_exchangeLocalHalos(gas + 1, 4); if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_exchangeLocalHalos(dust + 1, 4); /* These often fail in parallel due to boundary conditions or halo 
cells dbcheckval = dbgfcn_CheckFluidVals(gas, 1); if(CHECK_IMOGEN_ERROR(dbcheckval) != SUCCESSFUL) return dbcheckval; dbcheckval = dbgfcn_CheckFluidVals(dust, 1); if(CHECK_IMOGEN_ERROR(dbcheckval) != SUCCESSFUL) return dbcheckval; */ // Dump the temporary memory if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_delete(gs); return statusCode; } /* This function returns the Stokes coefficient, scaled by 1/2 * This parameter is experimentally measured except for the low-Re regime */ __device__ double drag_coeff(double Re) { if(Re < 1) { // 24 / Re return 12 / (Re+1e-15); } if(Re > 7.845084191866316e+02) { // .44 return 0.22; } // 24 Re^-.6 return 12.0*pow(Re,-0.6); } /* Computes the drag coefficient for all Reynolds and Knudsen numbers with an accuracy of <1% for * speeds less than approximately Mach 0.1. * The coefficients are divided by 8 per a factor that appears in the drag time formula * The Cunninghand correction coefficients of Allen & Raabe (1.142, .558, .998) are used. */ __device__ double allregimeCdrag(double Re, double Kn) { // Prevent 1/0 errors which may occur when a simulation is initialized with dv = 0 // The only physical way to acheive Re = 0 is if dv = 0, and if dv =0 then skip wasting time // Note that all leading numeric coefficients in C_drag are divided by 8 from their normal // presentation to absorb numeric factors elsewhere in the drag equations double cunningham = 1 + Kn*(1.142 + 1*0.558*exp(-0.999/Kn)); double C_drag = (3 / Re + .45*pow(Re, -.319) + .0509*Re/(8710+Re)) / cunningham; #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i,line %i: Cdrag reporting: Cd0 = %.12lf, Cu = %.12lf\n, Cd = %.12lf\n", __LINE__, blockIdx.x, threadIdx.x, C_drag, cunningham, C_drag / cunningham); } #endif return C_drag; } /* The general linear core is called upon by the LogTrap solver as well so it is here separated out */ template <bool resetAccumulator> __device__ void cukern_GasDustDrag_GeneralLinearCore(double *gas, double *dust, double *tmpmem, int srcBlock, int kBlock, int N) { double rhoA, rhoB; // gas and dust densities respectively double magdv; // magnitude velocity difference double uspecific; // specific internal energy density = e_{int} / rho double Tnormalized; // Temperature normalized by the reference temp for the viscosity double Re, Kn; // Reynolds number and Knudsen number double kdrag, Cd_hat; // drag time constant & drag coefficient magdv = tmpmem[srcBlock*FLUID_SLABPITCH]; if(magdv < 1e-9) { if(resetAccumulator) { tmpmem[kBlock*FLUID_SLABPITCH] = 0; } #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i, line=%i: general linear core reporting: |dv| < 1e-9, returning no drag\n", __LINE__, blockIdx.x, threadIdx.x); } #endif return; } rhoA = gas[0]; rhoB = dust[0]; // make sure computation includes gas heating term! // fixme double check this calculation I think it may be in error if(srcBlock != 0) { // If srcblock != zero, we're evaluating a different dv than originally used to give uinternal: // must add dissipated relative KE to gas internal energy. 
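		// (Sanity check on the fixme above: the relative KE per unit volume is .5*mu*dv^2 with
		// mu = rhoA*rhoB/(rhoA+rhoB); depositing the drag-dissipated part .5*mu*(dv_0^2 - dv^2)
		// into the gas raises its specific internal energy by .5*mu*(dv_0^2 - dv^2)/rhoA
		// = .5*rhoB*(dv_0^2 - dv^2)/(rhoA+rhoB), which is exactly the term added below.)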
uspecific = tmpmem[FLUID_SLABPITCH] + .5 * rhoB * (tmpmem[0]*tmpmem[0] - magdv*magdv) / (rhoA + rhoB); } else { // If srcBlock is zero, we're reading the original dv for which uinternal was computed: No change uspecific = tmpmem[FLUID_SLABPITCH]; } Tnormalized = ALPHA * uspecific; Re = DELTA * rhoA * magdv * pow(Tnormalized, VISCPOW); Kn = BETA * pow(Tnormalized, LAMPOW) / rhoA; Cd_hat = allregimeCdrag(Re, Kn); kdrag = Cd_hat * magdv * (rhoA + rhoB) * EPSILON; #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i, line %i: general linear core reporting: uspecific=%le, T/T0=%le, Re=%le, Kn=%le, Cd=%le, k=%le, a=k*v=%le\n", __LINE__, blockIdx.x, threadIdx.x, uspecific, Tnormalized, Re, Kn, 8*Cd_hat, kdrag, kdrag*magdv); } #endif if(resetAccumulator) { tmpmem[kBlock*FLUID_SLABPITCH] = kdrag; } else { tmpmem[kBlock*FLUID_SLABPITCH] += kdrag; } //tmpmem[2*FLUID_SLABPITCH] = Re; //tmpmem[3*FLUID_SLABPITCH] = Kn; //tmpmem[4*FLUID_SLABPITCH] = Cd_hat * 8; } /* This function directly computes the gas-dust drag force in the full (stokes+epstein) regime * This is suited for weaker drag or strange regimes, but unnecessary and time-consuming for * small particles which will never exit the low-speed Epstein regime. * - Uses staged dv value stored at srcBlock, writes acceleration into dstBlock * - template saves on evaluating drag heating if true */ template <bool ONLY_DV_INI> __global__ void cukern_GasDustDrag_GeneralAccel(double *gas, double *dust, double *tmpmem, int srcBlock, int dstBlock, int N) { int i = threadIdx.x + blockIdx.x*blockDim.x; double rhoA, rhoB; // gas and dust densities respectively double magdv; // magnitude velocity difference double uspecific; // specific internal energy density double Tnormalized; double Re, Kn; // Spherical particle Reynolds number double Cd_hat, accel; gas += i; dust += i; tmpmem += i; for(; i < N; i+= blockDim.x*gridDim.x) { magdv = tmpmem[srcBlock*FLUID_SLABPITCH]; rhoA = gas[0]; rhoB = dust[0]; if(ONLY_DV_INI) { uspecific = tmpmem[FLUID_SLABPITCH]; } else { // make sure computation includes gas heating term! uspecific = tmpmem[FLUID_SLABPITCH] + .5 * rhoB * (tmpmem[0]*tmpmem[0] - magdv*magdv) / (rhoA + rhoB); } Tnormalized = ALPHA * uspecific; Re = DELTA * rhoA * magdv * pow(Tnormalized, VISCPOW); Kn = BETA * pow(Tnormalized, LAMPOW) / rhoA; Cd_hat = allregimeCdrag(Re, Kn); accel = Cd_hat * magdv * magdv * (rhoA + rhoB) * EPSILON; tmpmem[dstBlock*FLUID_SLABPITCH] = -accel; gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } #define EPSTEIN_ALPHA dragparams[9] #define EPSTEIN_BETA dragparams[10] /* This function computes particle drag in the Epstein regime (particles much smaller than gas MFP) * but is unsuited to large particles or dense gas */ __global__ void cukern_GasDustDrag_EpsteinAccel(double *gas, double *dust, double *vrel, int N) { int i = threadIdx.x + blockIdx.x*blockDim.x; double rhoA, rhoB; // gas and dust densities respectively double magdv; // magnitude velocity difference double uinternal; // specific internal energy density double accel; // Relative acceleration (d/dt of vrel) gas += i; dust += i; vrel += i; for(; i < N; i+= blockDim.x*gridDim.x) { magdv = vrel[FLUID_SLABPITCH]; rhoA = gas[0]; rhoB = dust[0]; // make sure computation includes gas heating term! 
uinternal = vrel[2*FLUID_SLABPITCH] + rhoB * (vrel[0]*vrel[0] - magdv*magdv) / (rhoA + rhoB); // compute f(single particle) = sqrt(f_slow^2 + f_fast^2) // where f_slow = (4/3) A_dust cbar rho_g dv // f_fast = A_dust rho_g dv^2 // and accel = f(single particle) * (rho_dust / m_dust) * (rho_g + rho_d)/(rhog rhod) // = f(single particle) * ndust / reduced mass accel = EPSTEIN_ALPHA * magdv * rhoA * sqrt(magdv*magdv + EPSTEIN_BETA*uinternal) * (1.0+rhoB/rhoA); vrel[3*FLUID_SLABPITCH] = accel; gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; vrel += blockDim.x*gridDim.x; } } /* This function returns the drag rate K = (dv/dt) / v which is useful for e.g. exponential methods * for very stiff drag * * If motion is acted on exclusively by drag, a simple formula is available to determine heating * as a result of drag friction exactly. In this case, the original and current velocities are used * If it is not, the result is not as trivial and ONLY_DV_INI = true just uses a given input specific * internal energy. */ template <bool resetAccumulator> __global__ void cukern_GasDustDrag_GeneralLinearTime(double *gas, double *dust, double *tmpmem, int srcBlock, int kBlock, int N) { int i = threadIdx.x + blockIdx.x*blockDim.x; gas += i; dust += i; tmpmem += i; for(; i < N; i+= blockDim.x*gridDim.x) { cukern_GasDustDrag_GeneralLinearCore<resetAccumulator>(gas, dust, tmpmem, srcBlock, kBlock, N); gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* Computes initial magnitude velocity ("w") into dv[0] and u_internal initial into dv[slabPitch] * and computes Uint_ini (e_internal / rho evaluated at original |w|) into dv[2*slabNumel] */ __global__ void cukern_findInitialDeltaV(double *g, double *d, double *dv, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; g += x; d += x; dv+= x; double u, q, dvsq, rhoginv, rhodinv; double momsq; while(x < partNumel) { rhoginv = 1/g[0]; rhodinv = 1/d[0]; q = g[2*FLUID_SLABPITCH]; u = q*rhoginv - d[2*FLUID_SLABPITCH]*rhodinv; momsq = q*q; dvsq = u*u; q = g[3*FLUID_SLABPITCH]; u = q*rhoginv - d[3*FLUID_SLABPITCH]*rhodinv; momsq += q*q; dvsq += u*u; q = g[4*FLUID_SLABPITCH]; u = q*rhoginv - d[4*FLUID_SLABPITCH]*rhodinv; momsq += q*q; dvsq += u*u; // Store magnitude delta-v and initial specific internal energy for use by gas drag routine dv[0] = sqrt(dvsq); dv[FLUID_SLABPITCH] = (g[FLUID_SLABPITCH] - .5*momsq * rhoginv)*rhoginv; x += blockDim.x*gridDim.x; g += blockDim.x*gridDim.x; d += blockDim.x*gridDim.x; dv+= blockDim.x*gridDim.x; } } /* This set of functions implement evaluation of the rows in RK Butcher tableaux containing * from 1 to 3 nonzero entries */ /* This function completes evaluation of an explicit Butcher tableau. 
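 * (Concrete use: solveDragRK4 above calls this with i=3, B=1, W=dt/6 once the accumulator in
 * block 2 holds k1 + 2*k2 + 2*k3, so the value written back into block 2, which
 * cukern_applyFinalDeltaV then reads, is dv_0 + (dt/6)*(k1 + 2*k2 + 2*k3 + k4).)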
* the final y' stored at i gets added with weight B to the accumulator * The accumulator is rescaled by W, added to block 0, and overwritten * tmpmem[2] += B * tmpmem[i] * tmpmem[d] = tmpmem[0] + W*tmpmem[2]; */ __global__ void cukern_SolveRK_final(double *tmpmem, int i, double B, double W, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; tmpmem += x; while(x < partNumel) { /* compute Y1 value */ tmpmem[2*FLUID_SLABPITCH] = tmpmem[0] + W*(tmpmem[2*FLUID_SLABPITCH] + B*tmpmem[i*FLUID_SLABPITCH]); x += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* This function computes an explicit RK intermediate that takes one F eval * the new stage is computed using * tmpmem[d] = tmpmem[0] + (A*tmpmem[i] * and the accumulator goes as * tmpmem[2] += B * tmpmem[i1] */ template <bool resetAccumulator> __global__ void cukern_SolveRK_single(double *tmpmem, int d, double A, int i, double B, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; tmpmem += x; while(x < partNumel) { /* compute stage value */ tmpmem[d*FLUID_SLABPITCH] = tmpmem[0] + A*tmpmem[i*FLUID_SLABPITCH]; /* compute accumulator */ if(resetAccumulator) { tmpmem[2*FLUID_SLABPITCH] = B * tmpmem[i*FLUID_SLABPITCH]; } else { tmpmem[2*FLUID_SLABPITCH] += B * tmpmem[i*FLUID_SLABPITCH]; } x += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* This function computes an explicit RK intermediate that takes two F evals * the new stage is computed using * tmpmem[d] = tmpmem[0] + (F0 * tmpmem[i0] + F1 * tmpmem[i1]); * and the accumulator goes as * tmpmem[2] += B * tmpmem[i1] * (Implicitly, F1 at i1 is the new F eval to be accumulated) */ template <bool resetAccumulator> __global__ void cukern_SolveRK_double(double *tmpmem, int d, double F[2], int i[2], double B, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; tmpmem += x; while(x < partNumel) { /* compute stage value */ tmpmem[d*FLUID_SLABPITCH] = tmpmem[0] + (F[0]*tmpmem[i[0]*FLUID_SLABPITCH] + F[1]*tmpmem[i[1]*FLUID_SLABPITCH]); /* compute accumulator */ if(resetAccumulator) { tmpmem[2*FLUID_SLABPITCH] = B * tmpmem[i[1]*FLUID_SLABPITCH]; } else { tmpmem[2*FLUID_SLABPITCH] += B * tmpmem[i[1]*FLUID_SLABPITCH]; } x += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* This function computes an explicit RK intermediate that takes two F evals * the new stage is computed using * tmpmem[d] = tmpmem[0] + sum_{i=0}^{i=2} (F[i] * tmpmem[idx[i]]); * and the accumulator goes as * tmpmem[2] += B * tmpmem[i[2]] * (Implicitly, F1 at i[2] is the new F eval to be accumulated) */ template <bool resetAccumulator> __global__ void cukern_SolveRK_triple(double *tmpmem, int d, double F[3], int i[3], double B, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; tmpmem += x; while(x < partNumel) { /* compute stage value */ tmpmem[d*FLUID_SLABPITCH] = tmpmem[0] + (F[0]*tmpmem[i[0]*FLUID_SLABPITCH] + F[1]*tmpmem[i[1]*FLUID_SLABPITCH] + F[2]*tmpmem[i[2]*FLUID_SLABPITCH]); /* compute accumulator */ if(resetAccumulator) { tmpmem[2*FLUID_SLABPITCH] = B * tmpmem[i[2]*FLUID_SLABPITCH]; } else { tmpmem[2*FLUID_SLABPITCH] += B * tmpmem[i[2]*FLUID_SLABPITCH]; } x += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* From the initial momentum difference from *gas and *dust, computes the change in their momentum * densities to reach momentum difference *dp, given the relative fraction of acceleration * experienced by the gas and dust particles, and applies total energy conservation to solve * the gas/dust energy 
densities */ __global__ void cukern_applyFinalDeltaV(double *g, double *d, double *dv_final, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; g += x; d += x; dv_final += x; double vstick[3]; double dvhat[3]; double rhog, rhod; double a, b, c, p1, p2; double dustmom, dustmomfin; while(x < partNumel) { rhog = g[0]; rhod = d[0]; // convert rho & momentum into CoM velocity & differential velocity p1 = g[2*FLUID_SLABPITCH]; p2 = d[2*FLUID_SLABPITCH]; vstick[0] = (p1+p2)/(rhog+rhod); dvhat[0] = p1/rhog - p2/rhod; p1 = g[3*FLUID_SLABPITCH]; p2 = d[3*FLUID_SLABPITCH]; vstick[1] = (p1+p2)/(rhog+rhod); dvhat[1] = p1/rhog - p2/rhod; p1 = g[4*FLUID_SLABPITCH]; p2 = d[4*FLUID_SLABPITCH]; vstick[2] = (p1+p2)/(rhog+rhod); dvhat[2] = p1/rhog - p2/rhod; // Compute differential velocity unit vector a = dv_final[2*FLUID_SLABPITCH] / sqrt(dvhat[0]*dvhat[0] + dvhat[1]*dvhat[1]+dvhat[2]*dvhat[2]); dvhat[0] *= a; dvhat[1] *= a; dvhat[2] *= a; // Reduced mass proves useful b = rhog*rhod/(rhog+rhod); // Accumulate initial & final dust momenta for exact energy conservation; // Convert CoM and decayed differential velocities back to momenta densities dustmom = d[2*FLUID_SLABPITCH]*d[2*FLUID_SLABPITCH]; g[2*FLUID_SLABPITCH] = rhog*vstick[0] + dvhat[0]*b; d[2*FLUID_SLABPITCH] = c = rhod*vstick[0] - dvhat[0]*b; dustmomfin = c*c; dustmom += d[3*FLUID_SLABPITCH]*d[3*FLUID_SLABPITCH]; g[3*FLUID_SLABPITCH] = rhog*vstick[1] + dvhat[1]*b; d[3*FLUID_SLABPITCH] = c = rhod*vstick[1] - dvhat[1]*b; dustmomfin += c*c; dustmom += d[4*FLUID_SLABPITCH]*d[4*FLUID_SLABPITCH]; g[4*FLUID_SLABPITCH] = rhog*vstick[2] + dvhat[2]*b; d[4*FLUID_SLABPITCH] = c = rhod*vstick[2] - dvhat[2]*b; dustmomfin += c*c; // Conserve total energy to machine precision // d/dt (KE_gas + Eint_gas + KE_dust) = 0 // d/dt (KE_gas + Eint_gas) = -d/dt(KE_dust) // Etot_gas(after) - Etot_gas(before) = -(KE_dust(after)-KE_dust(before)) // -> Etot_gas += KE_dust(ini) - KE_dust(fin) g[FLUID_SLABPITCH] += .5*(dustmom - dustmomfin)/d[0]; // FIXME - this is a hack to preserve dust "pressure" because I lack an inviscid // FIXME - Burgers solver or sticky-dust Godunov routine. So simply set it to a // FIXME - uniform low temperature d[FLUID_SLABPITCH] = .5*dustmomfin/d[0] + 1e-4 * d[0]; x += blockDim.x*gridDim.x; g += blockDim.x*gridDim.x; d += blockDim.x*gridDim.x; dv_final += blockDim.x*gridDim.x; } } /* (2) [u_hf, |dv_hf|] = cukern_ExpMidpoint_partA(gas_state, dust_state, k_0, P_x, P_y, P_z) * compute time-reversed elements of dv again (memory & memory BW precious, v_i = (p_i - 2 P_i t)/rho cheap as dirt) * solve y_i' = -k_0 y_i + a_i, a_i = - P_i / rho_gas per vector element * y(t) = a_i / k_0 + (y_i - a_i/k_0) exp(-k_0 t) * this is an L-stable method for the drag equation * Our only interest in solving this is to re-evaluate the linear operation matrix at t_half * Linear matrix is diag([k_n k_n k_n]) -> require only |dv_half| to re-call gasDustDrag */ __global__ void cukern_ExpMidpoint_partA(double *gas, double *dust, double *tmpmem, double t, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; gas += x; dust += x; tmpmem += x; double rhoginv; // 1/rho_gas double dv_i; // element of delta-v double k; // drag eigenvalue double a0; // element of accel = gradient(P)/rho_gas double dv_t; // updated delta v. not sure if needed independently... 
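	// dvsq and duint below accumulate, over the three components, the new |dv|^2 and a
	// closed-form estimate of the frictional heating released as each component decays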
double dvsq;  // accumulated (delta-v)^2
	double duint; // accumulated drag heating

	while(x < partNumel) {
		// load k, solve driven linear system
		k = tmpmem[0];

		// load & compute time-reversed delta-vx
		rhoginv = 1.0 / gas[0];
		a0 = tmpmem[2*FLUID_SLABPITCH];
#ifdef EXPO_DOTR
		dv_i = (gas[2*FLUID_SLABPITCH] + t*a0)*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0];
#else
		dv_i = (gas[2*FLUID_SLABPITCH])*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0];
#endif
		// compute decay of this value
		a0 *= rhoginv / k;
		dv_t = a0 + (dv_i - a0)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation
		// accumulate new delta-v^2
		dvsq = dv_t*dv_t;
		// accumulate drag heating
		duint = k*a0*a0*t - 2*a0*(dv_i - a0)*expm1(-k*t) - (dv_i - a0)*(dv_i - a0)*expm1(-2*k*t);

		// Repeat the above for the other two components
		a0 = tmpmem[3*FLUID_SLABPITCH];
#ifdef EXPO_DOTR
		dv_i = (gas[3*FLUID_SLABPITCH] + t*a0)*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0];
#else
		dv_i = (gas[3*FLUID_SLABPITCH])*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0];
#endif
		a0 *= rhoginv/k;
		dv_t = a0 + (dv_i - a0)*exp(-t*k);
		dvsq += dv_t*dv_t;
		duint += k*a0*a0*t - 2*a0*(dv_i - a0)*expm1(-k*t) - (dv_i - a0)*(dv_i - a0)*expm1(-2*k*t);

		a0 = tmpmem[4*FLUID_SLABPITCH];
#ifdef EXPO_DOTR
		dv_i = (gas[4*FLUID_SLABPITCH] + t*a0)*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0];
#else
		dv_i = (gas[4*FLUID_SLABPITCH])*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0];
#endif
		a0 *= rhoginv/k;
		dv_t = a0 + (dv_i - a0)*exp(-t*k);
		dvsq += dv_t*dv_t;
		duint += k*a0*a0*t - 2*a0*(dv_i - a0)*expm1(-k*t) - (dv_i - a0)*(dv_i - a0)*expm1(-2*k*t);

		tmpmem[0] = sqrt(dvsq); // overwrite in place
		tmpmem[FLUID_SLABPITCH] += GAMMAM1 * duint * rhoginv;

		// advance ptrs
		x += blockDim.x*gridDim.x;
		gas += blockDim.x*gridDim.x;
		dust += blockDim.x*gridDim.x;
		tmpmem += blockDim.x*gridDim.x;
	}
}

/*(5) [(gas_state), (dust_state)] = exponentialMidpt(gas_state, dust_state, k_hf, P_x, P_y, P_z)
 * compute time-reversed elements of dv a 3rd time (memory & memory BW precious, v_i = (p_i - 2 P_i t)/rho cheap as dirt)
 * advance to drag-applied dv values dv_i <- -P_i/(k_hf rho) + (dv_i + P_i/(k_hf rho))*exp(-k_hf t)
 * compute new u_specific? or let d/dt(Etotal) = 0 do the job? does that still work?
 * overwrite gas_state/dust_state using updated values
 * ...
 */
__global__ void cukern_ExpMidpoint_partB(double *gas, double *dust, double t, double *tmpmem)
{
	int x = threadIdx.x + blockIdx.x*blockDim.x;
	gas += x;
	dust += x;
	tmpmem += x;

	double rhoginv; // 1/rho_gas
	// double rhodinv; // 1/rho_dust
	double dv_i;    // element of delta-v
	double k;       // drag eigenvalue
	double dpdt;    // element of accel = gradient(P)/rho_gas
	double dv_t;    // updated delta v. not sure if needed independently...
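	// Energy bookkeeping below: pdustsq accumulates (p_dust_new^2 - p_dust_old^2) component by
	// component, so subtracting .5*pdustsq/rho_dust from the gas total energy at the end hands
	// the dust's kinetic-energy change back to the gas and conserves total energy.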
double pdustsq; // use to track accumulated transfer of total energy double vstick; // barycentric velocity of gas-dust system double mu; // reduced mass double q; // scratchpad variable while(x < FLUID_SLABPITCH) { // load & compute time-reversed delta-vx and stick velocity rhoginv = 1.0 / gas[0]; mu = gas[0]*dust[0]/(gas[0]+dust[0]); pdustsq = -dust[2*FLUID_SLABPITCH] * dust[2*FLUID_SLABPITCH]; vstick = (gas[2*FLUID_SLABPITCH]+dust[2*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[2*FLUID_SLABPITCH]; #else dpdt = 0; #endif #ifdef EXPO_DOTR dv_i = (gas[2*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[2*FLUID_SLABPITCH])*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0]; #endif // load k, solve driven linear system k = tmpmem[0]; dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation // recalculate new differential velocities gas[2*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[2*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; // accumulate change in dust kinetic energy pdustsq += q*q; // // do Y direction pdustsq -= dust[3*FLUID_SLABPITCH]*dust[3*FLUID_SLABPITCH]; vstick = (gas[3*FLUID_SLABPITCH]+dust[3*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[3*FLUID_SLABPITCH]; #endif #ifdef EXPO_DOTR dv_i = (gas[3*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[3*FLUID_SLABPITCH])*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0]; #endif dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[3*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[3*FLUID_SLABPITCH]= q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // do Z direction pdustsq -= dust[4*FLUID_SLABPITCH]*dust[4*FLUID_SLABPITCH]; vstick = (gas[4*FLUID_SLABPITCH]+dust[4*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[4*FLUID_SLABPITCH]; #endif #ifdef EXPO_DOTR dv_i = (gas[4*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[4*FLUID_SLABPITCH])*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0]; #endif dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[4*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[4*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // From conservation of total energy we have that the gas total energy decreases by whatever // amount the dust kinetic energy rises; Under (M_dust >> M_atom) the gas gets ~100% of heating gas[FLUID_SLABPITCH] -= .5*pdustsq / dust[0]; // advance ptrs x += blockDim.x*gridDim.x; gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /*(5) [(gas_state), (dust_state)] = cukern_ETD1RK(gas_state, dust_state, k_hf, P_x, P_y, P_z) * compute time-reversed elements of dv a 3rd time (memory & memory BW precious, v_i = (p_i - 2 P_i t)/rho cheap as dirt) * advance to drag-applied dv values dv_i <- -P_i/(k_hf rho) + (dv_i + P_i/(k_hf rho))*exp(-k_hf t) * compute new u_specific? or let d/dt(Etotal) = 0 do the job? does that still work? * overwrite gas_state/dust_state using updated values * ... 
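 * As implemented below, per component: with D = (pressure-gradient term)*mu/rho_gas (zero unless
 * ACCOUNT_GRADP is defined) and k read from tmpmem block 0,
 *     dv_new = D/k + (dv - D/k)*exp(-k t)
 * after which momenta are rebuilt as p_gas = rho_gas*v_stick + mu*dv_new and
 * p_dust = rho_dust*v_stick - mu*dv_new, with mu = rho_gas*rho_dust/(rho_gas+rho_dust),
 * and the gas total energy absorbs the dust kinetic-energy change.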
*/ __global__ void cukern_ETDRK1(double *gas, double *dust, double t, double *tmpmem) { int x = threadIdx.x + blockIdx.x*blockDim.x; gas += x; dust += x; tmpmem += x; double rhoginv; // 1/rho_gas // double rhodinv; // 1/rho_dust double dv_i; // element of delta-v double k; // drag eigenvalue double dpdt; // element of accel = gradient(P)/rho_gas double dv_t; // updated delta v. not sure if needed independently... double pdustsq; // use to track accumulated transfer of total energy double vstick; // barycentric velocity of gas-dust system double mu; // reduced mass double q; // scratchpad variable while(x < FLUID_SLABPITCH) { // load & compute time-reversed delta-vx and stick velocity rhoginv = 1.0 / gas[0]; mu = gas[0]*dust[0]/(gas[0]+dust[0]); pdustsq = -dust[2*FLUID_SLABPITCH] * dust[2*FLUID_SLABPITCH]; vstick = (gas[2*FLUID_SLABPITCH]+dust[2*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[2*FLUID_SLABPITCH]; #else dpdt = 0; #endif #ifdef EXPO_DOTR dv_i = (gas[2*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[2*FLUID_SLABPITCH])*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0]; #endif // load k, solve driven linear system k = tmpmem[0]; dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation // recalculate new differential velocities gas[2*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[2*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; // accumulate change in dust kinetic energy pdustsq += q*q; // // do Y direction pdustsq -= dust[3*FLUID_SLABPITCH]*dust[3*FLUID_SLABPITCH]; vstick = (gas[3*FLUID_SLABPITCH]+dust[3*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[3*FLUID_SLABPITCH]; #endif #ifdef EXPO_DOTR dv_i = (gas[3*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[3*FLUID_SLABPITCH])*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0]; #endif dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[3*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[3*FLUID_SLABPITCH]= q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // do Z direction pdustsq -= dust[4*FLUID_SLABPITCH]*dust[4*FLUID_SLABPITCH]; vstick = (gas[4*FLUID_SLABPITCH]+dust[4*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[4*FLUID_SLABPITCH]; #endif #ifdef EXPO_DOTR dv_i = (gas[4*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[4*FLUID_SLABPITCH])*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0]; #endif dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[4*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[4*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // From conservation of total energy we have that the gas total energy decreases by whatever // amount the dust kinetic energy rises; Under (M_dust >> M_atom) the gas gets ~100% of heating gas[FLUID_SLABPITCH] -= .5*pdustsq / dust[0]; // advance ptrs x += blockDim.x*gridDim.x; gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* Assuming the temp registers are preloaded with * [dv_0 u_0 Px Py Pz] * First call drag solve to get * [dv_0 u_0 Px Py Pz k_0] * Then solve ETD1RK to get dv_1, u_1: * [dv_1 u_1 Px Py Pz k_0] * call drag solve with accumulate=yes set to get * [dv_1 u_1 Px Py Pz (k_0+k_1)] * 
solve the log integral to find y_n+1 */ template <int order> __global__ void cukern_LogTrapSolve(double *gas, double *dust, double t, double *tmpmem, int partNumel) { double rhoginv; // 1/rho_gas // double rhodinv; // 1/rho_dust double dv_i; // element of delta-v double k; // drag eigenvalue double pdustsq; // element of accel = gradient(P)/rho_gas double dv_t; // updated delta v. not sure if needed independently... double dvsq; // use to track accumulated transfer of total energy double vstick; // barycentric velocity of gas-dust system double mu; // reduced mass double q; // scratchpad variable double duint; int x = threadIdx.x + blockIdx.x*blockDim.x; gas += x; dust += x; tmpmem += x; // Use ETDRK1 to approximate y_1 to first order while(x < FLUID_SLABPITCH) { mu = dust[0]/(gas[0]+dust[0]); // reduced density is needed more or less immediately /* Assuming the temp registers are preloaded with * [dv_0 u_0] */ // call drag eigenvalue solver. cukern_GasDustDrag_GeneralLinearCore<true>(gas, dust, tmpmem, 0, 2, partNumel); /* temp contents: * [dv_0 u_0 k_0] */ k = tmpmem[2*FLUID_SLABPITCH]; dv_i = tmpmem[0]; dv_t = dv_i*exp(-t*k); #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i,line %i: first point: initial dv=%le, k = %le, t=%le, new dv=%le\n", __LINE__, blockIdx.x, threadIdx.x, dv_i, k, t, dv_t); } #endif // accumulate new delta-v^2 and drag heating effect dvsq = dv_t*dv_t; duint = -.5*dv_i*dv_i*mu*expm1(-2*k*t); tmpmem[0] = sqrt(dvsq); // Add the dissipated relative KE before reassessing the drag coefficient tmpmem[FLUID_SLABPITCH] += duint; /* temp contents: * [dv_1 u_1 k_0] */ // Solve drag eigenvalue k1 and accumulate in register 2 cukern_GasDustDrag_GeneralLinearCore<false>(gas, dust, tmpmem, 0, 2, partNumel); #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i,line %i: Second k solve: k = %le\n", __LINE__, blockIdx.x, threadIdx.x, tmpmem[2*FLUID_SLABPITCH]); } #endif /* temp contents: * [dv_1 u_1 (k_0+k_1)] */ // Now the cutesy tricksy bit: // Cleverly reverse our way to t=1/2 and compute k just once more... // If this is set to zero, our calculation is exponential trapezoid // and has stiff time order two if(order==3) { // If one, we perform a cubic algorithmic fit and acheive third stiff order // with outrageous accuracy // Step one, back half way up: reset original internal U tmpmem[FLUID_SLABPITCH] -= duint; // take a halfstep k = .25*(tmpmem[2*FLUID_SLABPITCH]); dv_t = dv_i*exp(-t*k); // accumulate new delta-v^2 and drag heating effect dvsq = dv_t*dv_t; duint = -0.5*dv_i*dv_i*mu*expm1(-2*k*t); // #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i, line %i: halfstep: k=%le, dv_t = %le\n", __LINE__, blockIdx.x, threadIdx.x, k, dv_t); } #endif // write these into storage tmpmem[0] = sqrt(dvsq); tmpmem[FLUID_SLABPITCH] += duint; // compute the explicit midpoint value of k cukern_GasDustDrag_GeneralLinearCore<true>(gas, dust, tmpmem, 0, 0, partNumel); // Apply Richardson extrapolation to find the new K value k = (0.16666666666666666667 *tmpmem[2*FLUID_SLABPITCH] + 0.66666666666666666667*tmpmem[0]); } else { k = .5*tmpmem[2*FLUID_SLABPITCH]; } // The clever weighting of k above yields in it a value which will take us to the point that the // complex actual drag ends up at, to third order, when we do exp(-k t) // horray for path independent work integrals! 
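		// (Where the 1/6 and 2/3 weights come from: block 2 holds k_0+k_1 and block 0 holds the
		// half-step value k_hf, so k = (k_0+k_1)/6 + (2/3)*k_hf is Simpson's rule for the time
		// average of k, i.e. the Richardson extrapolation of the trapezoid estimate (k_0+k_1)/2
		// against the two-half-step estimate (k_0 + 2 k_hf + k_1)/4.)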
mu = mu * gas[0]; // mu was abused to compute the heating integral above // load & compute time-reversed delta-vx and stick velocity rhoginv = 1.0 / gas[0]; pdustsq = -dust[2*FLUID_SLABPITCH] * dust[2*FLUID_SLABPITCH]; vstick = (gas[2*FLUID_SLABPITCH]+dust[2*FLUID_SLABPITCH]) / (gas[0] + dust[0]); dv_i = (gas[2*FLUID_SLABPITCH])*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0]; dv_t = dv_i*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("t=%i,b=%i,line %i: final solve reporting: dv_i = %le, dt=%le, k = %le, dv_f = %le\n", threadIdx.x, blockIdx.x, __LINE__, dv_i, t, k, dv_t); } #endif // recalculate new differential velocities gas[2*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[2*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; // accumulate change in dust kinetic energy pdustsq += q*q; // // do Y direction pdustsq -= dust[3*FLUID_SLABPITCH]*dust[3*FLUID_SLABPITCH]; vstick = (gas[3*FLUID_SLABPITCH]+dust[3*FLUID_SLABPITCH]) / (gas[0] + dust[0]); dv_i = (gas[3*FLUID_SLABPITCH])*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0]; dv_t = dv_i*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[3*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[3*FLUID_SLABPITCH]= q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // do Z direction pdustsq -= dust[4*FLUID_SLABPITCH]*dust[4*FLUID_SLABPITCH]; vstick = (gas[4*FLUID_SLABPITCH]+dust[4*FLUID_SLABPITCH]) / (gas[0] + dust[0]); dv_i = (gas[4*FLUID_SLABPITCH])*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0]; dv_t = dv_i*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[4*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[4*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // From conservation of total energy we have that the gas total energy decreases by whatever // amount the dust kinetic energy rises; Under (M_dust >> M_atom) the gas gets ~100% of heating gas[FLUID_SLABPITCH] -= .5*pdustsq / dust[0]; // advance ptrs x += blockDim.x*gridDim.x; gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } // This awful wad of mutated copypasta from the cudaGradientKernels.cu file provides the // initial conditions for the ERK2 integrator to run; It computes five output values from nine // input values __global__ void writeScalarToVector(double *x, long numel, double f); __global__ void cukern_prepareForERK_h0(double *gas, double *dust, double *outputs, long numel); // compute grad(phi) in XYZ or R-Theta-Z with 2nd or 4th order accuracy template <geometryType_t coords> __global__ void cukern_prepareForERK3D_h2(double *gas, double *dust, double *em, int3 arraysize); template <geometryType_t coords> __global__ void cukern_prepareForERK3D_h4_partone(double *phi, double *fx, double *fy, int3 arraysize); __global__ void cukern_prepareForERK3D_h4_parttwo(double *phi, double *fz, int3 arraysize); // compute grad(phi) in X-Y or R-Theta with 2nd or 4th order accuracy template <geometryType_t coords> __global__ void cukern_prepareForERK2D_h2(double *gas, double *dust, double *em, int3 arraysize); template <geometryType_t coords> __global__ void cukern_prepareForERK2D_h4(double *phi, double *fx, double *fy, int3 arraysize); // Compute grad(phi) in X-Z or R-Z with 2nd or 4th order accuracy __global__ void cukern_prepareForERKRZ_h2(double *gas, double *dust, double *em, int3 arraysize); __global__ void cukern_prepareForERKRZ_h4(double *phi, double *fx, double *fz, int3 arraysize); 
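/* Illustrative sketch, not part of the build (guarded by #if 0): the scalar form of the update
 * sequence that cukern_LogTrapSolve applies per cell and per velocity component. kOf() is a
 * hypothetical stand-in for the drag-rate evaluation performed by
 * cukern_GasDustDrag_GeneralLinearCore (which additionally folds in the drag-heating feedback). */
#if 0
#include <math.h>

/* Advance one scalar delta-v through a step of length t for dv' = -k(dv)*dv.
 * order == 2 gives the exponential trapezoid; order == 3 adds the Richardson
 * (Simpson) correction using a half-step evaluation of k, as the kernel does. */
static double logTrapScalarStep(double dv0, double t, int order, double (*kOf)(double))
{
	double k0  = kOf(dv0);
	double dv1 = dv0 * exp(-k0 * t);      /* ETDRK1 predictor */
	double k1  = kOf(dv1);
	double kbar;

	if (order == 3) {
		/* state at t/2 reached with the trapezoid-averaged rate */
		double khalf = kOf(dv0 * exp(-0.25 * (k0 + k1) * t));
		kbar = (k0 + k1) / 6.0 + 2.0 * khalf / 3.0;  /* Simpson average of k(t) */
	} else {
		kbar = 0.5 * (k0 + k1);                      /* trapezoid average */
	}
	return dv0 * exp(-kbar * t);
}
#endif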
#define GRADBLOCKX 16 #define GRADBLOCKY 16 // scalingParameter / 2h or /12h depending on spatial order of scheme #define LAMX devLambda[0] #define LAMY devLambda[1] #define LAMZ devLambda[2] #define RINNER devLambda[7] #define DELTAR devLambda[8] /* Given the gas (5xMGArray), dust (5xMGArray), and temporary memory (5 regs) pointers, along with * geometry information, computes five outputs into the 5 temp memory slabs: [|dv_timereversed|, uinternal, dP/dx, dP/dy, dP/dz] * for this call, spaceOrder must be 2 (or error) and scalingParameter should be 1 (or the math is wrong). */ int prepareForExpMethod(MGArray *gas, MGArray *dust, MGArray *tempMem, GeometryParams geom, int spaceOrder, double scalingParameter) { dim3 gridsize, blocksize; double lambda[11]; int i; int worked; int sub[6]; double *dx = &geom.h[0]; if(spaceOrder == 4) { lambda[0] = scalingParameter/(12.0*dx[0]); lambda[1] = scalingParameter/(12.0*dx[1]); lambda[2] = scalingParameter/(12.0*dx[2]); } else if(spaceOrder == 2) { lambda[0] = scalingParameter/(2.0*dx[0]); lambda[1] = scalingParameter/(2.0*dx[1]); lambda[2] = scalingParameter/(2.0*dx[2]); } else if(spaceOrder == 0) { lambda[0] = scalingParameter; } lambda[7] = geom.Rinner; // This is actually overwritten per partition below lambda[8] = dx[1]; int isThreeD = (gas->dim[2] > 1); int isRZ = (gas->dim[2] > 1) & (gas->dim[1] == 1); if(spaceOrder > 0) { for(i = 0; i < gas->nGPUs; i++) { hipSetDevice(gas->deviceID[i]); calcPartitionExtent(gas, i, &sub[0]); lambda[7] = geom.Rinner + dx[0] * sub[0]; // Innermost cell coord may change per-partition hipMemcpyToSymbol((const void *)devLambda, lambda, 11*sizeof(double), 0, hipMemcpyHostToDevice); worked = CHECK_CUDA_ERROR("hipMemcpyToSymbol"); if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break; } if(worked != SUCCESSFUL) return worked; } double *gasPtr; double *dustPtr; double *tmpPtr; // Iterate over all partitions, and here... we... go! 
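	// Launch geometry per partition: spaceOrder 0 is a purely local map, so it uses a 1D
	// grid-stride sweep (256 blocks of 32 threads); spaceOrder 2 uses GRADBLOCKX x GRADBLOCKY
	// tiles whose origins advance by (blocksize - spaceOrder) so the one-cell halo ring each
	// tile loads for the centered difference overlaps its neighbors and every interior cell
	// is written exactly once.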
for(i = 0; i < gas->nGPUs; i++) { hipSetDevice(gas->deviceID[i]); worked = CHECK_CUDA_ERROR("hipSetDevice"); if(worked != SUCCESSFUL) break; calcPartitionExtent(gas, i, sub); int3 arraysize; arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5]; dim3 blocksize(GRADBLOCKX, GRADBLOCKY, 1); if(spaceOrder > 0) { gridsize.x = arraysize.x / (blocksize.x - spaceOrder); gridsize.x += ((blocksize.x-spaceOrder) * gridsize.x < arraysize.x) * 1 ; if(isRZ) { gridsize.y = arraysize.z / (blocksize.y - spaceOrder); gridsize.y += ((blocksize.y-spaceOrder) * gridsize.y < arraysize.z); } else { gridsize.y = arraysize.y / (blocksize.y - spaceOrder); gridsize.y += ((blocksize.y-spaceOrder) * gridsize.y < arraysize.y) * 1; } gridsize.z = 1; } else { gridsize.x = 256; gridsize.y = gridsize.z = 1; blocksize.x = 32; blocksize.y = blocksize.z = 1; } gasPtr = gas->devicePtr[i]; dustPtr = dust->devicePtr[i]; tmpPtr = tempMem->devicePtr[i]; long int ne = (long)sub[3] * (long)sub[4] * (long)sub[5]; switch(spaceOrder) { case 0: hipLaunchKernelGGL(( cukern_prepareForERK_h0), dim3(gridsize), dim3(blocksize), 0, 0, gasPtr, dustPtr, tmpPtr, ne); break; case 2: if(isThreeD) { if(isRZ) { hipLaunchKernelGGL(( cukern_prepareForERKRZ_h2), dim3(gridsize), dim3(blocksize), 0, 0, gasPtr, dustPtr, tmpPtr, arraysize); } else { if(geom.shape == SQUARE) { hipLaunchKernelGGL(( cukern_prepareForERK3D_h2<SQUARE>), dim3(gridsize), dim3(blocksize), 0, 0, gasPtr, dustPtr, tmpPtr, arraysize); } if(geom.shape == CYLINDRICAL) { hipLaunchKernelGGL(( cukern_prepareForERK3D_h2<CYLINDRICAL>), dim3(gridsize), dim3(blocksize), 0, 0, gasPtr, dustPtr, tmpPtr, arraysize); } } } else { if(geom.shape == SQUARE) { hipLaunchKernelGGL(( cukern_prepareForERK2D_h2<SQUARE>), dim3(gridsize), dim3(blocksize), 0, 0, gasPtr, dustPtr, tmpPtr, arraysize); } if(geom.shape == CYLINDRICAL) { hipLaunchKernelGGL(( cukern_prepareForERK2D_h2<CYLINDRICAL>), dim3(gridsize), dim3(blocksize), 0, 0, gasPtr, dustPtr, tmpPtr, arraysize); } } break; /*case 4: if(isThreeD) { if(isRZ) { cukern_prepareForERKRZ_h4<<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr + 2*gas->partNumel[i], arraysize); writeScalarToVector<<<32, 256>>>(tmpPtr + slabsize, gas->partNumel[i], 0.0); } else { if(geom.shape == SQUARE) { cukern_prepareForERK3D_h4_partone<SQUARE><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); cukern_prepareForERK3D_h4_parttwo<<<gridsize, blocksize>>>(gasPtr, tmpPtr+ slabsize*2, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_prepareForERK3D_h4_partone<CYLINDRICAL><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); cukern_prepareForERK3D_h4_parttwo<<<gridsize, blocksize>>>(gasPtr, tmpPtr+ slabsize*2, arraysize); } } } else { if(geom.shape == SQUARE) { cukern_prepareForERK2D_h4<SQUARE><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_prepareForERK2D_h4<CYLINDRICAL><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); } writeScalarToVector<<<32, 256>>>(tmpPtr+2*gas->partNumel[i], gas->partNumel[i], 0.0); } break;*/ default: PRINT_FAULT_HEADER; printf("Was passed spatial order parameter of %i, must be passed 0 or 2 (2nd order)\n", spaceOrder); PRINT_FAULT_FOOTER; return ERROR_INVALID_ARGS; } worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_prepareForERK"); if(worked != SUCCESSFUL) break; } if(worked != SUCCESSFUL) return worked; // FIXME this needs to either understand slabs, or we need to fetch 3 slab ptrs into an array & pass 
it instead // worked = MGA_exchangeLocalHalos(gradient, 5); // need to? if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked; return CHECK_IMOGEN_ERROR(worked); } // Needed with the gradient calculators in 2D because they leave the empty directions uninitialized // Vomits the value f into array x, from x[0] to x[numel-1] __global__ void writeScalarToVector(double *x, long numel, double f) { long a = threadIdx.x + blockDim.x*blockIdx.x; for(; a < numel; a+= blockDim.x*gridDim.x) { x[a] = f; } } __device__ double gas2press(double *g) { return (g[FLUID_SLABPITCH]-.5*(g[2*FLUID_SLABPITCH]*g[2*FLUID_SLABPITCH]+g[3*FLUID_SLABPITCH]*g[3*FLUID_SLABPITCH]+g[4*FLUID_SLABPITCH]*g[4*FLUID_SLABPITCH])/g[0]); } /* Prepares for an exponential type method when no spatial differencing is involved (i.e. all cases now * since it is known that the time reversal idea doesn't work). This means that output is a simple * state function of local input so we can just treat the arrays as 1D vectors regardless of actual geometry */ __global__ void cukern_prepareForERK_h0(double *gas, double *dust, double *outputs, long numel) { int nx = blockDim.x * gridDim.x; int myX = threadIdx.x + blockDim.x * blockIdx.x; // x = thread x + block x if(myX >= numel) return; double dv, dvsq, press; for(; myX < numel; myX += nx) { dv = (gas[myX+2*FLUID_SLABPITCH])/gas[myX] - dust[myX+2*FLUID_SLABPITCH]/dust[myX]; dvsq = dv*dv; dv = (gas[myX+3*FLUID_SLABPITCH])/gas[myX] - dust[myX+3*FLUID_SLABPITCH]/dust[myX]; dvsq += dv*dv; dv = (gas[myX+4*FLUID_SLABPITCH])/gas[myX] - dust[myX+4*FLUID_SLABPITCH]/dust[myX]; dvsq += dv*dv; press = gas2press(gas + myX); outputs[myX ] = sqrt(dvsq); // output initial delta-v outputs[myX + FLUID_SLABPITCH] = press / gas[myX]; // = P/rho = specific internal energy density } } /* Algorithm: * [|dv_tr|, u_0, P_x, P_y, P_z] = exponentialSetup(gas_state, dust_state) * 5 output registers * may need slope limiter on gradient calculation? 
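 * The _h2 kernels below use the plain centered difference dP/dx ~= LAMX*(P[i+1] - P[i-1]) with
 * LAMX = scalingParameter/(2*h), wrap indices periodically at the partition edges, and divide
 * the azimuthal derivative by r = RINNER + DELTAR*myX in cylindrical geometry.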
*/ template <geometryType_t coords> __global__ void cukern_prepareForERK3D_h2(double *gas, double *dust, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaP; // Store derivative of phi in one direction double dv, dvsq; __shared__ double phiA[GRADBLOCKX*GRADBLOCKY]; __shared__ double phiB[GRADBLOCKX*GRADBLOCKY]; __shared__ double phiC[GRADBLOCKX*GRADBLOCKY]; double *U; double *V; double *W; double *temp; U = phiA; V = phiB; W = phiC; // compute P on lower plane U[myLocAddr] = gas2press(gas + (globAddr + arraysize.x*arraysize.y*(arraysize.z-1))); V[myLocAddr] = gas2press(gas + globAddr); __syncthreads(); int z; int deltaz = arraysize.x*arraysize.y; for(z = 0; z < arraysize.z; z++) { if(z >= arraysize.z - 1) deltaz = - arraysize.x*arraysize.y*(arraysize.z-1); if(IWrite) { deltaP = LAMX*(V[myLocAddr+1]-V[myLocAddr-1]); em[globAddr + 2*FLUID_SLABPITCH] = deltaP; // need time-reversed dv = (vgas - vdust) + t*(deltaP / rho) // = ((pgas + t deltaP)/rhogas - pdust/rhodust #ifdef EXPO_DOTR dv = (gas[globAddr+2*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+2*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq = dv*dv; // accumulate |dv_tr| } if(IWrite) { if(coords == SQUARE) { deltaP = LAMY*(V[myLocAddr+GRADBLOCKX]-V[myLocAddr-GRADBLOCKX]); } if(coords == CYLINDRICAL) { // In cylindrical coords, use dt/dphi * (delta-phi) / r to get d/dy deltaP = LAMY*(V[myLocAddr+GRADBLOCKX]-V[myLocAddr-GRADBLOCKX]) / (RINNER + DELTAR*myX); } em[globAddr + 3*FLUID_SLABPITCH] = deltaP; #ifdef EXPO_DOTR dv = (gas[globAddr+3*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+3*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+3*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+3*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; } /* we must protect on both sides of this * tl;dr: with only barrier B, warps 1 and 2 depart at the same time * But if, suppose, warp 1 gets delayed in the slow sqrt() calculation while * warp 2 goes ahead and runs all the way back to where barrier A is. * * Without barrier A, warp 2 will overwrite W (which for warp 1 is still U) * and the calculation will be corrupted. 
*/ __syncthreads(); W[myLocAddr] = gas2press(gas + (globAddr + deltaz)); __syncthreads(); // barrier B if(IWrite) { deltaP = LAMZ*(W[myLocAddr] - U[myLocAddr]); em[globAddr + 4*FLUID_SLABPITCH] = deltaP; #ifdef EXPO_DOTR dv = (gas[globAddr+4*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+4*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+4*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+4*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; em[globAddr] = sqrt(dvsq); // output initial delta-v em[globAddr + FLUID_SLABPITCH] = V[myLocAddr] / gas[globAddr]; // = P/rho = specific internal energy density } temp = U; U = V; V = W; W = temp; // cyclically shift them back globAddr += arraysize.x * arraysize.y; } } /* Computes the gradient of 3d array phi using the 4-point centered derivative and * stores phi_x in fx, phi_y in fy, phi_z in fz. * All arrays (rho, phi, fx, fy, fz) must be of size arraysize. * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi * This call must be invoked in two parts: * cukern_prepareForERK3D_h4_partone computes the X and Y (or r/theta) derivatives, * cukern_prepareForERK3D_h4_parttwo computes the Z derivative. */ template <geometryType_t coords> __global__ void cukern_prepareForERK3D_h4_partone(double *phi, double *fx, double *fy, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > (arraysize.x+1)) || (myY > (arraysize.y+1))) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phishm[GRADBLOCKX*GRADBLOCKY]; __syncthreads(); int z; int deltaz = arraysize.x*arraysize.y; for(z = 0; z < arraysize.z; z++) { phishm[myLocAddr] = phi[globAddr]; __syncthreads(); if(IWrite) { deltaphi = LAMX*(-phishm[myLocAddr+2]+8.0*phishm[myLocAddr+1]-8.0*phishm[myLocAddr-1]+phishm[myLocAddr-2]); fx[globAddr] = deltaphi; // store px <- px - dt * rho dphi/dx; if(coords == SQUARE) { deltaphi = LAMY*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]); } if(coords == CYLINDRICAL) { // In cylindrical coords, use dt/dphi * (delta-phi) / r to get d/dy deltaphi = LAMY*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]) / (RINNER + DELTAR*myX); } fy[globAddr] = deltaphi; } globAddr += deltaz; } } /* 2nd part of 4th order 3D spatial gradient computes d/dz (same in cart & cyl coords so no template */ __global__ void cukern_prepareForERK3D_h4_parttwo(double *phi, double *fz, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myZ = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > (arraysize.x+1)) || (myZ > (arraysize.z+1))) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myZ < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myZ = (myZ + arraysize.z) % arraysize.z; int delta = arraysize.x*arraysize.y; int globAddr = myX + delta*myZ; double 
deltaphi; // Store derivative of phi in one direction __shared__ double phishm[GRADBLOCKX*GRADBLOCKY]; __syncthreads(); int y; for(y = 0; y < arraysize.y; y++) { phishm[myLocAddr] = phi[globAddr]; if(IWrite) { deltaphi = LAMZ*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]); fz[globAddr] = deltaphi; } globAddr += arraysize.x; } } /* Compute the gradient of 2d array phi with 2nd order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi */ template <geometryType_t coords> __global__ void cukern_prepareForERK2D_h2(double *gas, double *dust, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaP; // Store derivative of phi in one direction double dv, dvsq; __shared__ double locPress[GRADBLOCKX*GRADBLOCKY]; locPress[myLocAddr] = gas2press(gas+globAddr); __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaP = LAMX*(locPress[myLocAddr+1]-locPress[myLocAddr-1]); em[globAddr+2*FLUID_SLABPITCH] = deltaP; dv = (gas[globAddr+4*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+4*FLUID_SLABPITCH]/dust[globAddr]; dvsq = dv*dv; #ifdef EXPO_DOTR dv = (gas[globAddr+2*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+2*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; // Calculate dt*(dphi/dy) if(coords == SQUARE) { deltaP = LAMY*(locPress[myLocAddr+GRADBLOCKX]-locPress[myLocAddr-GRADBLOCKX]); } if(coords == CYLINDRICAL) { // Converts d/dphi into physical distance based on R deltaP = LAMY*(locPress[myLocAddr+GRADBLOCKX]-locPress[myLocAddr-GRADBLOCKX]) / (RINNER + myX*DELTAR); } em[globAddr+3*FLUID_SLABPITCH] = deltaP; em[globAddr+4*FLUID_SLABPITCH] = 0.0; #ifdef EXPO_DOTR dv = (gas[globAddr+3*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+3*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+3*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+3*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; em[globAddr] = sqrt(dvsq); em[globAddr + FLUID_SLABPITCH] = locPress[myLocAddr] / gas[globAddr]; // specific internal energy for } } /* Compute the gradient of 2d array phi with 4th order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi */ template <geometryType_t coords> __global__ void cukern_prepareForERK2D_h4(double *phi, double *fx, double *fy, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY 
= (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phiLoc[GRADBLOCKX*GRADBLOCKY]; phiLoc[myLocAddr] = phi[globAddr]; __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaphi = LAMX*(-phiLoc[myLocAddr+2] + 8*phiLoc[myLocAddr+1] - 8*phiLoc[myLocAddr-1] + phiLoc[myLocAddr-2]); fx[globAddr] = deltaphi; // Calculate dt*(dphi/dy) if(coords == SQUARE) { deltaphi = LAMY*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + phiLoc[myLocAddr-2*GRADBLOCKX]); } if(coords == CYLINDRICAL) { // Converts d/dphi into physical distance based on R deltaphi = LAMY*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + phiLoc[myLocAddr-2*GRADBLOCKX])/(RINNER + myX*DELTAR); } fy[globAddr] = deltaphi; } } /* Compute the gradient of R-Z array phi with 2nd order accuracy; store the results in f_x, f_z * In cylindrical geometry, f_x -> f_r */ __global__ void cukern_prepareForERKRZ_h2(double *gas, double *dust, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.z)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.z) % arraysize.z; int globAddr = myX + arraysize.x*myY; double deltaP, dv, dvsq; // Store derivative of phi in one direction __shared__ double pressLoc[GRADBLOCKX*GRADBLOCKY]; pressLoc[myLocAddr] = gas2press(gas + globAddr); __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { em[globAddr + 3*FLUID_SLABPITCH] = 0.0; // zero phi gradient // compute v_phi contribution to |dv|^2 for 2.5-D dv = (gas[globAddr+3*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+3*FLUID_SLABPITCH]/dust[globAddr]; dvsq = dv*dv; // compute dt * (dphi/dx) deltaP = LAMX*(pressLoc[myLocAddr+1]-pressLoc[myLocAddr-1]); em[globAddr + 2*FLUID_SLABPITCH] = deltaP; #ifdef EXPO_DOTR dv = (gas[globAddr+2*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+2*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; // Calculate dt*(dphi/dz) deltaP = LAMZ*(pressLoc[myLocAddr+GRADBLOCKX]-pressLoc[myLocAddr-GRADBLOCKX]); em[globAddr + 4*FLUID_SLABPITCH] = deltaP; #ifdef EXPO_DOTR dv = (gas[globAddr+4*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+4*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+4*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+4*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; em[globAddr] = sqrt(dvsq); // magnitude delta-v with time reversed pressure gradient em[globAddr + FLUID_SLABPITCH] = pressLoc[myLocAddr] / gas[globAddr]; // specific internal energy for } } /* Compute the gradient of RZ array phi with 4th order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, */ __global__ void cukern_prepareForERKRZ_h4(double *phi, double *fx, double *fz, int3 arraysize) { int myLocAddr = threadIdx.x + 
GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > arraysize.x) || (myY > arraysize.z)) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.z) % arraysize.z; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phiLoc[GRADBLOCKX*GRADBLOCKY]; phiLoc[myLocAddr] = phi[globAddr]; __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaphi = LAMX*(-phiLoc[myLocAddr+2] + 8*phiLoc[myLocAddr+1] - 8*phiLoc[myLocAddr-1] + phiLoc[myLocAddr-2]); fx[globAddr] = deltaphi; // Calculate dt*(dphi/dz) deltaphi = LAMZ*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + phiLoc[myLocAddr-2*GRADBLOCKX]); fz[globAddr] = deltaphi; } } #undef GRADBLOCKX #undef GRADBLOCKY // This awful wad of mutated copypasta from the cudaGradientKernels.cu file turns the // midpoint internal energy density and the mass density into pressure & computes the // gradient for the ERK solver's second stage // compute grad(phi) in XYZ or R-Theta-Z with 2nd or 4th order accuracy template <geometryType_t coords> __global__ void cukern_findMidGradP3D_h2(double *gas, double *em, int3 arraysize); template <geometryType_t coords> __global__ void cukern_findMidGradP3D_h4_partone(double *phi, double *fx, double *fy, int3 arraysize); __global__ void cukern_findMidGradP3D_h4_parttwo(double *phi, double *fz, int3 arraysize); // compute grad(phi) in X-Y or R-Theta with 2nd or 4th order accuracy template <geometryType_t coords> __global__ void cukern_findMidGradP2D_h2(double *gas, double *em, int3 arraysize); template <geometryType_t coords> __global__ void cukern_findMidGradP2D_h4(double *phi, double *fx, double *fy, int3 arraysize); // Compute grad(phi) in X-Z or R-Z with 2nd or 4th order accuracy __global__ void cukern_findMidGradPRZ_h2(double *gas, double *em, int3 arraysize); __global__ void cukern_findMidGradPRZ_h4(double *phi, double *fx, double *fz, int3 arraysize); #define GRADBLOCKX 18 #define GRADBLOCKY 18 // scalingParameter / 2h or /12h depending on spatial order of scheme #define LAMX devLambda[0] #define LAMY devLambda[1] #define LAMZ devLambda[2] #define RINNER devLambda[7] #define DELTAR devLambda[8] /* Given the gas pointer, temp memory and geometry, uses the midpoint specific internal energy density from tempMem * and the gas mass density to compute the pressure gradient into tempMem slabs 2 through 4. scalingParameter needs * to be (gamma-1) to convert rho * u_specific = e_internal = P / (gamma-1) to P. 
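 * Worked restatement of the h2 kernels below: slab 1 of tempMem holds the midpoint
 * u_specific = e_internal / rho, the kernels form pressLoc = rho * u_specific, and the
 * (gamma-1) folded into LAMX/LAMY/LAMZ turns the centered difference of pressLoc directly
 * into the pressure gradient written to tempMem slabs 2 through 4.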
*/ int findMidGradP2(MGArray *gas, MGArray *tempMem, GeometryParams geom, int spaceOrder, double scalingParameter) { dim3 gridsize, blocksize; double lambda[11]; int i; int worked; int sub[6]; double *dx = &geom.h[0]; if(spaceOrder == 4) { lambda[0] = scalingParameter/(12.0*dx[0]); lambda[1] = scalingParameter/(12.0*dx[1]); lambda[2] = scalingParameter/(12.0*dx[2]); } else { lambda[0] = scalingParameter/(2.0*dx[0]); lambda[1] = scalingParameter/(2.0*dx[1]); lambda[2] = scalingParameter/(2.0*dx[2]); } lambda[7] = geom.Rinner; // This is actually overwritten per partition below lambda[8] = dx[1]; int isThreeD = (gas->dim[2] > 1); int isRZ = (gas->dim[2] > 1) & (gas->dim[1] == 1); for(i = 0; i < gas->nGPUs; i++) { hipSetDevice(gas->deviceID[i]); calcPartitionExtent(gas, i, &sub[0]); lambda[7] = geom.Rinner + dx[0] * sub[0]; // Innermost cell coord may change per-partition hipMemcpyToSymbol((const void *)devLambda, lambda, 11*sizeof(double), 0, hipMemcpyHostToDevice); worked = CHECK_CUDA_ERROR("hipMemcpyToSymbol"); if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break; //hipMemcpyToSymbol((const void *)devIntParams, &sub[3], 3*sizeof(int), 0, hipMemcpyHostToDevice); //worked = CHECK_CUDA_ERROR("memcpy to symbol"); //if(worked != SUCCESSFUL) break; } if(worked != SUCCESSFUL) return worked; double *gasPtr; double *tmpPtr; // Iterate over all partitions, and here we GO! for(i = 0; i < gas->nGPUs; i++) { hipSetDevice(gas->deviceID[i]); worked = CHECK_CUDA_ERROR("hipSetDevice"); if(worked != SUCCESSFUL) break; calcPartitionExtent(gas, i, sub); int3 arraysize; arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5]; dim3 blocksize(GRADBLOCKX, GRADBLOCKY, 1); gridsize.x = arraysize.x / (blocksize.x - spaceOrder); gridsize.x += ((blocksize.x-spaceOrder) * gridsize.x < arraysize.x); if(isRZ) { gridsize.y = arraysize.z / (blocksize.y - spaceOrder); gridsize.y += ((blocksize.y-spaceOrder) * gridsize.y < arraysize.z); } else { gridsize.y = arraysize.y / (blocksize.y - spaceOrder); gridsize.y += ((blocksize.y-spaceOrder) * gridsize.y < arraysize.y); } gridsize.z = 1; gasPtr = gas->devicePtr[i]; // WARNING: this could be garbage if spaceOrder == 0 and we rx'd no potential array tmpPtr = tempMem->devicePtr[i]; switch(spaceOrder) { /*case 0: // dump zeros so as to have a technically-valid result and not cause reads of uninitialized memory writeScalarToVector<<<32, 256>>>(tmpPtr + 0 * slabsize, gas->partNumel[i], 0.0); writeScalarToVector<<<32, 256>>>(tmpPtr + 1 * slabsize, gas->partNumel[i], 0.0); writeScalarToVector<<<32, 256>>>(tmpPtr + 2 * slabsize, gas->partNumel[i], 0.0); break;*/ case 2: if(isThreeD) { if(isRZ) { hipLaunchKernelGGL(( cukern_findMidGradPRZ_h2), dim3(gridsize), dim3(blocksize), 0, 0, gasPtr, tmpPtr, arraysize); } else { if(geom.shape == SQUARE) { hipLaunchKernelGGL(( cukern_findMidGradP3D_h2<SQUARE>), dim3(gridsize), dim3(blocksize), 0, 0, gasPtr, tmpPtr, arraysize); } if(geom.shape == CYLINDRICAL) { hipLaunchKernelGGL(( cukern_findMidGradP3D_h2<CYLINDRICAL>), dim3(gridsize), dim3(blocksize), 0, 0, gasPtr, tmpPtr, arraysize); } } } else { if(geom.shape == SQUARE) { hipLaunchKernelGGL(( cukern_findMidGradP2D_h2<SQUARE>), dim3(gridsize), dim3(blocksize), 0, 0, gasPtr, tmpPtr, arraysize); } if(geom.shape == CYLINDRICAL) { hipLaunchKernelGGL(( cukern_findMidGradP2D_h2<CYLINDRICAL>), dim3(gridsize), dim3(blocksize), 0, 0, gasPtr, tmpPtr, arraysize); } } break; /*case 4: if(isThreeD) { if(isRZ) { cukern_findMidGradPRZ_h4<<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr + 
2*gas->partNumel[i], arraysize); writeScalarToVector<<<32, 256>>>(tmpPtr + slabsize, gas->partNumel[i], 0.0); } else { if(geom.shape == SQUARE) { cukern_findMidGradP3D_h4_partone<SQUARE><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); cukern_findMidGradP3D_h4_parttwo<<<gridsize, blocksize>>>(gasPtr, tmpPtr+ slabsize*2, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_findMidGradP3D_h4_partone<CYLINDRICAL><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); cukern_findMidGradP3D_h4_parttwo<<<gridsize, blocksize>>>(gasPtr, tmpPtr+ slabsize*2, arraysize); } } } else { if(geom.shape == SQUARE) { cukern_findMidGradP2D_h4<SQUARE><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_findMidGradP2D_h4<CYLINDRICAL><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); } writeScalarToVector<<<32, 256>>>(tmpPtr+2*gas->partNumel[i], gas->partNumel[i], 0.0); } break;*/ default: PRINT_FAULT_HEADER; printf("Was passed spatial order parameter of %i, must be passed 2 (2nd order)\n", spaceOrder); PRINT_FAULT_FOOTER; return ERROR_INVALID_ARGS; } worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_findMidGradP"); if(worked != SUCCESSFUL) break; } if(worked != SUCCESSFUL) return worked; // FIXME this needs to either understand slabs, or we need to fetch 3 slab ptrs into an array & pass it instead // worked = MGA_exchangeLocalHalos(gradient, 5); // need to? if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked; return CHECK_IMOGEN_ERROR(worked); } /* Algorithm: * [|dv_tr|, u_0, P_x, P_y, P_z] = exponentialSetup(gas_state, dust_state) * 5 output registers * may need slope limiter on gradient calculation? */ template <geometryType_t coords> __global__ void cukern_findMidGradP3D_h2(double *gas, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaP; // Store derivative of phi in one direction __shared__ double phiA[GRADBLOCKX*GRADBLOCKY]; __shared__ double phiB[GRADBLOCKX*GRADBLOCKY]; __shared__ double phiC[GRADBLOCKX*GRADBLOCKY]; double *U; double *V; double *W; double *temp; U = phiA; V = phiB; W = phiC; // compute epsilon_internal on lower & current planes U[myLocAddr] = gas[globAddr + arraysize.x*arraysize.y*(arraysize.z-1)] * em[globAddr + arraysize.x*arraysize.y*(arraysize.z-1) + FLUID_SLABPITCH]; V[myLocAddr] = gas[globAddr] * em[globAddr + FLUID_SLABPITCH]; __syncthreads(); int z; int deltaz = arraysize.x*arraysize.y; for(z = 0; z < arraysize.z; z++) { if(z >= arraysize.z - 1) deltaz = - arraysize.x*arraysize.y*(arraysize.z-1); if(IWrite) { deltaP = LAMX*(V[myLocAddr+1]-V[myLocAddr-1]); #ifdef EXPO_TRAPEZOID em[globAddr + 2*FLUID_SLABPITCH] = .5*(em[globAddr + 2*FLUID_SLABPITCH] + deltaP); #else em[globAddr + 2*FLUID_SLABPITCH] = deltaP; #endif } if(IWrite) { if(coords == SQUARE) { deltaP = LAMY*(V[myLocAddr+GRADBLOCKX]-V[myLocAddr-GRADBLOCKX]); } if(coords == CYLINDRICAL) { // In cylindrical coords, use dt/dphi * (delta-phi) / r to get d/dy deltaP = 
LAMY*(V[myLocAddr+GRADBLOCKX]-V[myLocAddr-GRADBLOCKX]) / (RINNER + DELTAR*myX); } #ifdef EXPO_TRAPEZOID em[globAddr + 3*FLUID_SLABPITCH] = .5*(em[globAddr + 3*FLUID_SLABPITCH] + deltaP); #else em[globAddr + 3*FLUID_SLABPITCH] = deltaP; #endif } W[myLocAddr] = gas[globAddr + deltaz] * em[globAddr + deltaz + FLUID_SLABPITCH]; __syncthreads(); if(IWrite) { deltaP = LAMZ*(W[myLocAddr] - U[myLocAddr]); #ifdef EXPO_TRAPEZOID em[globAddr + 4*FLUID_SLABPITCH] = .5*(em[globAddr + 4*FLUID_SLABPITCH] + deltaP); #else em[globAddr + 4*FLUID_SLABPITCH] = deltaP; #endif } temp = U; U = V; V = W; W = temp; // cyclically shift them back globAddr += arraysize.x * arraysize.y; } } /* Computes the gradient of 3d array phi using the 4-point centered derivative and * stores phi_x in fx, phi_y in fy, phi_z in fz. * All arrays (rho, phi, fx, fy, fz) must be of size arraysize. * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi * This call must be invoked in two parts: * cukern_findMidGradP3D_h4_partone computes the X and Y (or r/theta) derivatives, * cukern_findMidGradP3D_h4_parttwo computes the Z derivative. */ template <geometryType_t coords> __global__ void cukern_findMidGradP3D_h4_partone(double *phi, double *fx, double *fy, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > (arraysize.x+1)) || (myY > (arraysize.y+1))) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phishm[GRADBLOCKX*GRADBLOCKY]; __syncthreads(); int z; int deltaz = arraysize.x*arraysize.y; for(z = 0; z < arraysize.z; z++) { phishm[myLocAddr] = phi[globAddr]; __syncthreads(); if(IWrite) { deltaphi = LAMX*(-phishm[myLocAddr+2]+8.0*phishm[myLocAddr+1]-8.0*phishm[myLocAddr-1]+phishm[myLocAddr-2]); fx[globAddr] = deltaphi; // store px <- px - dt * rho dphi/dx; if(coords == SQUARE) { deltaphi = LAMY*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]); } if(coords == CYLINDRICAL) { // In cylindrical coords, use dt/dphi * (delta-phi) / r to get d/dy deltaphi = LAMY*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]) / (RINNER + DELTAR*myX); } fy[globAddr] = deltaphi; } globAddr += deltaz; } } /* 2nd part of 4th order 3D spatial gradient computes d/dz (same in cart & cyl coords so no template */ __global__ void cukern_findMidGradP3D_h4_parttwo(double *phi, double *fz, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myZ = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > (arraysize.x+1)) || (myZ > (arraysize.z+1))) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myZ < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myZ = (myZ + arraysize.z) % arraysize.z; int delta = arraysize.x*arraysize.y; int globAddr = myX + delta*myZ; double deltaphi; // Store derivative of phi in one direction __shared__ double 
phishm[GRADBLOCKX*GRADBLOCKY]; __syncthreads(); int y; for(y = 0; y < arraysize.y; y++) { phishm[myLocAddr] = phi[globAddr]; if(IWrite) { deltaphi = LAMZ*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]); fz[globAddr] = deltaphi; } globAddr += arraysize.x; } } /* Compute the gradient of 2d array phi with 2nd order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi */ template <geometryType_t coords> __global__ void cukern_findMidGradP2D_h2(double *gas, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaP; // Store derivative of phi in one direction __shared__ double locPress[GRADBLOCKX*GRADBLOCKY]; locPress[myLocAddr] = gas[globAddr] * em[globAddr + FLUID_SLABPITCH]; __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaP = LAMX*(locPress[myLocAddr+1]-locPress[myLocAddr-1]); #ifdef EXPO_TRAPEZOID em[globAddr+2*FLUID_SLABPITCH] = .5*(em[globAddr+2*FLUID_SLABPITCH] + deltaP); #else em[globAddr+2*FLUID_SLABPITCH] = deltaP; #endif if(coords == SQUARE) { deltaP = LAMY*(locPress[myLocAddr+GRADBLOCKX]-locPress[myLocAddr-GRADBLOCKX]); } if(coords == CYLINDRICAL) { // Converts d/dphi into physical distance based on R deltaP = LAMY*(locPress[myLocAddr+GRADBLOCKX]-locPress[myLocAddr-GRADBLOCKX]) / (RINNER + myX*DELTAR); } #ifdef EXPO_TRAPEZOID em[globAddr+3*FLUID_SLABPITCH] = .5*(em[globAddr+3*FLUID_SLABPITCH] + deltaP); #else em[globAddr+3*FLUID_SLABPITCH] = deltaP; #endif em[globAddr+4*FLUID_SLABPITCH] = 0.0; } } /* Compute the gradient of 2d array phi with 4th order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi */ template <geometryType_t coords> __global__ void cukern_findMidGradP2D_h4(double *phi, double *fx, double *fy, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phiLoc[GRADBLOCKX*GRADBLOCKY]; phiLoc[myLocAddr] = phi[globAddr]; __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaphi = LAMX*(-phiLoc[myLocAddr+2] + 8*phiLoc[myLocAddr+1] - 8*phiLoc[myLocAddr-1] + phiLoc[myLocAddr-2]); fx[globAddr] = deltaphi; // Calculate dt*(dphi/dy) if(coords == SQUARE) { deltaphi = LAMY*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + 
phiLoc[myLocAddr-2*GRADBLOCKX]); } if(coords == CYLINDRICAL) { // Converts d/dphi into physical distance based on R deltaphi = LAMY*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + phiLoc[myLocAddr-2*GRADBLOCKX])/(RINNER + myX*DELTAR); } fy[globAddr] = deltaphi; } } /* Compute the gradient of R-Z array phi with 2nd order accuracy; store the results in f_x, f_z * In cylindrical geometry, f_x -> f_r */ __global__ void cukern_findMidGradPRZ_h2(double *gas, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.z)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.z) % arraysize.z; int globAddr = myX + arraysize.x*myY; double deltaP; // Store derivative of phi in one direction __shared__ double pressLoc[GRADBLOCKX*GRADBLOCKY]; pressLoc[myLocAddr] = gas[globAddr] * em[globAddr + FLUID_SLABPITCH]; __syncthreads(); // Make sure loaded P is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { em[globAddr + 3*FLUID_SLABPITCH] = 0.0; // zero phi gradient // compute dP/dr deltaP = LAMX*(pressLoc[myLocAddr+1]-pressLoc[myLocAddr-1]); #ifdef EXPO_TRAPEZOID em[globAddr+2*FLUID_SLABPITCH] = .5*(em[globAddr+2*FLUID_SLABPITCH] + deltaP); #else em[globAddr+2*FLUID_SLABPITCH] = deltaP; #endif // Calculate dP/dz deltaP = LAMZ*(pressLoc[myLocAddr+GRADBLOCKX]-pressLoc[myLocAddr-GRADBLOCKX]); #ifdef EXPO_TRAPEZOID em[globAddr+4*FLUID_SLABPITCH] = .5*(em[globAddr+4*FLUID_SLABPITCH] + deltaP); #else em[globAddr+4*FLUID_SLABPITCH] = deltaP; #endif } } /* Compute the gradient of RZ array phi with 4th order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, */ __global__ void cukern_findMidGradPRZ_h4(double *phi, double *fx, double *fz, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > arraysize.x) || (myY > arraysize.z)) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.z) % arraysize.z; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phiLoc[GRADBLOCKX*GRADBLOCKY]; phiLoc[myLocAddr] = phi[globAddr]; __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaphi = LAMX*(-phiLoc[myLocAddr+2] + 8*phiLoc[myLocAddr+1] - 8*phiLoc[myLocAddr-1] + phiLoc[myLocAddr-2]); fx[globAddr] = deltaphi; // Calculate dt*(dphi/dz) deltaphi = LAMZ*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + phiLoc[myLocAddr-2*GRADBLOCKX]); fz[globAddr] = deltaphi; } }
b3dfe4f871bbcb43cd690134b34f911550b4c018.cu
#include <stdio.h> #include <string.h> #include <stdarg.h> #ifdef UNIX #include <stdint.h> #include <unistd.h> #endif // CUDA #include "cuda.h" #include "nvToolsExt.h" __constant__ int devFluidParams[4]; #define FLUID_NX devFluidParams[0] #define FLUID_NY devFluidParams[1] #define FLUID_NZ devFluidParams[2] #define FLUID_SLABPITCH devFluidParams[3] __constant__ double dragparams[16]; __constant__ double devLambda[16]; // for gradient calculator kernels #define PI 3.141592653589793 //#define THREAD0_PRINTS_DBG /* NOTE NOTE NOTE IMPORTANT: If this is turned on to make them print, then the functions allregimeCdrag cukern_GasDustDrag_GeneralLinearCore cukern_LogTrapSolve which contain cuda kernel printf()s must all be moved UP HERE, ABOVE #include mex.h!!! mex.h wraps printf() and is fundamentally incompatible with cuda kernel printfs. */ /* * From dimensional analysis, by choosing L and T we can rescale... */ /* compute this on CPU and store in __constant__ double thevars[] */ #define VISC0 dragparams[0] #define VISCPOW dragparams[1] #define LAMPOW dragparams[2] #define ALPHA dragparams[3] #define BETA dragparams[4] #define DELTA dragparams[5] #define EPSILON dragparams[6] #define GAMMAM1 dragparams[8] #include "mex.h" #include "cudaCommon.h" #include "cudaSource2FluidDrag.h" // If defined in concert with ACCOUNT_GRADP, exponential methods will attempt to run the // action of the pressure gradient backwards in time to solve v' = -k(v) v + a on an // interval [-.5 .5] instead of [0 1]. This does not work and yields wrong dispersion // relations entirely. //#define EXPO_DOTR // This will account for the pressure gradient and solve v' = -k(v) v + a //#define ACCOUNT_GRADP // If the viscous temperature exponent is found to be 0.5 and the cross section exponent // is zero, the viscosity is hard spheres and some function calls can be simplified // for a speedup. 
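/* Illustrative sketch only (hypothetical helpers, never referenced by the solver): with the
 * hard-sphere values noted above, the temperature powers used by the drag kernels collapse
 * to closed forms, so the two pow() calls per cell could in principle be specialized: */
#if 0
// VISCPOW is stored as -muTindex, so hard spheres give VISCPOW == -0.5 and LAMPOW == 0
__device__ double hardSphereViscFactor(double Tnorm)  { return rsqrt(Tnorm); } // pow(Tnorm, -0.5)
__device__ double hardSphereSigmaFactor(double Tnorm) { return 1.0; }          // pow(Tnorm,  0.0)
#endif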
typedef enum ViscosityModel { HARD_SPHERES, PCOF } ViscosityModel; //int sourcefunction_2FluidDrag(MGArray *fluidA, MGArray *fluidB, GeometryParams geo, double gam, double sigmaGas, double muGas, double sigmaDust, double muDust, double dt, int method); int sourcefunction_2FluidDrag(MGArray *fluidA, MGArray *fluidB, GeometryParams *geo, ThermoDetails *thermogas, ThermoDetails *thermodust, double dt, int method); int solveDragEMP(MGArray *gas, MGArray *dust, double dt); int solveDragRK4(MGArray *gas, MGArray *dust, double dt); int solveDragETDRK1(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt); int solveDragETDRK2(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt); int solveDragLogTrapezoid(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt, int timeOrder); int prepareForExpMethod(MGArray *gas, MGArray *dust, MGArray *tempMem, GeometryParams geom, int spaceOrder, double scalingParameter); int findMidGradP2(MGArray *gas, MGArray *tempMem, GeometryParams geom, int spaceOrder, double scalingParameter); void dbgPrint(MGArray *gas, MGArray *dust, MGArray *t, int who, int idx); template <bool ONLY_DV_INI> __global__ void cukern_GasDustDrag_GeneralAccel(double *gas, double *dust, double *tmpmem, int srcBlock, int dstBlock, int N); __global__ void cukern_GasDustDrag_EpsteinAccel(double *gas, double *dust, double *vrel, int N); template <bool resetAccumulator> __global__ void cukern_GasDustDrag_GeneralLinearTime(double *gas, double *dust, double *tmpmem, int srcBlock, int kBlock, int N); // shell call for inner loop of above kernel template <bool resetAccumulator> __device__ void cukern_GasDustDrag_GeneralLinearCore(double *gas, double *dust, double *tmpmem, int srcBlock, int kBlock, int N); __global__ void cukern_findInitialDeltaV(double *g, double *d, double *dv, unsigned long partNumel); // Functions to evaluate explicit Butcher tableaus template <bool resetAccumulator> __global__ void cukern_SolveRK_single(double *tmpmem, int d, double A, int i, double B, unsigned long partNumel); template <bool resetAccumulator> __global__ void cukern_SolveRK_double(double *tmpmem, int d, double F[2], int i[2], double B, unsigned long partNumel); template <bool resetAccumulator> __global__ void cukern_SolveRK_triple(double *tmpmem, int d, double F[3], int i[3], double B, unsigned long partNumel); __global__ void cukern_SolveRK_final(double *tmpmem, int i, double B, double W, unsigned long partNumel); __global__ void cukern_applyFinalDeltaV(double *g, double *d, double *dv_final, unsigned long partNumel); __global__ void cukern_ExpMidpoint_partA(double *gas, double *dust, double *tmpmem, double t, unsigned long partNumel); __global__ void cukern_ExpMidpoint_partB(double *gas, double *dust, double t, double *tmpmem); __global__ void cukern_ETDRK1(double *gas, double *dust, double t, double *tmpmem); template <int order> __global__ void cukern_LogTrapSolve(double *gas, double *dust, double t, double *tmpmem, int partNumel); // Accept the following drag models: // (1) full : Use full Epstein+Stokes calculation with interpolation between all 4 quadrants // (2) Epstein : Use only Epstein force calculation, valid for any speed but only small particles // (3) Linear : Compute Epstein+Stokes in low-velocity limit, valid only for |delta-v/c| << 1 (and strictly, Re < 1) // PARITY CONVENTIONS ARE AS FOLLOWS: // delta-V is defined as GAS VELOCITY MINUS DUST VELOCITY // Drag force is positive in the direction of delta-V, // i.e. 
d/dt(dust momentum) = F_drag and d/dt(gas momentum) = -F_drag // ergo d/dt(delta_V) ~ -F_drag / mass void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if ((nrhs!=3) || (nlhs != 0)) mexErrMsgTxt("Wrong number of arguments: need cudaSource2FluidDrag(FluidManager[2], geometry, [dt, solverMethod])\n"); if(CHECK_CUDA_ERROR("entering cudaSource2FluidDrag") != SUCCESSFUL) { DROP_MEX_ERROR("Failed upon entry to cudaSource2FluidDrag."); } MGArray fluidA[5]; int status = MGA_accessFluidCanister(prhs[0], 0, &fluidA[0]); if(status != SUCCESSFUL) { PRINT_FAULT_HEADER; printf("Unable to access first FluidManager.\n"); PRINT_FAULT_FOOTER; DROP_MEX_ERROR("crashing."); } const mxArray *thermostruct = derefXatNdotAdotB(prhs[0], 0, "thermoDetails", NULL); ThermoDetails thermA = accessMatlabThermoDetails(thermostruct); MGArray fluidB[5]; status = MGA_accessFluidCanister(prhs[0], 1, &fluidB[0]); if(status != SUCCESSFUL) { PRINT_FAULT_HEADER; printf("Unable to access second FluidManager.\n"); PRINT_FAULT_FOOTER; DROP_MEX_ERROR("crashing."); } thermostruct = derefXatNdotAdotB(prhs[0], 1, "thermoDetails", NULL); ThermoDetails thermB = accessMatlabThermoDetails(thermostruct); GeometryParams geo = accessMatlabGeometryClass(prhs[1]); double *params = mxGetPr(prhs[2]); size_t ne = mxGetNumberOfElements(prhs[2]); if(ne != 2) { PRINT_FAULT_HEADER; printf("3rd argument to cudaSource2FluidDrag must have 2 elements:\n[ dt (method: 0=midpt, 1=rk4, 2=exponential)]\nGiven argument has %i instead.\n", (int)ne); PRINT_FAULT_FOOTER; DROP_MEX_ERROR("Crashing."); } double dt = params[0]; int solverMethod = (int)params[1]; // For reference: //1nm iron sphere, 300K -> 56m/s thermal velocity //10nm iron ball, 300K -> 1.79m/s thermal velocity //100nm iron ball, 300K -> 56mm/s thermal velocity status = sourcefunction_2FluidDrag(&fluidA[0], &fluidB[0], &geo, &thermA, &thermB, dt, solverMethod); if(CHECK_IMOGEN_ERROR(status) != SUCCESSFUL) { DROP_MEX_ERROR("2-fluid drag code crashed!"); } return; } /* Calculates the drag between fluids A and B where B is presumed to be dust. * geo describes the physical geometry of the grids, to which fluidA an fluidB must conform. * thermogas and thermodust provide the necessary fluid microphysics constants. * dt is the time to integrate and method selects the numeric integration scheme to employ. */ int sourcefunction_2FluidDrag(MGArray *fluidA, MGArray *fluidB, GeometryParams *geo, ThermoDetails *thermogas, ThermoDetails *thermodust, double dt, int method) { int i; int sub[6]; int hostFluidParams[4]; int statusCode = SUCCESSFUL; double hostDrag[16]; double gam = thermogas -> gamma; // Reference viscosity & viscosity temperature dependence (0.5 for hard spheres) double nu0 = thermogas->mu0; double nupow = thermogas->muTindex; // double lampow = thermogas->sigmaTindex; double ddust = sqrt(thermodust->sigma0 / 3.141592653589793); // based on sigma being a kinetic cross section = pi (2r)^2, this is correct and needn't be divided by 4 double mgas = thermogas->m; double mdust = thermodust->m; hostDrag[0] = nu0; // reference viscosity, fluidDetailModel.viscosity hostDrag[1] = -nupow; // viscosity temperature dependence, fluidDetailModel.visocity hostDrag[2] = lampow; // cross section temperature dependence, fluidDetailModel. ... 
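	/* These prefactors are consumed by the drag kernels later in this file as
	 *   Tnormalized = ALPHA * u_specific
	 *   Re          = DELTA * rho_gas * |dv| * Tnormalized^VISCPOW
	 *   Kn          = BETA  * Tnormalized^LAMPOW / rho_gas
	 *   k_drag      = Cd_hat * |dv| * (rho_gas + rho_dust) * EPSILON */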
hostDrag[3] = mgas *(gam-1.0) / (298.15*thermogas->kBolt); // alpha = mgas * (gamma-1) / (t_ref * k_b) hostDrag[4] = sqrt(2.0)*mgas/(thermogas->sigma0 * ddust); // beta =2 mgas / (sqrt(2) * sigmaGas * dustDiameter); hostDrag[5] = ddust / nu0; // delta= dustDiameter / (visc0) hostDrag[6] = thermodust->sigma0 / (1.0*mdust); // epsilon = sigmaDust / 8 mdust hostDrag[7] = dt; hostDrag[8] = (gam-1.0); hostDrag[9] = .25*thermodust->sigma0 / mdust; hostDrag[10]= 16*(gam-1.0)/3.0; #ifdef THREAD0_PRINTS_DBG printf("hostDrag[] in sourceFunction_2FluidDrag:\n"); printf("VISC0 = %le\nVISCPOW = %le\nLAMPOW = %le\nALPHA=%le\nBETA=%le\nDELTA=%le\nEPSILON=%le\n", hostDrag[0], hostDrag[1], hostDrag[2], hostDrag[3], hostDrag[4], hostDrag[5], hostDrag[6]); #endif for(i = 0; i < fluidA->nGPUs; i++) { cudaSetDevice(fluidA->deviceID[i]); statusCode = CHECK_CUDA_ERROR("cudaSetDevice"); if(statusCode != SUCCESSFUL) break; calcPartitionExtent(fluidA, i, &sub[0]); hostFluidParams[0] = sub[3]; hostFluidParams[1] = sub[4]; hostFluidParams[2] = sub[5]; hostFluidParams[3] = fluidA->slabPitch[i] / sizeof(double); // This is important, due to padding, is isn't just .partNumel cudaMemcpyToSymbol((const void *)devFluidParams, &hostFluidParams[0], 4*sizeof(int), 0, cudaMemcpyHostToDevice); statusCode = CHECK_CUDA_ERROR("memcpyToSymbol"); if(statusCode != SUCCESSFUL) break; cudaMemcpyToSymbol((const void *)dragparams, &hostDrag[0], 11*sizeof(double), 0, cudaMemcpyHostToDevice); statusCode = CHECK_CUDA_ERROR("memcpyToSymbol"); if(statusCode != SUCCESSFUL) break; } if(statusCode != SUCCESSFUL) return statusCode; // FIXME pick a numeric method here dynamically? switch(method) { case 0: // EMP statusCode = CHECK_IMOGEN_ERROR(solveDragEMP(fluidA, fluidB, dt)); break; case 1: // RK4 statusCode = CHECK_IMOGEN_ERROR(solveDragRK4(fluidA, fluidB, dt)); break; case 2: // ETDRK1 (exponential Euler) statusCode = CHECK_IMOGEN_ERROR(solveDragETDRK1(fluidA, fluidB, geo, gam, dt)); break; case 3: // ETDRK2 (exponential midpoint) statusCode = CHECK_IMOGEN_ERROR(solveDragETDRK2(fluidA, fluidB, geo, gam, dt)); break; case 4: // LogTrap2 method (quadratic accuracy with time-variable drag coefficient) statusCode = CHECK_IMOGEN_ERROR(solveDragLogTrapezoid(fluidA, fluidB, geo, gam, dt, 2)); break; case 5: // LogTrap3 method (cubic accuracy with time-variable drag coefficient) statusCode = CHECK_IMOGEN_ERROR(solveDragLogTrapezoid(fluidA, fluidB, geo, gam, dt, 3)); break; } return statusCode; } /* Helps track the state of the integrator when debugging w/o needing cuda-gdb * i.e. 
a slightly more sophisticated printf()-debug * gas, dust, t are the five-MGArray pointers to gas, dust and tmp storage * who: bit 1 = print about gas, 2 = about dust, 4 = about t * idx: the linear index of the cell to print about (the test suite element generates a uniform * in space fluid state) */ void dbgPrint(MGArray *gas, MGArray *dust, MGArray *t, int who, int idx) { double *hstcpy = (double *)malloc(gas->slabPitch[0]*5); int qq = gas->slabPitch[0]/8; if(who & 1) { cudaMemcpy((void *)hstcpy, (const void *)gas->devicePtr[0], gas->slabPitch[0]*5, cudaMemcpyDeviceToHost); printf("Gas input state: [%e %e %e %e %e]\n", hstcpy[idx+0*qq], hstcpy[idx+1*qq], hstcpy[idx+2*qq], hstcpy[idx+3*qq], hstcpy[idx+4*qq]); } if(who & 2) { cudaMemcpy((void *)hstcpy, (const void *)dust->devicePtr[0], gas->slabPitch[0]*5, cudaMemcpyDeviceToHost); printf("Dust input state: [%e %e %e %e %e]\n", hstcpy[idx+0*qq], hstcpy[idx+1*qq], hstcpy[idx+2*qq], hstcpy[idx+3*qq], hstcpy[idx+4*qq]); } if(who & 4) { cudaMemcpy((void *)hstcpy, (const void *)t->devicePtr[0], gas->slabPitch[0]*5, cudaMemcpyDeviceToHost); printf("tmp memory state: [%e %e %e %e %e]\n", hstcpy[idx+0*qq], hstcpy[idx+1*qq], hstcpy[idx+2*qq], hstcpy[idx+3*qq], hstcpy[idx+4*qq]); } free(hstcpy); } /* Solves the action of gas-dust drag for one dust using the explicit midpoint method * 2nd order in time, not A-stable (dt <~ t_stop) */ int solveDragEMP(MGArray *gas, MGArray *dust, double dt) { int n = gas->nGPUs; double *g; double *d; double *vrel; int statusCode = SUCCESSFUL; MGArray tmpArrays; statusCode = MGA_allocSlab(gas, &tmpArrays, 5); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; int i; int BS = 96; dim3 blocksize(BS, 1, 1); dim3 gridsize(32, 1, 1); for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems gridsize.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { gridsize.x = ROUNDUPTO(NE, BS)/BS; } cudaSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; vrel = tmpArrays.devicePtr[i]; // compute initial delta-v cukern_findInitialDeltaV<<<gridsize, blocksize>>>(g, d, vrel, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_findInitialDeltaV"); if(statusCode != SUCCESSFUL) break; // solve gas drag on y0, store in block 3: use only ini dv for u_specific cukern_GasDustDrag_GeneralAccel<true><<<gridsize, blocksize>>>(g, d, vrel, 0, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_full<false>"); if(statusCode != SUCCESSFUL) break; // compute delta-v at t=1/2; store stage at block 4 cukern_SolveRK_single<true><<<gridsize, blocksize>>>(vrel, 4, .5*dt, 3, 0, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_SolveRK_single<true>"); if(statusCode != SUCCESSFUL) break; // solve gas drag at t=1/2 using half stage, store in block 3 cukern_GasDustDrag_GeneralAccel<false><<<gridsize, blocksize>>>(g, d, vrel, 4, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_full<true>"); if(statusCode != SUCCESSFUL) break; // Apply final stage derivative to compute y(t) cukern_SolveRK_final<<<gridsize, blocksize>>>(vrel, 3, 1.0, dt, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_SolveRK_final"); if(statusCode != SUCCESSFUL) break; // compute new gas/dust momentum and temperature arrays using analytic forms cukern_applyFinalDeltaV<<<gridsize, blocksize>>>(g, d, vrel, NE); statusCode = 
CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_applyFinalDeltaV"); if(statusCode != SUCCESSFUL) break; } MGA_delete(&tmpArrays); return CHECK_IMOGEN_ERROR(statusCode); } /* Solves the action of the gas-dust drag for one dust using the 4th order RK method of Kutta (1903) * 4th order in time, conditionally stable (dt <~ 3t_stop) */ int solveDragRK4(MGArray *gas, MGArray *dust, double dt) { int n = gas->nGPUs; double *g; double *d; // short names for gas and dust gpu memory pointers double *vrel; // temp memory short pointer name int statusCode = SUCCESSFUL; int i; MGArray tmpArrays; statusCode = MGA_allocSlab(gas, &tmpArrays, 5); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; int BS = 96; // FIXME this should determine an appropriate blocksize at runtime perhaps? dim3 blocksize(BS, 1, 1); dim3 gridsize(32, 1, 1); dim3 smallgrid(1,1,1); double bWeights[4] = { 1.0, 2.0, 2.0, 1.0 }; // classic RK4 weights double bRescale = dt / 6.0; for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems gridsize.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { gridsize.x = ROUNDUPTO(NE, BS)/BS; } cudaSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; vrel = tmpArrays.devicePtr[i]; // compute initial delta-v cukern_findInitialDeltaV<<<gridsize, blocksize>>>(g, d, vrel, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_findInitialDeltaV"); if(statusCode != SUCCESSFUL) break; // solve gas drag on y0, store in block 3 cukern_GasDustDrag_GeneralAccel<true><<<gridsize, blocksize>>>(g, d, vrel, 0, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_GeneralAccel<true>"); if(statusCode != SUCCESSFUL) break; // compute delta-v at t=1/2; store stage at block 4 cukern_SolveRK_single<true><<<gridsize, blocksize>>>(vrel, 4, 0.5*dt, 3, bWeights[0], NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_SolveRK_single<true>"); if(statusCode != SUCCESSFUL) break; // solve gas drag on k2, store in block 3 cukern_GasDustDrag_GeneralAccel<false><<<gridsize, blocksize>>>(g, d, vrel, 4, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_GeneralAccel<false>"); if(statusCode != SUCCESSFUL) break; // compute delta-v at t=1/2; store stage at block 4 cukern_SolveRK_single<false><<<gridsize, blocksize>>>(vrel, 4, 0.5*dt, 3, bWeights[1], NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_SolveRK_single<true>"); if(statusCode != SUCCESSFUL) break; // solve gas drag on k3, store in block 3 cukern_GasDustDrag_GeneralAccel<false><<<gridsize, blocksize>>>(g, d, vrel, 4, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_GeneralAccel<false>"); if(statusCode != SUCCESSFUL) break; // compute delta-v at t=1/2; store stage at block 4 cukern_SolveRK_single<false><<<gridsize, blocksize>>>(vrel, 4, 1.0*dt, 3, bWeights[2], NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_SolveRK_single<true>"); if(statusCode != SUCCESSFUL) break; // solve gas drag on k4, store in block 3 cukern_GasDustDrag_GeneralAccel<false><<<gridsize, blocksize>>>(g, d, vrel, 4, 3, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_GasDustDrag_GeneralAccel<false>"); if(statusCode != SUCCESSFUL) break; // add block 3 to accumulator, rescale by dt / 6.0 and add y0 to find final dv. 
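		// i.e. the classic tableau: dv(t+dt) = dv(0) + (dt/6)*(k1 + 2*k2 + 2*k3 + k4), the stage
		// weights {1,2,2,1} having been applied as each k block was accumulated above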
cukern_SolveRK_final<<<gridsize, blocksize>>>(vrel, 3, bWeights[3], bRescale, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_SolveRK_final"); if(statusCode != SUCCESSFUL) break; // compute new gas/dust momentum and temperature arrays cukern_applyFinalDeltaV<<<gridsize, blocksize>>>(g, d, vrel, NE); statusCode = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_applyFinalDeltaV"); if(statusCode != SUCCESSFUL) break; } if(statusCode != SUCCESSFUL) { printf("RK4 was unsuccessful: Trying to free temp memory, then returning crash condition.\n"); MGA_delete(&tmpArrays); PRINT_FAULT_FOOTER; } else { statusCode = MGA_delete(&tmpArrays); } return CHECK_IMOGEN_ERROR(statusCode); } /* Solves the gas-dust drag equations using the 2nd order Explicit Exponential Runge-Kutta method * aka the exponential midpoint method: * u_stiff(hf) = u_stiff(0) exp(M_stiff(0) t/2) * u_soft(hf) = u_0 + M_soft(0)*t/2 * u_stiff(t) = u_stiff(0) exp(M_stiff(hf) t) * u_soft(t) = u_0 + M_soft(hf)*t/2 * where the stiff term (gas-dust drag) is solved by directly exponentiating its characteristic matrix * and the nonstiff terms are handled by simple explicit RK2 * * We are advantaged here that to an excellent approximation the stiff terms are truly linear * (i.e. the effect of drag heating in altering pressure gradients is neglectable) if drag is strong * enough to require calling this method. * formally order 2, stiff order 1, L-stable */ int solveDragETDRK1(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt) { int dbprint = 0; int n = gas->nGPUs; double *g; double *d; double *tempPtr; int statusCode = SUCCESSFUL; MGArray tmpMem; MGArray *gs = &tmpMem; int numTmpArrays, spaceOrder; #ifdef ACCOUNT_GRADP numTmpArrays = 5; spaceOrder = 2; #else numTmpArrays = 2; spaceOrder = 0; #endif statusCode = MGA_allocSlab(gas, gs, numTmpArrays); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; int i; int BS = 96; // for kernels not requiring finite differencing dim3 linblock(BS, 1, 1); dim3 lingrid(32, 1, 1); // for kernels that do need to do FD dim3 fdgrid(4, 4, 1); dim3 fdblock(16, 16, 1); // Emits [|dv_tr|, u_0, P_x, P_y, P_z] into temp memory at gs statusCode = prepareForExpMethod(gas, dust, gs, *geo, spaceOrder, fluidGamma - 1); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; if(dbprint) { dbgPrint(gas, dust, gs, 7, 6); } int velblock = 0; int kblock = 0; for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems lingrid.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { lingrid.x = ROUNDUPTO(NE, BS)/BS; } cudaSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; tempPtr = tmpMem.devicePtr[i]; // Use u_0 and dv_tr to compute the drag eigenvalue at t=0 // overwrite the |dv_tr| value (block 0) with K cukern_GasDustDrag_GeneralLinearTime<true><<<lingrid, linblock>>>(g, d, tempPtr, velblock, kblock, gas->partNumel[i]); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "cukern_GasDustDrag_linearTime"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 4, 6); } // Use 1st order exponential time differencing (exponential euler) cukern_ETDRK1<<<lingrid, linblock>>>(g, d, dt, tempPtr); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "cukern_ExponentialEulerHalf"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 7, 6); } } // Make sure node's internal boundaries are consistent 
if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) MGA_exchangeLocalHalos(gas + 1, 4); if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) MGA_exchangeLocalHalos(dust + 1, 4); if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) MGA_delete(gs); return statusCode; } // TODO /* Implement Exponential Time Differencing, 2nd order RK: * y_1 = exp(h L) y_0 + h phi_1(h L) f(t=0) * y_n+1 = exp(h L) y_0 + h (phi_1(h L) - phi_2(h L)) f(t=0) + h phi_2(h L) f(t=1) * *L = -k *-> y_1 = exp(-k t) y_0 + t (exp(-k t)-1) / (-k t) f_0 *-> y_1 = exp(-k t) y_0 + f_0 (1 - exp(-k t)) / k *-> y_1 = f_0 / k + (y_0 - f_0/k) exp(-k t) * *y_n+1 = exp(-k t) y_0 + f_0 (-(exp(-k t)-1)/k - (exp(-k t)-1-kt)/(k^2 t)) + f_1 (exp(-kt)-1-k t)/k^2t *y_n+1 = exp(-k t) y_0 + f_0/k + (f_0-f_1)/k^2t + f_0/k - f_1/k + (-f_0/k - f_0/k^2t + f_1/k^2t) exp(-k t) *y_n+1 = exp(-k t) y_0 + (2f_0-f_1)/k -f_0/k exp(-kt) + (f_0-f_1)/k^2t - (f_0 - f_1) exp(-k t)/k^2t *y_n+1 = (2f_0-f_1)/k + (y_0-f_0/k) exp(-kt) + (f_0-f_1)(1-exp(-kt))/k^2t *y_n+1 = y_0 exp(-kt) + f_0(2/k - exp(-kt)/k + 1/k^2t -exp(-kt)/k^2t) + f_1(-1/k + exp(-kt)/k^2t) */ int solveDragETDRK2(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt) { int n = gas->nGPUs; int dbprint = 0; double *g; double *d; double *tempPtr; int statusCode = SUCCESSFUL; MGArray tmpMem; MGArray *gs = &tmpMem; statusCode = MGA_allocSlab(gas, gs, 6); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; int i; int BS = 96; // for kernels not requiring finite differencing dim3 linblock(BS, 1, 1); dim3 lingrid(32, 1, 1); // for kernels that do need to do FD dim3 fdgrid(4, 4, 1); dim3 fdblock(16, 16, 1); // Emits [|dv_tr|, u_0, P_x, P_y, P_z] into temp memory at gs statusCode = prepareForExpMethod(gas, dust, gs, *geo, 2, fluidGamma - 1); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; if(dbprint) { dbgPrint(gas, dust, gs, 7, 6); } int velblock = 0; int kblock = 0; for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems lingrid.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { lingrid.x = ROUNDUPTO(NE, BS)/BS; } cudaSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; tempPtr = tmpMem.devicePtr[i]; // Use u_0 and dv_tr to compute the drag eigenvalue at t=0 // overwrites the |dv_tr| value (block 0) with K // replace [|dv_tr|, u_0, P_x, P_y, P_z] into temp memory at gs // with [K , u_0, P_x, P_y, P_z] into temp memory at gs cukern_GasDustDrag_GeneralLinearTime<true><<<lingrid, linblock>>>(g, d, tempPtr, velblock, kblock, gas->partNumel[i]); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "cukern_GasDustDrag_linearTime"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 4, 6); } // Use the eigenvalue from t=0 to advance to t=1/2 // Output only new uint & dv values from this stage, // We do this only do re-evaluate the pressure gradient & eigenvalue at the midpoint // This reads K from register 0 and overwrites it with dv_half // overwrite [K , u_0, P_x, P_y, P_z] into temp memory at gs // with [dv_new , u_new, P_x, P_y, P_z] into temp memory at gs cukern_ExpMidpoint_partA<<<lingrid, linblock>>>(g, d, tempPtr, dt, gas->partNumel[i]); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "doing cukern_ExponentialEulerIntermediate"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 4, 6); } } // Solve gradient-P again statusCode = findMidGradP2(gas, gs, *geo, 2, fluidGamma - 1); if(dbprint) { dbgPrint(gas, dust, gs, 4, 
6); } for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems lingrid.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { lingrid.x = ROUNDUPTO(NE, BS)/BS; } cudaSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; tempPtr = tmpMem.devicePtr[i]; // accumulates new k onto original k, such that block 0 is now (k0 + k1)... cukern_GasDustDrag_GeneralLinearTime<true><<<lingrid, linblock>>>(g, d, tempPtr, velblock, kblock, gas->partNumel[i]); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "cukern_GasDustDrag_linearTime"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 4, 6); } // Use averaged pressure gradient and k value to compute timestep. // we divide t by 2 since we simply summed the k values previously cukern_ExpMidpoint_partB<<<lingrid, linblock>>>(g, d, dt, tempPtr); statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "cukern_exponentialMidpoint"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 3, 6); } } // Make sure node's internal boundaries are consistent if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_exchangeLocalHalos(gas + 1, 4); if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_exchangeLocalHalos(dust + 1, 4); if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_delete(gs); return statusCode; } /* Second or third order method that handles variable drag coefficients * 2nd order (trapezoid): * u_0 = P(y_0) * k_0 = compute_kdrag(y_0, u_0) * (y_1, u_1) = y_0 exp(-k_0 t) * k_1 = compute_kdrag(y_1, u_1) * y_n+1 = y_0 exp(-0.5(k_0 + k_1)t) * * 3rd order: (Richardson extrapolated trapezoid) * u_0 = P(y_0) * k_0 = compute_kdrag(y_0, u_0) * (y_1, u_1) = y_0 exp(-k_0 t) * k_1 = compute_kdrag(y_1, u_1) * (y_nhf,u_nhf)= y_0 exp(-0.5 * 0.5(k_0 + k_1)t) * k_nhf = compute_kdrag(y_nhf, u_nhf) * k_integral = richardson_extrap(.25 k_0 + .5 k_nhf + .25 k1, .5k_0 + .5k_1) * y_1 = y_0 exp(-k_integral t) */ int solveDragLogTrapezoid(MGArray *gas, MGArray *dust, GeometryParams *geo, double fluidGamma, double dt, int timeOrder) { int n = gas->nGPUs; int dbprint = 0; double *g; double *d; double *tempPtr; int statusCode = SUCCESSFUL; MGArray tmpMem; MGArray *gs = &tmpMem; #ifdef USE_NVTX nvtxMark("Large alloc (3 arrays)"); #endif statusCode = MGA_allocSlab(gas, gs, 3); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; int i; int BS = 96; // for kernels not requiring finite differencing dim3 linblock(BS, 1, 1); dim3 lingrid(32, 1, 1); // for kernels that do need to do FD dim3 fdgrid(4, 4, 1); dim3 fdblock(16, 16, 1); // Emits [|dv_tr|, u_0] into temp memory at gs statusCode = prepareForExpMethod(gas, dust, gs, *geo, 0, fluidGamma - 1); if(CHECK_IMOGEN_ERROR(statusCode) != SUCCESSFUL) return statusCode; /* These often fail wrongly in parallel because of invalid halo entries */ /* int dbcheckval = dbgfcn_CheckArrayVals(gs, 5, 1); if(CHECK_IMOGEN_ERROR(dbcheckval) != SUCCESSFUL) return dbcheckval; */ if(dbprint) { dbgPrint(gas, dust, gs, 7, 6); } for(i = 0; i < n; i++) { long NE = gas->partNumel[i]; // avoid launching tons of threads for small problems lingrid.x = 32; if(ROUNDUPTO(NE, BS)/BS < 32) { lingrid.x = ROUNDUPTO(NE, BS)/BS; } cudaSetDevice(gas->deviceID[i]); g = gas->devicePtr[i]; d = dust->devicePtr[i]; tempPtr = tmpMem.devicePtr[i]; switch(timeOrder) { case 2: cukern_LogTrapSolve<2><<<lingrid, linblock>>>(g, d, dt, tempPtr, gas->partNumel[i]); break; case 3: 
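			// LogTrap3: Richardson-extrapolated trapezoid, per the 3rd-order scheme outlined in the comment above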
cukern_LogTrapSolve<3><<<lingrid, linblock>>>(g, d, dt, tempPtr, gas->partNumel[i]); break; default: statusCode = ERROR_INVALID_ARGS; break; } statusCode = CHECK_CUDA_LAUNCH_ERROR(linblock, lingrid, gas, i, "doing cukern_LogTrapSolve"); if(statusCode != SUCCESSFUL) break; if(dbprint) { dbgPrint(gas, dust, gs, 7, 6); } } // See to it that internal gpu-gpu boundaries for momentum & energy are consistent if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_exchangeLocalHalos(gas + 1, 4); if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_exchangeLocalHalos(dust + 1, 4); /* These often fail in parallel due to boundary conditions or halo cells dbcheckval = dbgfcn_CheckFluidVals(gas, 1); if(CHECK_IMOGEN_ERROR(dbcheckval) != SUCCESSFUL) return dbcheckval; dbcheckval = dbgfcn_CheckFluidVals(dust, 1); if(CHECK_IMOGEN_ERROR(dbcheckval) != SUCCESSFUL) return dbcheckval; */ // Dump the temporary memory if(CHECK_IMOGEN_ERROR(statusCode) == SUCCESSFUL) statusCode = MGA_delete(gs); return statusCode; } /* This function returns the Stokes coefficient, scaled by 1/2 * This parameter is experimentally measured except for the low-Re regime */ __device__ double drag_coeff(double Re) { if(Re < 1) { // 24 / Re return 12 / (Re+1e-15); } if(Re > 7.845084191866316e+02) { // .44 return 0.22; } // 24 Re^-.6 return 12.0*pow(Re,-0.6); } /* Computes the drag coefficient for all Reynolds and Knudsen numbers with an accuracy of <1% for * speeds less than approximately Mach 0.1. * The coefficients are divided by 8 per a factor that appears in the drag time formula * The Cunninghand correction coefficients of Allen & Raabe (1.142, .558, .998) are used. */ __device__ double allregimeCdrag(double Re, double Kn) { // Prevent 1/0 errors which may occur when a simulation is initialized with dv = 0 // The only physical way to acheive Re = 0 is if dv = 0, and if dv =0 then skip wasting time // Note that all leading numeric coefficients in C_drag are divided by 8 from their normal // presentation to absorb numeric factors elsewhere in the drag equations double cunningham = 1 + Kn*(1.142 + 1*0.558*exp(-0.999/Kn)); double C_drag = (3 / Re + .45*pow(Re, -.319) + .0509*Re/(8710+Re)) / cunningham; #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i,line %i: Cdrag reporting: Cd0 = %.12lf, Cu = %.12lf\n, Cd = %.12lf\n", __LINE__, blockIdx.x, threadIdx.x, C_drag, cunningham, C_drag / cunningham); } #endif return C_drag; } /* The general linear core is called upon by the LogTrap solver as well so it is here separated out */ template <bool resetAccumulator> __device__ void cukern_GasDustDrag_GeneralLinearCore(double *gas, double *dust, double *tmpmem, int srcBlock, int kBlock, int N) { double rhoA, rhoB; // gas and dust densities respectively double magdv; // magnitude velocity difference double uspecific; // specific internal energy density = e_{int} / rho double Tnormalized; // Temperature normalized by the reference temp for the viscosity double Re, Kn; // Reynolds number and Knudsen number double kdrag, Cd_hat; // drag time constant & drag coefficient magdv = tmpmem[srcBlock*FLUID_SLABPITCH]; if(magdv < 1e-9) { if(resetAccumulator) { tmpmem[kBlock*FLUID_SLABPITCH] = 0; } #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i, line=%i: general linear core reporting: |dv| < 1e-9, returning no drag\n", __LINE__, blockIdx.x, threadIdx.x); } #endif return; } rhoA = gas[0]; rhoB = dust[0]; // make sure computation includes gas heating term! 
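	// Heating bookkeeping: relative KE per unit volume is .5*(rhoA*rhoB/(rhoA+rhoB))*dv^2, so depositing
	// the loss from tmpmem[0] (initial |dv|) down to magdv entirely into the gas raises u_specific = e_int/rhoA
	// by .5*rhoB*(tmpmem[0]^2 - magdv^2)/(rhoA+rhoB); that appears to be the intent of the term added below
	// (see the fixme that follows)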
// fixme double check this calculation I think it may be in error if(srcBlock != 0) { // If srcblock != zero, we're evaluating a different dv than originally used to give uinternal: // must add dissipated relative KE to gas internal energy. uspecific = tmpmem[FLUID_SLABPITCH] + .5 * rhoB * (tmpmem[0]*tmpmem[0] - magdv*magdv) / (rhoA + rhoB); } else { // If srcBlock is zero, we're reading the original dv for which uinternal was computed: No change uspecific = tmpmem[FLUID_SLABPITCH]; } Tnormalized = ALPHA * uspecific; Re = DELTA * rhoA * magdv * pow(Tnormalized, VISCPOW); Kn = BETA * pow(Tnormalized, LAMPOW) / rhoA; Cd_hat = allregimeCdrag(Re, Kn); kdrag = Cd_hat * magdv * (rhoA + rhoB) * EPSILON; #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i, line %i: general linear core reporting: uspecific=%le, T/T0=%le, Re=%le, Kn=%le, Cd=%le, k=%le, a=k*v=%le\n", __LINE__, blockIdx.x, threadIdx.x, uspecific, Tnormalized, Re, Kn, 8*Cd_hat, kdrag, kdrag*magdv); } #endif if(resetAccumulator) { tmpmem[kBlock*FLUID_SLABPITCH] = kdrag; } else { tmpmem[kBlock*FLUID_SLABPITCH] += kdrag; } //tmpmem[2*FLUID_SLABPITCH] = Re; //tmpmem[3*FLUID_SLABPITCH] = Kn; //tmpmem[4*FLUID_SLABPITCH] = Cd_hat * 8; } /* This function directly computes the gas-dust drag force in the full (stokes+epstein) regime * This is suited for weaker drag or strange regimes, but unnecessary and time-consuming for * small particles which will never exit the low-speed Epstein regime. * - Uses staged dv value stored at srcBlock, writes acceleration into dstBlock * - template saves on evaluating drag heating if true */ template <bool ONLY_DV_INI> __global__ void cukern_GasDustDrag_GeneralAccel(double *gas, double *dust, double *tmpmem, int srcBlock, int dstBlock, int N) { int i = threadIdx.x + blockIdx.x*blockDim.x; double rhoA, rhoB; // gas and dust densities respectively double magdv; // magnitude velocity difference double uspecific; // specific internal energy density double Tnormalized; double Re, Kn; // Spherical particle Reynolds number double Cd_hat, accel; gas += i; dust += i; tmpmem += i; for(; i < N; i+= blockDim.x*gridDim.x) { magdv = tmpmem[srcBlock*FLUID_SLABPITCH]; rhoA = gas[0]; rhoB = dust[0]; if(ONLY_DV_INI) { uspecific = tmpmem[FLUID_SLABPITCH]; } else { // make sure computation includes gas heating term! 
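		// Bookkeeping sketch for the term below: the relative kinetic energy per unit volume is
		// 0.5*mu*dv^2 with reduced density mu = rhoA*rhoB/(rhoA+rhoB). Drag deposits the decrease
		// of that quantity into the gas, so per unit gas mass the specific internal energy gains
		//     du = 0.5*mu*(dv_0^2 - dv^2)/rhoA = 0.5*rhoB*(dv_0^2 - dv^2)/(rhoA+rhoB),
		// which is the correction applied to uspecific here (dv_0 is stored in tmpmem[0]).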
uspecific = tmpmem[FLUID_SLABPITCH] + .5 * rhoB * (tmpmem[0]*tmpmem[0] - magdv*magdv) / (rhoA + rhoB); } Tnormalized = ALPHA * uspecific; Re = DELTA * rhoA * magdv * pow(Tnormalized, VISCPOW); Kn = BETA * pow(Tnormalized, LAMPOW) / rhoA; Cd_hat = allregimeCdrag(Re, Kn); accel = Cd_hat * magdv * magdv * (rhoA + rhoB) * EPSILON; tmpmem[dstBlock*FLUID_SLABPITCH] = -accel; gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } #define EPSTEIN_ALPHA dragparams[9] #define EPSTEIN_BETA dragparams[10] /* This function computes particle drag in the Epstein regime (particles much smaller than gas MFP) * but is unsuited to large particles or dense gas */ __global__ void cukern_GasDustDrag_EpsteinAccel(double *gas, double *dust, double *vrel, int N) { int i = threadIdx.x + blockIdx.x*blockDim.x; double rhoA, rhoB; // gas and dust densities respectively double magdv; // magnitude velocity difference double uinternal; // specific internal energy density double accel; // Relative acceleration (d/dt of vrel) gas += i; dust += i; vrel += i; for(; i < N; i+= blockDim.x*gridDim.x) { magdv = vrel[FLUID_SLABPITCH]; rhoA = gas[0]; rhoB = dust[0]; // make sure computation includes gas heating term! uinternal = vrel[2*FLUID_SLABPITCH] + rhoB * (vrel[0]*vrel[0] - magdv*magdv) / (rhoA + rhoB); // compute f(single particle) = sqrt(f_slow^2 + f_fast^2) // where f_slow = (4/3) A_dust cbar rho_g dv // f_fast = A_dust rho_g dv^2 // and accel = f(single particle) * (rho_dust / m_dust) * (rho_g + rho_d)/(rhog rhod) // = f(single particle) * ndust / reduced mass accel = EPSTEIN_ALPHA * magdv * rhoA * sqrt(magdv*magdv + EPSTEIN_BETA*uinternal) * (1.0+rhoB/rhoA); vrel[3*FLUID_SLABPITCH] = accel; gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; vrel += blockDim.x*gridDim.x; } } /* This function returns the drag rate K = (dv/dt) / v which is useful for e.g. exponential methods * for very stiff drag * * If motion is acted on exclusively by drag, a simple formula is available to determine heating * as a result of drag friction exactly. In this case, the original and current velocities are used * If it is not, the result is not as trivial and ONLY_DV_INI = true just uses a given input specific * internal energy. 
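 * For reference (matching cukern_GasDustDrag_GeneralLinearCore above): the rate computed here is
 *     k = Cd_hat * |dv| * (rho_gas + rho_dust) * EPSILON,
 * so that under pure drag d(dv)/dt = -k dv and |dv|(t) = |dv_0| exp(-k t). The heat released per
 * unit volume over a step is then 0.5 * mu * (dv_0^2 - dv^2), mu = rho_gas*rho_dust/(rho_gas+rho_dust).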
*/ template <bool resetAccumulator> __global__ void cukern_GasDustDrag_GeneralLinearTime(double *gas, double *dust, double *tmpmem, int srcBlock, int kBlock, int N) { int i = threadIdx.x + blockIdx.x*blockDim.x; gas += i; dust += i; tmpmem += i; for(; i < N; i+= blockDim.x*gridDim.x) { cukern_GasDustDrag_GeneralLinearCore<resetAccumulator>(gas, dust, tmpmem, srcBlock, kBlock, N); gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* Computes initial magnitude velocity ("w") into dv[0] and u_internal initial into dv[slabPitch] * and computes Uint_ini (e_internal / rho evaluated at original |w|) into dv[2*slabNumel] */ __global__ void cukern_findInitialDeltaV(double *g, double *d, double *dv, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; g += x; d += x; dv+= x; double u, q, dvsq, rhoginv, rhodinv; double momsq; while(x < partNumel) { rhoginv = 1/g[0]; rhodinv = 1/d[0]; q = g[2*FLUID_SLABPITCH]; u = q*rhoginv - d[2*FLUID_SLABPITCH]*rhodinv; momsq = q*q; dvsq = u*u; q = g[3*FLUID_SLABPITCH]; u = q*rhoginv - d[3*FLUID_SLABPITCH]*rhodinv; momsq += q*q; dvsq += u*u; q = g[4*FLUID_SLABPITCH]; u = q*rhoginv - d[4*FLUID_SLABPITCH]*rhodinv; momsq += q*q; dvsq += u*u; // Store magnitude delta-v and initial specific internal energy for use by gas drag routine dv[0] = sqrt(dvsq); dv[FLUID_SLABPITCH] = (g[FLUID_SLABPITCH] - .5*momsq * rhoginv)*rhoginv; x += blockDim.x*gridDim.x; g += blockDim.x*gridDim.x; d += blockDim.x*gridDim.x; dv+= blockDim.x*gridDim.x; } } /* This set of functions implement evaluation of the rows in RK Butcher tableaux containing * from 1 to 3 nonzero entries */ /* This function completes evaluation of an explicit Butcher tableau. * the final y' stored at i gets added with weight B to the accumulator * The accumulator is rescaled by W, added to block 0, and overwritten * tmpmem[2] += B * tmpmem[i] * tmpmem[d] = tmpmem[0] + W*tmpmem[2]; */ __global__ void cukern_SolveRK_final(double *tmpmem, int i, double B, double W, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; tmpmem += x; while(x < partNumel) { /* compute Y1 value */ tmpmem[2*FLUID_SLABPITCH] = tmpmem[0] + W*(tmpmem[2*FLUID_SLABPITCH] + B*tmpmem[i*FLUID_SLABPITCH]); x += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* This function computes an explicit RK intermediate that takes one F eval * the new stage is computed using * tmpmem[d] = tmpmem[0] + (A*tmpmem[i] * and the accumulator goes as * tmpmem[2] += B * tmpmem[i1] */ template <bool resetAccumulator> __global__ void cukern_SolveRK_single(double *tmpmem, int d, double A, int i, double B, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; tmpmem += x; while(x < partNumel) { /* compute stage value */ tmpmem[d*FLUID_SLABPITCH] = tmpmem[0] + A*tmpmem[i*FLUID_SLABPITCH]; /* compute accumulator */ if(resetAccumulator) { tmpmem[2*FLUID_SLABPITCH] = B * tmpmem[i*FLUID_SLABPITCH]; } else { tmpmem[2*FLUID_SLABPITCH] += B * tmpmem[i*FLUID_SLABPITCH]; } x += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* This function computes an explicit RK intermediate that takes two F evals * the new stage is computed using * tmpmem[d] = tmpmem[0] + (F0 * tmpmem[i0] + F1 * tmpmem[i1]); * and the accumulator goes as * tmpmem[2] += B * tmpmem[i1] * (Implicitly, F1 at i1 is the new F eval to be accumulated) */ template <bool resetAccumulator> __global__ void cukern_SolveRK_double(double *tmpmem, int d, double F[2], int i[2], double B, unsigned long partNumel) { 
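	/* Illustrative mapping of this kernel family onto a Butcher tableau (a sketch under assumed
	 * slab assignments, not a fixed calling convention): slab 0 holds y_0, slab 2 is the
	 * accumulator/result, and other slabs hold stage derivatives written by the drag kernels.
	 * Heun's method (RK2), assuming k1 lands in slab 3 and k2 in slab 5, would read:
	 *     cukern_SolveRK_single<true><<<grid, block>>>(tmp, 4, dt, 3, 0.5, N); // slab4 = y0 + dt*k1, acc = 0.5*k1
	 *     ...evaluate k2 at the state in slab 4, writing it into slab 5...
	 *     cukern_SolveRK_final<<<grid, block>>>(tmp, 5, 0.5, dt, N);           // slab2 = y0 + dt*(0.5*k1 + 0.5*k2)
	 * _double and _triple evaluate tableau rows with two or three nonzero entries the same way. */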
int x = threadIdx.x + blockIdx.x*blockDim.x; tmpmem += x; while(x < partNumel) { /* compute stage value */ tmpmem[d*FLUID_SLABPITCH] = tmpmem[0] + (F[0]*tmpmem[i[0]*FLUID_SLABPITCH] + F[1]*tmpmem[i[1]*FLUID_SLABPITCH]); /* compute accumulator */ if(resetAccumulator) { tmpmem[2*FLUID_SLABPITCH] = B * tmpmem[i[1]*FLUID_SLABPITCH]; } else { tmpmem[2*FLUID_SLABPITCH] += B * tmpmem[i[1]*FLUID_SLABPITCH]; } x += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* This function computes an explicit RK intermediate that takes two F evals * the new stage is computed using * tmpmem[d] = tmpmem[0] + sum_{i=0}^{i=2} (F[i] * tmpmem[idx[i]]); * and the accumulator goes as * tmpmem[2] += B * tmpmem[i[2]] * (Implicitly, F1 at i[2] is the new F eval to be accumulated) */ template <bool resetAccumulator> __global__ void cukern_SolveRK_triple(double *tmpmem, int d, double F[3], int i[3], double B, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; tmpmem += x; while(x < partNumel) { /* compute stage value */ tmpmem[d*FLUID_SLABPITCH] = tmpmem[0] + (F[0]*tmpmem[i[0]*FLUID_SLABPITCH] + F[1]*tmpmem[i[1]*FLUID_SLABPITCH] + F[2]*tmpmem[i[2]*FLUID_SLABPITCH]); /* compute accumulator */ if(resetAccumulator) { tmpmem[2*FLUID_SLABPITCH] = B * tmpmem[i[2]*FLUID_SLABPITCH]; } else { tmpmem[2*FLUID_SLABPITCH] += B * tmpmem[i[2]*FLUID_SLABPITCH]; } x += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* From the initial momentum difference from *gas and *dust, computes the change in their momentum * densities to reach momentum difference *dp, given the relative fraction of acceleration * experienced by the gas and dust particles, and applies total energy conservation to solve * the gas/dust energy densities */ __global__ void cukern_applyFinalDeltaV(double *g, double *d, double *dv_final, unsigned long partNumel) { int x = threadIdx.x + blockIdx.x*blockDim.x; g += x; d += x; dv_final += x; double vstick[3]; double dvhat[3]; double rhog, rhod; double a, b, c, p1, p2; double dustmom, dustmomfin; while(x < partNumel) { rhog = g[0]; rhod = d[0]; // convert rho & momentum into CoM velocity & differential velocity p1 = g[2*FLUID_SLABPITCH]; p2 = d[2*FLUID_SLABPITCH]; vstick[0] = (p1+p2)/(rhog+rhod); dvhat[0] = p1/rhog - p2/rhod; p1 = g[3*FLUID_SLABPITCH]; p2 = d[3*FLUID_SLABPITCH]; vstick[1] = (p1+p2)/(rhog+rhod); dvhat[1] = p1/rhog - p2/rhod; p1 = g[4*FLUID_SLABPITCH]; p2 = d[4*FLUID_SLABPITCH]; vstick[2] = (p1+p2)/(rhog+rhod); dvhat[2] = p1/rhog - p2/rhod; // Compute differential velocity unit vector a = dv_final[2*FLUID_SLABPITCH] / sqrt(dvhat[0]*dvhat[0] + dvhat[1]*dvhat[1]+dvhat[2]*dvhat[2]); dvhat[0] *= a; dvhat[1] *= a; dvhat[2] *= a; // Reduced mass proves useful b = rhog*rhod/(rhog+rhod); // Accumulate initial & final dust momenta for exact energy conservation; // Convert CoM and decayed differential velocities back to momenta densities dustmom = d[2*FLUID_SLABPITCH]*d[2*FLUID_SLABPITCH]; g[2*FLUID_SLABPITCH] = rhog*vstick[0] + dvhat[0]*b; d[2*FLUID_SLABPITCH] = c = rhod*vstick[0] - dvhat[0]*b; dustmomfin = c*c; dustmom += d[3*FLUID_SLABPITCH]*d[3*FLUID_SLABPITCH]; g[3*FLUID_SLABPITCH] = rhog*vstick[1] + dvhat[1]*b; d[3*FLUID_SLABPITCH] = c = rhod*vstick[1] - dvhat[1]*b; dustmomfin += c*c; dustmom += d[4*FLUID_SLABPITCH]*d[4*FLUID_SLABPITCH]; g[4*FLUID_SLABPITCH] = rhog*vstick[2] + dvhat[2]*b; d[4*FLUID_SLABPITCH] = c = rhod*vstick[2] - dvhat[2]*b; dustmomfin += c*c; // Conserve total energy to machine precision // d/dt (KE_gas + Eint_gas + KE_dust) = 0 // d/dt (KE_gas + 
Eint_gas) = -d/dt(KE_dust)
		// Etot_gas(after) - Etot_gas(before) = -(KE_dust(after)-KE_dust(before))
		// -> Etot_gas += KE_dust(ini) - KE_dust(fin)
		g[FLUID_SLABPITCH] += .5*(dustmom - dustmomfin)/d[0];

		// FIXME - this is a hack to preserve dust "pressure" because I lack an inviscid
		// FIXME - Burgers solver or sticky-dust Godunov routine. So simply set it to a
		// FIXME - uniform low temperature
		d[FLUID_SLABPITCH] = .5*dustmomfin/d[0] + 1e-4 * d[0];

		x += blockDim.x*gridDim.x;
		g += blockDim.x*gridDim.x;
		d += blockDim.x*gridDim.x;
		dv_final += blockDim.x*gridDim.x;
	}
}

/* (2) [u_hf, |dv_hf|] = cukern_ExpMidpoint_partA(gas_state, dust_state, k_0, P_x, P_y, P_z)
 * compute time-reversed elements of dv again (memory & memory BW precious, v_i = (p_i - 2 P_i t)/rho cheap as dirt)
 * solve y_i' = -k_0 y_i + a_i, a_i = - P_i / rho_gas per vector element
 * y(t) = a_i / k_0 + (y_i - a_i/k_0) exp(-k_0 t)
 * this is an L-stable method for the drag equation
 * Our only interest in solving this is to re-evaluate the linear operation matrix at t_half
 * Linear matrix is diag([k_n k_n k_n]) -> require only |dv_half| to re-call gasDustDrag */
__global__ void cukern_ExpMidpoint_partA(double *gas, double *dust, double *tmpmem, double t, unsigned long partNumel)
{
	int x = threadIdx.x + blockIdx.x*blockDim.x;
	gas += x;
	dust += x;
	tmpmem += x;

	double rhoginv; // 1/rho_gas
	double dv_i;    // element of delta-v
	double k;       // drag eigenvalue
	double a0;      // element of accel = gradient(P)/rho_gas
	double dv_t;    // updated delta v. not sure if needed independently...
	double dvsq;    // accumulated (delta-v)^2
	double duint;   // accumulated drag heating

	while(x < partNumel) {
		// load k, solve driven linear system
		k = tmpmem[0];

		// load & compute time-reversed delta-vx
		rhoginv = 1.0 / gas[0];
		a0 = tmpmem[2*FLUID_SLABPITCH];
#ifdef EXPO_DOTR
		dv_i = (gas[2*FLUID_SLABPITCH] + t*a0)*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0];
#else
		dv_i = (gas[2*FLUID_SLABPITCH])*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0];
#endif
		// compute decay of this value
		a0 *= rhoginv / k;
		dv_t = a0 + (dv_i - a0)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation
		// accumulate new delta-v^2
		dvsq = dv_t*dv_t;
		// accumulate drag heating
		duint = k*a0*a0*t - 2*a0*(dv_i - a0)*expm1(-k*t) - (dv_i - a0)*(dv_i - a0)*expm1(-2*k*t);

		// Repeat the above for the other two components
		a0 = tmpmem[3*FLUID_SLABPITCH];
#ifdef EXPO_DOTR
		dv_i = (gas[3*FLUID_SLABPITCH] + t*a0)*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0];
#else
		dv_i = (gas[3*FLUID_SLABPITCH])*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0];
#endif
		a0 *= rhoginv/k;
		dv_t = a0 + (dv_i - a0)*exp(-t*k);
		dvsq += dv_t*dv_t;
		duint += k*a0*a0*t - 2*a0*(dv_i - a0)*expm1(-k*t) - (dv_i - a0)*(dv_i - a0)*expm1(-2*k*t);

		a0 = tmpmem[4*FLUID_SLABPITCH];
#ifdef EXPO_DOTR
		dv_i = (gas[4*FLUID_SLABPITCH] + t*a0)*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0];
#else
		dv_i = (gas[4*FLUID_SLABPITCH])*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0];
#endif
		a0 *= rhoginv/k;
		dv_t = a0 + (dv_i - a0)*exp(-t*k);
		dvsq += dv_t*dv_t;
		duint += k*a0*a0*t - 2*a0*(dv_i - a0)*expm1(-k*t) - (dv_i - a0)*(dv_i - a0)*expm1(-2*k*t);

		tmpmem[0] = sqrt(dvsq); // overwrite in place
		tmpmem[FLUID_SLABPITCH] += GAMMAM1 * duint * rhoginv;

		// advance ptrs
		x += blockDim.x*gridDim.x;
		gas += blockDim.x*gridDim.x;
		dust += blockDim.x*gridDim.x;
		tmpmem += blockDim.x*gridDim.x;
	}
}

/*(5) [(gas_state), (dust_state)] = exponentialMidpt(gas_state, dust_state, k_hf, P_x, P_y, P_z)
 * compute time-reversed elements of dv a 3rd time (memory & memory BW precious, v_i =
(p_i - 2 P_i t)/rho cheap as dirt) * advance to drag-applied dv values dv_i <- -P_i/(k_hf rho) + (dv_i + P_i/(k_hf rho))*exp(-k_hf t) * compute new u_specific? or let d/dt(Etotal) = 0 do the job? does that still work? * overwrite gas_state/dust_state using updated values * ... */ __global__ void cukern_ExpMidpoint_partB(double *gas, double *dust, double t, double *tmpmem) { int x = threadIdx.x + blockIdx.x*blockDim.x; gas += x; dust += x; tmpmem += x; double rhoginv; // 1/rho_gas // double rhodinv; // 1/rho_dust double dv_i; // element of delta-v double k; // drag eigenvalue double dpdt; // element of accel = gradient(P)/rho_gas double dv_t; // updated delta v. not sure if needed independently... double pdustsq; // use to track accumulated transfer of total energy double vstick; // barycentric velocity of gas-dust system double mu; // reduced mass double q; // scratchpad variable while(x < FLUID_SLABPITCH) { // load & compute time-reversed delta-vx and stick velocity rhoginv = 1.0 / gas[0]; mu = gas[0]*dust[0]/(gas[0]+dust[0]); pdustsq = -dust[2*FLUID_SLABPITCH] * dust[2*FLUID_SLABPITCH]; vstick = (gas[2*FLUID_SLABPITCH]+dust[2*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[2*FLUID_SLABPITCH]; #else dpdt = 0; #endif #ifdef EXPO_DOTR dv_i = (gas[2*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[2*FLUID_SLABPITCH])*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0]; #endif // load k, solve driven linear system k = tmpmem[0]; dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation // recalculate new differential velocities gas[2*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[2*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; // accumulate change in dust kinetic energy pdustsq += q*q; // // do Y direction pdustsq -= dust[3*FLUID_SLABPITCH]*dust[3*FLUID_SLABPITCH]; vstick = (gas[3*FLUID_SLABPITCH]+dust[3*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[3*FLUID_SLABPITCH]; #endif #ifdef EXPO_DOTR dv_i = (gas[3*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[3*FLUID_SLABPITCH])*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0]; #endif dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[3*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[3*FLUID_SLABPITCH]= q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // do Z direction pdustsq -= dust[4*FLUID_SLABPITCH]*dust[4*FLUID_SLABPITCH]; vstick = (gas[4*FLUID_SLABPITCH]+dust[4*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[4*FLUID_SLABPITCH]; #endif #ifdef EXPO_DOTR dv_i = (gas[4*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[4*FLUID_SLABPITCH])*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0]; #endif dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[4*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[4*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // From conservation of total energy we have that the gas total energy decreases by whatever // amount the dust kinetic energy rises; Under (M_dust >> M_atom) the gas gets ~100% of heating gas[FLUID_SLABPITCH] -= .5*pdustsq / dust[0]; // advance ptrs x += blockDim.x*gridDim.x; gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; tmpmem += 
blockDim.x*gridDim.x; } } /*(5) [(gas_state), (dust_state)] = cukern_ETD1RK(gas_state, dust_state, k_hf, P_x, P_y, P_z) * compute time-reversed elements of dv a 3rd time (memory & memory BW precious, v_i = (p_i - 2 P_i t)/rho cheap as dirt) * advance to drag-applied dv values dv_i <- -P_i/(k_hf rho) + (dv_i + P_i/(k_hf rho))*exp(-k_hf t) * compute new u_specific? or let d/dt(Etotal) = 0 do the job? does that still work? * overwrite gas_state/dust_state using updated values * ... */ __global__ void cukern_ETDRK1(double *gas, double *dust, double t, double *tmpmem) { int x = threadIdx.x + blockIdx.x*blockDim.x; gas += x; dust += x; tmpmem += x; double rhoginv; // 1/rho_gas // double rhodinv; // 1/rho_dust double dv_i; // element of delta-v double k; // drag eigenvalue double dpdt; // element of accel = gradient(P)/rho_gas double dv_t; // updated delta v. not sure if needed independently... double pdustsq; // use to track accumulated transfer of total energy double vstick; // barycentric velocity of gas-dust system double mu; // reduced mass double q; // scratchpad variable while(x < FLUID_SLABPITCH) { // load & compute time-reversed delta-vx and stick velocity rhoginv = 1.0 / gas[0]; mu = gas[0]*dust[0]/(gas[0]+dust[0]); pdustsq = -dust[2*FLUID_SLABPITCH] * dust[2*FLUID_SLABPITCH]; vstick = (gas[2*FLUID_SLABPITCH]+dust[2*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[2*FLUID_SLABPITCH]; #else dpdt = 0; #endif #ifdef EXPO_DOTR dv_i = (gas[2*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[2*FLUID_SLABPITCH])*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0]; #endif // load k, solve driven linear system k = tmpmem[0]; dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation // recalculate new differential velocities gas[2*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[2*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; // accumulate change in dust kinetic energy pdustsq += q*q; // // do Y direction pdustsq -= dust[3*FLUID_SLABPITCH]*dust[3*FLUID_SLABPITCH]; vstick = (gas[3*FLUID_SLABPITCH]+dust[3*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[3*FLUID_SLABPITCH]; #endif #ifdef EXPO_DOTR dv_i = (gas[3*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[3*FLUID_SLABPITCH])*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0]; #endif dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[3*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[3*FLUID_SLABPITCH]= q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // do Z direction pdustsq -= dust[4*FLUID_SLABPITCH]*dust[4*FLUID_SLABPITCH]; vstick = (gas[4*FLUID_SLABPITCH]+dust[4*FLUID_SLABPITCH]) / (gas[0] + dust[0]); #ifdef ACCOUNT_GRADP dpdt = -tmpmem[4*FLUID_SLABPITCH]; #endif #ifdef EXPO_DOTR dv_i = (gas[4*FLUID_SLABPITCH] - t*dpdt)*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0]; #else dv_i = (gas[4*FLUID_SLABPITCH])*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0]; #endif dpdt *= mu*rhoginv; dv_t = dpdt/k + (dv_i - dpdt/k)*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[4*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[4*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // From conservation of total energy we have that the gas total energy decreases by whatever // amount the dust kinetic energy rises; Under (M_dust >> M_atom) the gas 
gets ~100% of heating gas[FLUID_SLABPITCH] -= .5*pdustsq / dust[0]; // advance ptrs x += blockDim.x*gridDim.x; gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } /* Assuming the temp registers are preloaded with * [dv_0 u_0 Px Py Pz] * First call drag solve to get * [dv_0 u_0 Px Py Pz k_0] * Then solve ETD1RK to get dv_1, u_1: * [dv_1 u_1 Px Py Pz k_0] * call drag solve with accumulate=yes set to get * [dv_1 u_1 Px Py Pz (k_0+k_1)] * solve the log integral to find y_n+1 */ template <int order> __global__ void cukern_LogTrapSolve(double *gas, double *dust, double t, double *tmpmem, int partNumel) { double rhoginv; // 1/rho_gas // double rhodinv; // 1/rho_dust double dv_i; // element of delta-v double k; // drag eigenvalue double pdustsq; // element of accel = gradient(P)/rho_gas double dv_t; // updated delta v. not sure if needed independently... double dvsq; // use to track accumulated transfer of total energy double vstick; // barycentric velocity of gas-dust system double mu; // reduced mass double q; // scratchpad variable double duint; int x = threadIdx.x + blockIdx.x*blockDim.x; gas += x; dust += x; tmpmem += x; // Use ETDRK1 to approximate y_1 to first order while(x < FLUID_SLABPITCH) { mu = dust[0]/(gas[0]+dust[0]); // reduced density is needed more or less immediately /* Assuming the temp registers are preloaded with * [dv_0 u_0] */ // call drag eigenvalue solver. cukern_GasDustDrag_GeneralLinearCore<true>(gas, dust, tmpmem, 0, 2, partNumel); /* temp contents: * [dv_0 u_0 k_0] */ k = tmpmem[2*FLUID_SLABPITCH]; dv_i = tmpmem[0]; dv_t = dv_i*exp(-t*k); #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i,line %i: first point: initial dv=%le, k = %le, t=%le, new dv=%le\n", __LINE__, blockIdx.x, threadIdx.x, dv_i, k, t, dv_t); } #endif // accumulate new delta-v^2 and drag heating effect dvsq = dv_t*dv_t; duint = -.5*dv_i*dv_i*mu*expm1(-2*k*t); tmpmem[0] = sqrt(dvsq); // Add the dissipated relative KE before reassessing the drag coefficient tmpmem[FLUID_SLABPITCH] += duint; /* temp contents: * [dv_1 u_1 k_0] */ // Solve drag eigenvalue k1 and accumulate in register 2 cukern_GasDustDrag_GeneralLinearCore<false>(gas, dust, tmpmem, 0, 2, partNumel); #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i,line %i: Second k solve: k = %le\n", __LINE__, blockIdx.x, threadIdx.x, tmpmem[2*FLUID_SLABPITCH]); } #endif /* temp contents: * [dv_1 u_1 (k_0+k_1)] */ // Now the cutesy tricksy bit: // Cleverly reverse our way to t=1/2 and compute k just once more... 
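		/* The weights used just below follow from Richardson-extrapolating the trapezoid estimate:
		 * with M1 = 0.5*(k_0 + k_1) and M2 = 0.25*k_0 + 0.5*k_h + 0.25*k_1, the extrapolant is
		 *     (4*M2 - M1)/3 = (k_0 + 4*k_h + k_1)/6 = (1/6)*(k_0 + k_1) + (2/3)*k_h,
		 * i.e. Simpson's rule applied to the drag rate; slab 2 already holds (k_0 + k_1) here. */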
// If this is set to zero, our calculation is exponential trapezoid // and has stiff time order two if(order==3) { // If one, we perform a cubic algorithmic fit and acheive third stiff order // with outrageous accuracy // Step one, back half way up: reset original internal U tmpmem[FLUID_SLABPITCH] -= duint; // take a halfstep k = .25*(tmpmem[2*FLUID_SLABPITCH]); dv_t = dv_i*exp(-t*k); // accumulate new delta-v^2 and drag heating effect dvsq = dv_t*dv_t; duint = -0.5*dv_i*dv_i*mu*expm1(-2*k*t); // #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("b=%i,t=%i, line %i: halfstep: k=%le, dv_t = %le\n", __LINE__, blockIdx.x, threadIdx.x, k, dv_t); } #endif // write these into storage tmpmem[0] = sqrt(dvsq); tmpmem[FLUID_SLABPITCH] += duint; // compute the explicit midpoint value of k cukern_GasDustDrag_GeneralLinearCore<true>(gas, dust, tmpmem, 0, 0, partNumel); // Apply Richardson extrapolation to find the new K value k = (0.16666666666666666667 *tmpmem[2*FLUID_SLABPITCH] + 0.66666666666666666667*tmpmem[0]); } else { k = .5*tmpmem[2*FLUID_SLABPITCH]; } // The clever weighting of k above yields in it a value which will take us to the point that the // complex actual drag ends up at, to third order, when we do exp(-k t) // horray for path independent work integrals! mu = mu * gas[0]; // mu was abused to compute the heating integral above // load & compute time-reversed delta-vx and stick velocity rhoginv = 1.0 / gas[0]; pdustsq = -dust[2*FLUID_SLABPITCH] * dust[2*FLUID_SLABPITCH]; vstick = (gas[2*FLUID_SLABPITCH]+dust[2*FLUID_SLABPITCH]) / (gas[0] + dust[0]); dv_i = (gas[2*FLUID_SLABPITCH])*rhoginv - dust[2*FLUID_SLABPITCH]/dust[0]; dv_t = dv_i*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation #ifdef THREAD0_PRINTS_DBG if(threadIdx.x == 0) { printf("t=%i,b=%i,line %i: final solve reporting: dv_i = %le, dt=%le, k = %le, dv_f = %le\n", threadIdx.x, blockIdx.x, __LINE__, dv_i, t, k, dv_t); } #endif // recalculate new differential velocities gas[2*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[2*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; // accumulate change in dust kinetic energy pdustsq += q*q; // // do Y direction pdustsq -= dust[3*FLUID_SLABPITCH]*dust[3*FLUID_SLABPITCH]; vstick = (gas[3*FLUID_SLABPITCH]+dust[3*FLUID_SLABPITCH]) / (gas[0] + dust[0]); dv_i = (gas[3*FLUID_SLABPITCH])*rhoginv - dust[3*FLUID_SLABPITCH]/dust[0]; dv_t = dv_i*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[3*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[3*FLUID_SLABPITCH]= q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // do Z direction pdustsq -= dust[4*FLUID_SLABPITCH]*dust[4*FLUID_SLABPITCH]; vstick = (gas[4*FLUID_SLABPITCH]+dust[4*FLUID_SLABPITCH]) / (gas[0] + dust[0]); dv_i = (gas[4*FLUID_SLABPITCH])*rhoginv - dust[4*FLUID_SLABPITCH]/dust[0]; dv_t = dv_i*exp(-t*k); // I assume it will auto-optimize this into one transcendental evaluation gas[4*FLUID_SLABPITCH] = gas[0]*vstick + dv_t*mu; dust[4*FLUID_SLABPITCH] = q = dust[0]*vstick - dv_t*mu; pdustsq += q*q; // From conservation of total energy we have that the gas total energy decreases by whatever // amount the dust kinetic energy rises; Under (M_dust >> M_atom) the gas gets ~100% of heating gas[FLUID_SLABPITCH] -= .5*pdustsq / dust[0]; // advance ptrs x += blockDim.x*gridDim.x; gas += blockDim.x*gridDim.x; dust += blockDim.x*gridDim.x; tmpmem += blockDim.x*gridDim.x; } } // This awful wad of mutated copypasta from the cudaGradientKernels.cu file provides the // initial 
conditions for the ERK2 integrator to run; It computes five output values from nine // input values __global__ void writeScalarToVector(double *x, long numel, double f); __global__ void cukern_prepareForERK_h0(double *gas, double *dust, double *outputs, long numel); // compute grad(phi) in XYZ or R-Theta-Z with 2nd or 4th order accuracy template <geometryType_t coords> __global__ void cukern_prepareForERK3D_h2(double *gas, double *dust, double *em, int3 arraysize); template <geometryType_t coords> __global__ void cukern_prepareForERK3D_h4_partone(double *phi, double *fx, double *fy, int3 arraysize); __global__ void cukern_prepareForERK3D_h4_parttwo(double *phi, double *fz, int3 arraysize); // compute grad(phi) in X-Y or R-Theta with 2nd or 4th order accuracy template <geometryType_t coords> __global__ void cukern_prepareForERK2D_h2(double *gas, double *dust, double *em, int3 arraysize); template <geometryType_t coords> __global__ void cukern_prepareForERK2D_h4(double *phi, double *fx, double *fy, int3 arraysize); // Compute grad(phi) in X-Z or R-Z with 2nd or 4th order accuracy __global__ void cukern_prepareForERKRZ_h2(double *gas, double *dust, double *em, int3 arraysize); __global__ void cukern_prepareForERKRZ_h4(double *phi, double *fx, double *fz, int3 arraysize); #define GRADBLOCKX 16 #define GRADBLOCKY 16 // scalingParameter / 2h or /12h depending on spatial order of scheme #define LAMX devLambda[0] #define LAMY devLambda[1] #define LAMZ devLambda[2] #define RINNER devLambda[7] #define DELTAR devLambda[8] /* Given the gas (5xMGArray), dust (5xMGArray), and temporary memory (5 regs) pointers, along with * geometry information, computes five outputs into the 5 temp memory slabs: [|dv_timereversed|, uinternal, dP/dx, dP/dy, dP/dz] * for this call, spaceOrder must be 2 (or error) and scalingParameter should be 1 (or the math is wrong). */ int prepareForExpMethod(MGArray *gas, MGArray *dust, MGArray *tempMem, GeometryParams geom, int spaceOrder, double scalingParameter) { dim3 gridsize, blocksize; double lambda[11]; int i; int worked; int sub[6]; double *dx = &geom.h[0]; if(spaceOrder == 4) { lambda[0] = scalingParameter/(12.0*dx[0]); lambda[1] = scalingParameter/(12.0*dx[1]); lambda[2] = scalingParameter/(12.0*dx[2]); } else if(spaceOrder == 2) { lambda[0] = scalingParameter/(2.0*dx[0]); lambda[1] = scalingParameter/(2.0*dx[1]); lambda[2] = scalingParameter/(2.0*dx[2]); } else if(spaceOrder == 0) { lambda[0] = scalingParameter; } lambda[7] = geom.Rinner; // This is actually overwritten per partition below lambda[8] = dx[1]; int isThreeD = (gas->dim[2] > 1); int isRZ = (gas->dim[2] > 1) & (gas->dim[1] == 1); if(spaceOrder > 0) { for(i = 0; i < gas->nGPUs; i++) { cudaSetDevice(gas->deviceID[i]); calcPartitionExtent(gas, i, &sub[0]); lambda[7] = geom.Rinner + dx[0] * sub[0]; // Innermost cell coord may change per-partition cudaMemcpyToSymbol((const void *)devLambda, lambda, 11*sizeof(double), 0, cudaMemcpyHostToDevice); worked = CHECK_CUDA_ERROR("cudaMemcpyToSymbol"); if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break; } if(worked != SUCCESSFUL) return worked; } double *gasPtr; double *dustPtr; double *tmpPtr; // Iterate over all partitions, and here... we... go! 
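	/* Launch-geometry note (matches the arithmetic below): each GRADBLOCKX x GRADBLOCKY block only
	 * writes its (GRADBLOCKX - spaceOrder) x (GRADBLOCKY - spaceOrder) interior cells, the rim being
	 * re-read halo, so the grid is ceil(n / (blockdim - spaceOrder)) blocks per direction. For
	 * example nx = 100 with GRADBLOCKX = 16 and spaceOrder = 2 gives 100/14 -> 8 blocks in x. */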
for(i = 0; i < gas->nGPUs; i++) { cudaSetDevice(gas->deviceID[i]); worked = CHECK_CUDA_ERROR("cudaSetDevice"); if(worked != SUCCESSFUL) break; calcPartitionExtent(gas, i, sub); int3 arraysize; arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5]; dim3 blocksize(GRADBLOCKX, GRADBLOCKY, 1); if(spaceOrder > 0) { gridsize.x = arraysize.x / (blocksize.x - spaceOrder); gridsize.x += ((blocksize.x-spaceOrder) * gridsize.x < arraysize.x) * 1 ; if(isRZ) { gridsize.y = arraysize.z / (blocksize.y - spaceOrder); gridsize.y += ((blocksize.y-spaceOrder) * gridsize.y < arraysize.z); } else { gridsize.y = arraysize.y / (blocksize.y - spaceOrder); gridsize.y += ((blocksize.y-spaceOrder) * gridsize.y < arraysize.y) * 1; } gridsize.z = 1; } else { gridsize.x = 256; gridsize.y = gridsize.z = 1; blocksize.x = 32; blocksize.y = blocksize.z = 1; } gasPtr = gas->devicePtr[i]; dustPtr = dust->devicePtr[i]; tmpPtr = tempMem->devicePtr[i]; long int ne = (long)sub[3] * (long)sub[4] * (long)sub[5]; switch(spaceOrder) { case 0: cukern_prepareForERK_h0<<<gridsize, blocksize>>>(gasPtr, dustPtr, tmpPtr, ne); break; case 2: if(isThreeD) { if(isRZ) { cukern_prepareForERKRZ_h2<<<gridsize, blocksize>>>(gasPtr, dustPtr, tmpPtr, arraysize); } else { if(geom.shape == SQUARE) { cukern_prepareForERK3D_h2<SQUARE><<<gridsize, blocksize>>> (gasPtr, dustPtr, tmpPtr, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_prepareForERK3D_h2<CYLINDRICAL><<<gridsize, blocksize>>> (gasPtr, dustPtr, tmpPtr, arraysize); } } } else { if(geom.shape == SQUARE) { cukern_prepareForERK2D_h2<SQUARE><<<gridsize, blocksize>>>(gasPtr, dustPtr, tmpPtr, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_prepareForERK2D_h2<CYLINDRICAL><<<gridsize, blocksize>>>(gasPtr, dustPtr, tmpPtr, arraysize); } } break; /*case 4: if(isThreeD) { if(isRZ) { cukern_prepareForERKRZ_h4<<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr + 2*gas->partNumel[i], arraysize); writeScalarToVector<<<32, 256>>>(tmpPtr + slabsize, gas->partNumel[i], 0.0); } else { if(geom.shape == SQUARE) { cukern_prepareForERK3D_h4_partone<SQUARE><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); cukern_prepareForERK3D_h4_parttwo<<<gridsize, blocksize>>>(gasPtr, tmpPtr+ slabsize*2, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_prepareForERK3D_h4_partone<CYLINDRICAL><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); cukern_prepareForERK3D_h4_parttwo<<<gridsize, blocksize>>>(gasPtr, tmpPtr+ slabsize*2, arraysize); } } } else { if(geom.shape == SQUARE) { cukern_prepareForERK2D_h4<SQUARE><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_prepareForERK2D_h4<CYLINDRICAL><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); } writeScalarToVector<<<32, 256>>>(tmpPtr+2*gas->partNumel[i], gas->partNumel[i], 0.0); } break;*/ default: PRINT_FAULT_HEADER; printf("Was passed spatial order parameter of %i, must be passed 0 or 2 (2nd order)\n", spaceOrder); PRINT_FAULT_FOOTER; return ERROR_INVALID_ARGS; } worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_prepareForERK"); if(worked != SUCCESSFUL) break; } if(worked != SUCCESSFUL) return worked; // FIXME this needs to either understand slabs, or we need to fetch 3 slab ptrs into an array & pass it instead // worked = MGA_exchangeLocalHalos(gradient, 5); // need to? 
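	/* For reference, the call pattern used by solveDragLogTrapezoid earlier in this file is roughly:
	 *     MGArray tmp;
	 *     MGA_allocSlab(gas, &tmp, 3);                                   // slab 0: |dv|, 1: u_specific, 2: scratch/k
	 *     prepareForExpMethod(gas, dust, &tmp, geom, 0, fluidGamma - 1); // spaceOrder 0 -> cukern_prepareForERK_h0
	 *     ...per-GPU drag kernels then read tmp through FLUID_SLABPITCH offsets...
	 *     MGA_delete(&tmp);
	 * (a sketch of the existing usage, not an additional API) */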
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked; return CHECK_IMOGEN_ERROR(worked); } // Needed with the gradient calculators in 2D because they leave the empty directions uninitialized // Vomits the value f into array x, from x[0] to x[numel-1] __global__ void writeScalarToVector(double *x, long numel, double f) { long a = threadIdx.x + blockDim.x*blockIdx.x; for(; a < numel; a+= blockDim.x*gridDim.x) { x[a] = f; } } __device__ double gas2press(double *g) { return (g[FLUID_SLABPITCH]-.5*(g[2*FLUID_SLABPITCH]*g[2*FLUID_SLABPITCH]+g[3*FLUID_SLABPITCH]*g[3*FLUID_SLABPITCH]+g[4*FLUID_SLABPITCH]*g[4*FLUID_SLABPITCH])/g[0]); } /* Prepares for an exponential type method when no spatial differencing is involved (i.e. all cases now * since it is known that the time reversal idea doesn't work). This means that output is a simple * state function of local input so we can just treat the arrays as 1D vectors regardless of actual geometry */ __global__ void cukern_prepareForERK_h0(double *gas, double *dust, double *outputs, long numel) { int nx = blockDim.x * gridDim.x; int myX = threadIdx.x + blockDim.x * blockIdx.x; // x = thread x + block x if(myX >= numel) return; double dv, dvsq, press; for(; myX < numel; myX += nx) { dv = (gas[myX+2*FLUID_SLABPITCH])/gas[myX] - dust[myX+2*FLUID_SLABPITCH]/dust[myX]; dvsq = dv*dv; dv = (gas[myX+3*FLUID_SLABPITCH])/gas[myX] - dust[myX+3*FLUID_SLABPITCH]/dust[myX]; dvsq += dv*dv; dv = (gas[myX+4*FLUID_SLABPITCH])/gas[myX] - dust[myX+4*FLUID_SLABPITCH]/dust[myX]; dvsq += dv*dv; press = gas2press(gas + myX); outputs[myX ] = sqrt(dvsq); // output initial delta-v outputs[myX + FLUID_SLABPITCH] = press / gas[myX]; // = P/rho = specific internal energy density } } /* Algorithm: * [|dv_tr|, u_0, P_x, P_y, P_z] = exponentialSetup(gas_state, dust_state) * 5 output registers * may need slope limiter on gradient calculation? 
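 * For the h2 kernel below, the five slabs of em hold, per cell: slab 0 = |dv| (time-reversed via the
 * dragparams[7]*deltaP terms only when EXPO_DOTR is defined), slab 1 = specific internal energy
 * (gas2press()/rho; note gas2press actually returns the internal energy density, Etot - KE), and
 * slabs 2-4 = centered differences of gas2press() scaled by LAMX/LAMY/LAMZ = scalingParameter/(2h),
 * which are the pressure gradient components when scalingParameter = gamma-1.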
*/ template <geometryType_t coords> __global__ void cukern_prepareForERK3D_h2(double *gas, double *dust, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaP; // Store derivative of phi in one direction double dv, dvsq; __shared__ double phiA[GRADBLOCKX*GRADBLOCKY]; __shared__ double phiB[GRADBLOCKX*GRADBLOCKY]; __shared__ double phiC[GRADBLOCKX*GRADBLOCKY]; double *U; double *V; double *W; double *temp; U = phiA; V = phiB; W = phiC; // compute P on lower plane U[myLocAddr] = gas2press(gas + (globAddr + arraysize.x*arraysize.y*(arraysize.z-1))); V[myLocAddr] = gas2press(gas + globAddr); __syncthreads(); int z; int deltaz = arraysize.x*arraysize.y; for(z = 0; z < arraysize.z; z++) { if(z >= arraysize.z - 1) deltaz = - arraysize.x*arraysize.y*(arraysize.z-1); if(IWrite) { deltaP = LAMX*(V[myLocAddr+1]-V[myLocAddr-1]); em[globAddr + 2*FLUID_SLABPITCH] = deltaP; // need time-reversed dv = (vgas - vdust) + t*(deltaP / rho) // = ((pgas + t deltaP)/rhogas - pdust/rhodust #ifdef EXPO_DOTR dv = (gas[globAddr+2*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+2*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq = dv*dv; // accumulate |dv_tr| } if(IWrite) { if(coords == SQUARE) { deltaP = LAMY*(V[myLocAddr+GRADBLOCKX]-V[myLocAddr-GRADBLOCKX]); } if(coords == CYLINDRICAL) { // In cylindrical coords, use dt/dphi * (delta-phi) / r to get d/dy deltaP = LAMY*(V[myLocAddr+GRADBLOCKX]-V[myLocAddr-GRADBLOCKX]) / (RINNER + DELTAR*myX); } em[globAddr + 3*FLUID_SLABPITCH] = deltaP; #ifdef EXPO_DOTR dv = (gas[globAddr+3*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+3*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+3*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+3*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; } /* we must protect on both sides of this * tl;dr: with only barrier B, warps 1 and 2 depart at the same time * But if, suppose, warp 1 gets delayed in the slow sqrt() calculation while * warp 2 goes ahead and runs all the way back to where barrier A is. * * Without barrier A, warp 2 will overwrite W (which for warp 1 is still U) * and the calculation will be corrupted. 
*/ __syncthreads(); W[myLocAddr] = gas2press(gas + (globAddr + deltaz)); __syncthreads(); // barrier B if(IWrite) { deltaP = LAMZ*(W[myLocAddr] - U[myLocAddr]); em[globAddr + 4*FLUID_SLABPITCH] = deltaP; #ifdef EXPO_DOTR dv = (gas[globAddr+4*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+4*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+4*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+4*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; em[globAddr] = sqrt(dvsq); // output initial delta-v em[globAddr + FLUID_SLABPITCH] = V[myLocAddr] / gas[globAddr]; // = P/rho = specific internal energy density } temp = U; U = V; V = W; W = temp; // cyclically shift them back globAddr += arraysize.x * arraysize.y; } } /* Computes the gradient of 3d array phi using the 4-point centered derivative and * stores phi_x in fx, phi_y in fy, phi_z in fz. * All arrays (rho, phi, fx, fy, fz) must be of size arraysize. * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi * This call must be invoked in two parts: * cukern_prepareForERK3D_h4_partone computes the X and Y (or r/theta) derivatives, * cukern_prepareForERK3D_h4_parttwo computes the Z derivative. */ template <geometryType_t coords> __global__ void cukern_prepareForERK3D_h4_partone(double *phi, double *fx, double *fy, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > (arraysize.x+1)) || (myY > (arraysize.y+1))) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phishm[GRADBLOCKX*GRADBLOCKY]; __syncthreads(); int z; int deltaz = arraysize.x*arraysize.y; for(z = 0; z < arraysize.z; z++) { phishm[myLocAddr] = phi[globAddr]; __syncthreads(); if(IWrite) { deltaphi = LAMX*(-phishm[myLocAddr+2]+8.0*phishm[myLocAddr+1]-8.0*phishm[myLocAddr-1]+phishm[myLocAddr-2]); fx[globAddr] = deltaphi; // store px <- px - dt * rho dphi/dx; if(coords == SQUARE) { deltaphi = LAMY*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]); } if(coords == CYLINDRICAL) { // In cylindrical coords, use dt/dphi * (delta-phi) / r to get d/dy deltaphi = LAMY*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]) / (RINNER + DELTAR*myX); } fy[globAddr] = deltaphi; } globAddr += deltaz; } } /* 2nd part of 4th order 3D spatial gradient computes d/dz (same in cart & cyl coords so no template */ __global__ void cukern_prepareForERK3D_h4_parttwo(double *phi, double *fz, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myZ = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > (arraysize.x+1)) || (myZ > (arraysize.z+1))) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myZ < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myZ = (myZ + arraysize.z) % arraysize.z; int delta = arraysize.x*arraysize.y; int globAddr = myX + delta*myZ; double 
deltaphi; // Store derivative of phi in one direction __shared__ double phishm[GRADBLOCKX*GRADBLOCKY]; __syncthreads(); int y; for(y = 0; y < arraysize.y; y++) { phishm[myLocAddr] = phi[globAddr]; if(IWrite) { deltaphi = LAMZ*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]); fz[globAddr] = deltaphi; } globAddr += arraysize.x; } } /* Compute the gradient of 2d array phi with 2nd order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi */ template <geometryType_t coords> __global__ void cukern_prepareForERK2D_h2(double *gas, double *dust, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaP; // Store derivative of phi in one direction double dv, dvsq; __shared__ double locPress[GRADBLOCKX*GRADBLOCKY]; locPress[myLocAddr] = gas2press(gas+globAddr); __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaP = LAMX*(locPress[myLocAddr+1]-locPress[myLocAddr-1]); em[globAddr+2*FLUID_SLABPITCH] = deltaP; dv = (gas[globAddr+4*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+4*FLUID_SLABPITCH]/dust[globAddr]; dvsq = dv*dv; #ifdef EXPO_DOTR dv = (gas[globAddr+2*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+2*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; // Calculate dt*(dphi/dy) if(coords == SQUARE) { deltaP = LAMY*(locPress[myLocAddr+GRADBLOCKX]-locPress[myLocAddr-GRADBLOCKX]); } if(coords == CYLINDRICAL) { // Converts d/dphi into physical distance based on R deltaP = LAMY*(locPress[myLocAddr+GRADBLOCKX]-locPress[myLocAddr-GRADBLOCKX]) / (RINNER + myX*DELTAR); } em[globAddr+3*FLUID_SLABPITCH] = deltaP; em[globAddr+4*FLUID_SLABPITCH] = 0.0; #ifdef EXPO_DOTR dv = (gas[globAddr+3*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+3*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+3*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+3*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; em[globAddr] = sqrt(dvsq); em[globAddr + FLUID_SLABPITCH] = locPress[myLocAddr] / gas[globAddr]; // specific internal energy for } } /* Compute the gradient of 2d array phi with 4th order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi */ template <geometryType_t coords> __global__ void cukern_prepareForERK2D_h4(double *phi, double *fx, double *fy, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY 
= (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phiLoc[GRADBLOCKX*GRADBLOCKY]; phiLoc[myLocAddr] = phi[globAddr]; __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaphi = LAMX*(-phiLoc[myLocAddr+2] + 8*phiLoc[myLocAddr+1] - 8*phiLoc[myLocAddr-1] + phiLoc[myLocAddr-2]); fx[globAddr] = deltaphi; // Calculate dt*(dphi/dy) if(coords == SQUARE) { deltaphi = LAMY*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + phiLoc[myLocAddr-2*GRADBLOCKX]); } if(coords == CYLINDRICAL) { // Converts d/dphi into physical distance based on R deltaphi = LAMY*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + phiLoc[myLocAddr-2*GRADBLOCKX])/(RINNER + myX*DELTAR); } fy[globAddr] = deltaphi; } } /* Compute the gradient of R-Z array phi with 2nd order accuracy; store the results in f_x, f_z * In cylindrical geometry, f_x -> f_r */ __global__ void cukern_prepareForERKRZ_h2(double *gas, double *dust, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.z)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.z) % arraysize.z; int globAddr = myX + arraysize.x*myY; double deltaP, dv, dvsq; // Store derivative of phi in one direction __shared__ double pressLoc[GRADBLOCKX*GRADBLOCKY]; pressLoc[myLocAddr] = gas2press(gas + globAddr); __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { em[globAddr + 3*FLUID_SLABPITCH] = 0.0; // zero phi gradient // compute v_phi contribution to |dv|^2 for 2.5-D dv = (gas[globAddr+3*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+3*FLUID_SLABPITCH]/dust[globAddr]; dvsq = dv*dv; // compute dt * (dphi/dx) deltaP = LAMX*(pressLoc[myLocAddr+1]-pressLoc[myLocAddr-1]); em[globAddr + 2*FLUID_SLABPITCH] = deltaP; #ifdef EXPO_DOTR dv = (gas[globAddr+2*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+2*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+2*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; // Calculate dt*(dphi/dz) deltaP = LAMZ*(pressLoc[myLocAddr+GRADBLOCKX]-pressLoc[myLocAddr-GRADBLOCKX]); em[globAddr + 4*FLUID_SLABPITCH] = deltaP; #ifdef EXPO_DOTR dv = (gas[globAddr+4*FLUID_SLABPITCH]+dragparams[7]*deltaP)/gas[globAddr] - dust[globAddr+4*FLUID_SLABPITCH]/dust[globAddr]; #else dv = (gas[globAddr+4*FLUID_SLABPITCH])/gas[globAddr] - dust[globAddr+4*FLUID_SLABPITCH]/dust[globAddr]; #endif dvsq += dv*dv; em[globAddr] = sqrt(dvsq); // magnitude delta-v with time reversed pressure gradient em[globAddr + FLUID_SLABPITCH] = pressLoc[myLocAddr] / gas[globAddr]; // specific internal energy for } } /* Compute the gradient of RZ array phi with 4th order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, */ __global__ void cukern_prepareForERKRZ_h4(double *phi, double *fx, double *fz, int3 arraysize) { int myLocAddr = threadIdx.x + 
GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > arraysize.x) || (myY > arraysize.z)) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.z) % arraysize.z; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phiLoc[GRADBLOCKX*GRADBLOCKY]; phiLoc[myLocAddr] = phi[globAddr]; __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaphi = LAMX*(-phiLoc[myLocAddr+2] + 8*phiLoc[myLocAddr+1] - 8*phiLoc[myLocAddr-1] + phiLoc[myLocAddr-2]); fx[globAddr] = deltaphi; // Calculate dt*(dphi/dz) deltaphi = LAMZ*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + phiLoc[myLocAddr-2*GRADBLOCKX]); fz[globAddr] = deltaphi; } } #undef GRADBLOCKX #undef GRADBLOCKY // This awful wad of mutated copypasta from the cudaGradientKernels.cu file turns the // midpoint internal energy density and the mass density into pressure & computes the // gradient for the ERK solver's second stage // compute grad(phi) in XYZ or R-Theta-Z with 2nd or 4th order accuracy template <geometryType_t coords> __global__ void cukern_findMidGradP3D_h2(double *gas, double *em, int3 arraysize); template <geometryType_t coords> __global__ void cukern_findMidGradP3D_h4_partone(double *phi, double *fx, double *fy, int3 arraysize); __global__ void cukern_findMidGradP3D_h4_parttwo(double *phi, double *fz, int3 arraysize); // compute grad(phi) in X-Y or R-Theta with 2nd or 4th order accuracy template <geometryType_t coords> __global__ void cukern_findMidGradP2D_h2(double *gas, double *em, int3 arraysize); template <geometryType_t coords> __global__ void cukern_findMidGradP2D_h4(double *phi, double *fx, double *fy, int3 arraysize); // Compute grad(phi) in X-Z or R-Z with 2nd or 4th order accuracy __global__ void cukern_findMidGradPRZ_h2(double *gas, double *em, int3 arraysize); __global__ void cukern_findMidGradPRZ_h4(double *phi, double *fx, double *fz, int3 arraysize); #define GRADBLOCKX 18 #define GRADBLOCKY 18 // scalingParameter / 2h or /12h depending on spatial order of scheme #define LAMX devLambda[0] #define LAMY devLambda[1] #define LAMZ devLambda[2] #define RINNER devLambda[7] #define DELTAR devLambda[8] /* Given the gas pointer, temp memory and geometry, uses the midpoint specific internal energy density from tempMem * and the gas mass density to compute the pressure gradient into tempMem slabs 2 through 4. scalingParameter needs * to be (gamma-1) to convert rho * u_specific = e_internal = P / (gamma-1) to P. 
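 * For example, with gamma = 5/3 the caller passes scalingParameter = 2/3, so the kernels below form
 * P = (gamma-1) * rho * u_specific from slab 1 and store centered differences scaled by
 * (gamma-1)/(2h) into slabs 2-4, i.e. the midpoint pressure gradient.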
*/ int findMidGradP2(MGArray *gas, MGArray *tempMem, GeometryParams geom, int spaceOrder, double scalingParameter) { dim3 gridsize, blocksize; double lambda[11]; int i; int worked; int sub[6]; double *dx = &geom.h[0]; if(spaceOrder == 4) { lambda[0] = scalingParameter/(12.0*dx[0]); lambda[1] = scalingParameter/(12.0*dx[1]); lambda[2] = scalingParameter/(12.0*dx[2]); } else { lambda[0] = scalingParameter/(2.0*dx[0]); lambda[1] = scalingParameter/(2.0*dx[1]); lambda[2] = scalingParameter/(2.0*dx[2]); } lambda[7] = geom.Rinner; // This is actually overwritten per partition below lambda[8] = dx[1]; int isThreeD = (gas->dim[2] > 1); int isRZ = (gas->dim[2] > 1) & (gas->dim[1] == 1); for(i = 0; i < gas->nGPUs; i++) { cudaSetDevice(gas->deviceID[i]); calcPartitionExtent(gas, i, &sub[0]); lambda[7] = geom.Rinner + dx[0] * sub[0]; // Innermost cell coord may change per-partition cudaMemcpyToSymbol((const void *)devLambda, lambda, 11*sizeof(double), 0, cudaMemcpyHostToDevice); worked = CHECK_CUDA_ERROR("cudaMemcpyToSymbol"); if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break; //cudaMemcpyToSymbol((const void *)devIntParams, &sub[3], 3*sizeof(int), 0, cudaMemcpyHostToDevice); //worked = CHECK_CUDA_ERROR("memcpy to symbol"); //if(worked != SUCCESSFUL) break; } if(worked != SUCCESSFUL) return worked; double *gasPtr; double *tmpPtr; // Iterate over all partitions, and here we GO! for(i = 0; i < gas->nGPUs; i++) { cudaSetDevice(gas->deviceID[i]); worked = CHECK_CUDA_ERROR("cudaSetDevice"); if(worked != SUCCESSFUL) break; calcPartitionExtent(gas, i, sub); int3 arraysize; arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5]; dim3 blocksize(GRADBLOCKX, GRADBLOCKY, 1); gridsize.x = arraysize.x / (blocksize.x - spaceOrder); gridsize.x += ((blocksize.x-spaceOrder) * gridsize.x < arraysize.x); if(isRZ) { gridsize.y = arraysize.z / (blocksize.y - spaceOrder); gridsize.y += ((blocksize.y-spaceOrder) * gridsize.y < arraysize.z); } else { gridsize.y = arraysize.y / (blocksize.y - spaceOrder); gridsize.y += ((blocksize.y-spaceOrder) * gridsize.y < arraysize.y); } gridsize.z = 1; gasPtr = gas->devicePtr[i]; // WARNING: this could be garbage if spaceOrder == 0 and we rx'd no potential array tmpPtr = tempMem->devicePtr[i]; switch(spaceOrder) { /*case 0: // dump zeros so as to have a technically-valid result and not cause reads of uninitialized memory writeScalarToVector<<<32, 256>>>(tmpPtr + 0 * slabsize, gas->partNumel[i], 0.0); writeScalarToVector<<<32, 256>>>(tmpPtr + 1 * slabsize, gas->partNumel[i], 0.0); writeScalarToVector<<<32, 256>>>(tmpPtr + 2 * slabsize, gas->partNumel[i], 0.0); break;*/ case 2: if(isThreeD) { if(isRZ) { cukern_findMidGradPRZ_h2<<<gridsize, blocksize>>>(gasPtr, tmpPtr, arraysize); } else { if(geom.shape == SQUARE) { cukern_findMidGradP3D_h2<SQUARE><<<gridsize, blocksize>>> (gasPtr, tmpPtr, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_findMidGradP3D_h2<CYLINDRICAL><<<gridsize, blocksize>>> (gasPtr, tmpPtr, arraysize); } } } else { if(geom.shape == SQUARE) { cukern_findMidGradP2D_h2<SQUARE><<<gridsize, blocksize>>>(gasPtr, tmpPtr, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_findMidGradP2D_h2<CYLINDRICAL><<<gridsize, blocksize>>>(gasPtr, tmpPtr, arraysize); } } break; /*case 4: if(isThreeD) { if(isRZ) { cukern_findMidGradPRZ_h4<<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr + 2*gas->partNumel[i], arraysize); writeScalarToVector<<<32, 256>>>(tmpPtr + slabsize, gas->partNumel[i], 0.0); } else { if(geom.shape == SQUARE) { 
cukern_findMidGradP3D_h4_partone<SQUARE><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); cukern_findMidGradP3D_h4_parttwo<<<gridsize, blocksize>>>(gasPtr, tmpPtr+ slabsize*2, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_findMidGradP3D_h4_partone<CYLINDRICAL><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); cukern_findMidGradP3D_h4_parttwo<<<gridsize, blocksize>>>(gasPtr, tmpPtr+ slabsize*2, arraysize); } } } else { if(geom.shape == SQUARE) { cukern_findMidGradP2D_h4<SQUARE><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); } if(geom.shape == CYLINDRICAL) { cukern_findMidGradP2D_h4<CYLINDRICAL><<<gridsize, blocksize>>>(gasPtr, tmpPtr, tmpPtr+ slabsize, arraysize); } writeScalarToVector<<<32, 256>>>(tmpPtr+2*gas->partNumel[i], gas->partNumel[i], 0.0); } break;*/ default: PRINT_FAULT_HEADER; printf("Was passed spatial order parameter of %i, must be passed 2 (2nd order)\n", spaceOrder); PRINT_FAULT_FOOTER; return ERROR_INVALID_ARGS; } worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, gas, i, "cukern_findMidGradP"); if(worked != SUCCESSFUL) break; } if(worked != SUCCESSFUL) return worked; // FIXME this needs to either understand slabs, or we need to fetch 3 slab ptrs into an array & pass it instead // worked = MGA_exchangeLocalHalos(gradient, 5); // need to? if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked; return CHECK_IMOGEN_ERROR(worked); } /* Algorithm: * [|dv_tr|, u_0, P_x, P_y, P_z] = exponentialSetup(gas_state, dust_state) * 5 output registers * may need slope limiter on gradient calculation? */ template <geometryType_t coords> __global__ void cukern_findMidGradP3D_h2(double *gas, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaP; // Store derivative of phi in one direction __shared__ double phiA[GRADBLOCKX*GRADBLOCKY]; __shared__ double phiB[GRADBLOCKX*GRADBLOCKY]; __shared__ double phiC[GRADBLOCKX*GRADBLOCKY]; double *U; double *V; double *W; double *temp; U = phiA; V = phiB; W = phiC; // compute epsilon_internal on lower & current planes U[myLocAddr] = gas[globAddr + arraysize.x*arraysize.y*(arraysize.z-1)] * em[globAddr + arraysize.x*arraysize.y*(arraysize.z-1) + FLUID_SLABPITCH]; V[myLocAddr] = gas[globAddr] * em[globAddr + FLUID_SLABPITCH]; __syncthreads(); int z; int deltaz = arraysize.x*arraysize.y; for(z = 0; z < arraysize.z; z++) { if(z >= arraysize.z - 1) deltaz = - arraysize.x*arraysize.y*(arraysize.z-1); if(IWrite) { deltaP = LAMX*(V[myLocAddr+1]-V[myLocAddr-1]); #ifdef EXPO_TRAPEZOID em[globAddr + 2*FLUID_SLABPITCH] = .5*(em[globAddr + 2*FLUID_SLABPITCH] + deltaP); #else em[globAddr + 2*FLUID_SLABPITCH] = deltaP; #endif } if(IWrite) { if(coords == SQUARE) { deltaP = LAMY*(V[myLocAddr+GRADBLOCKX]-V[myLocAddr-GRADBLOCKX]); } if(coords == CYLINDRICAL) { // In cylindrical coords, use dt/dphi * (delta-phi) / r to get d/dy deltaP = LAMY*(V[myLocAddr+GRADBLOCKX]-V[myLocAddr-GRADBLOCKX]) / (RINNER + DELTAR*myX); } #ifdef EXPO_TRAPEZOID em[globAddr + 3*FLUID_SLABPITCH] = .5*(em[globAddr + 
3*FLUID_SLABPITCH] + deltaP); #else em[globAddr + 3*FLUID_SLABPITCH] = deltaP; #endif } W[myLocAddr] = gas[globAddr + deltaz] * em[globAddr + deltaz + FLUID_SLABPITCH]; __syncthreads(); if(IWrite) { deltaP = LAMZ*(W[myLocAddr] - U[myLocAddr]); #ifdef EXPO_TRAPEZOID em[globAddr + 4*FLUID_SLABPITCH] = .5*(em[globAddr + 4*FLUID_SLABPITCH] + deltaP); #else em[globAddr + 4*FLUID_SLABPITCH] = deltaP; #endif } temp = U; U = V; V = W; W = temp; // cyclically shift them back globAddr += arraysize.x * arraysize.y; } } /* Computes the gradient of 3d array phi using the 4-point centered derivative and * stores phi_x in fx, phi_y in fy, phi_z in fz. * All arrays (rho, phi, fx, fy, fz) must be of size arraysize. * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi * This call must be invoked in two parts: * cukern_findMidGradP3D_h4_partone computes the X and Y (or r/theta) derivatives, * cukern_findMidGradP3D_h4_parttwo computes the Z derivative. */ template <geometryType_t coords> __global__ void cukern_findMidGradP3D_h4_partone(double *phi, double *fx, double *fy, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > (arraysize.x+1)) || (myY > (arraysize.y+1))) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phishm[GRADBLOCKX*GRADBLOCKY]; __syncthreads(); int z; int deltaz = arraysize.x*arraysize.y; for(z = 0; z < arraysize.z; z++) { phishm[myLocAddr] = phi[globAddr]; __syncthreads(); if(IWrite) { deltaphi = LAMX*(-phishm[myLocAddr+2]+8.0*phishm[myLocAddr+1]-8.0*phishm[myLocAddr-1]+phishm[myLocAddr-2]); fx[globAddr] = deltaphi; // store px <- px - dt * rho dphi/dx; if(coords == SQUARE) { deltaphi = LAMY*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]); } if(coords == CYLINDRICAL) { // In cylindrical coords, use dt/dphi * (delta-phi) / r to get d/dy deltaphi = LAMY*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]) / (RINNER + DELTAR*myX); } fy[globAddr] = deltaphi; } globAddr += deltaz; } } /* 2nd part of 4th order 3D spatial gradient computes d/dz (same in cart & cyl coords so no template */ __global__ void cukern_findMidGradP3D_h4_parttwo(double *phi, double *fz, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myZ = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > (arraysize.x+1)) || (myZ > (arraysize.z+1))) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myZ < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myZ = (myZ + arraysize.z) % arraysize.z; int delta = arraysize.x*arraysize.y; int globAddr = myX + delta*myZ; double deltaphi; // Store derivative of phi in one direction __shared__ double phishm[GRADBLOCKX*GRADBLOCKY]; __syncthreads(); int y; for(y = 0; y < arraysize.y; y++) { phishm[myLocAddr] = phi[globAddr]; if(IWrite) { deltaphi = 
LAMZ*(-phishm[myLocAddr+2*GRADBLOCKX]+8*phishm[myLocAddr+GRADBLOCKX]-8*phishm[myLocAddr-GRADBLOCKX]+phishm[myLocAddr-2*GRADBLOCKX]); fz[globAddr] = deltaphi; } globAddr += arraysize.x; } } /* Compute the gradient of 2d array phi with 2nd order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi */ template <geometryType_t coords> __global__ void cukern_findMidGradP2D_h2(double *gas, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaP; // Store derivative of phi in one direction __shared__ double locPress[GRADBLOCKX*GRADBLOCKY]; locPress[myLocAddr] = gas[globAddr] * em[globAddr + FLUID_SLABPITCH]; __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaP = LAMX*(locPress[myLocAddr+1]-locPress[myLocAddr-1]); #ifdef EXPO_TRAPEZOID em[globAddr+2*FLUID_SLABPITCH] = .5*(em[globAddr+2*FLUID_SLABPITCH] + deltaP); #else em[globAddr+2*FLUID_SLABPITCH] = deltaP; #endif if(coords == SQUARE) { deltaP = LAMY*(locPress[myLocAddr+GRADBLOCKX]-locPress[myLocAddr-GRADBLOCKX]); } if(coords == CYLINDRICAL) { // Converts d/dphi into physical distance based on R deltaP = LAMY*(locPress[myLocAddr+GRADBLOCKX]-locPress[myLocAddr-GRADBLOCKX]) / (RINNER + myX*DELTAR); } #ifdef EXPO_TRAPEZOID em[globAddr+3*FLUID_SLABPITCH] = .5*(em[globAddr+3*FLUID_SLABPITCH] + deltaP); #else em[globAddr+3*FLUID_SLABPITCH] = deltaP; #endif em[globAddr+4*FLUID_SLABPITCH] = 0.0; } } /* Compute the gradient of 2d array phi with 4th order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, * f_y -> f_phi */ template <geometryType_t coords> __global__ void cukern_findMidGradP2D_h4(double *phi, double *fx, double *fy, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > arraysize.x) || (myY > arraysize.y)) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.y); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.y) % arraysize.y; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phiLoc[GRADBLOCKX*GRADBLOCKY]; phiLoc[myLocAddr] = phi[globAddr]; __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaphi = LAMX*(-phiLoc[myLocAddr+2] + 8*phiLoc[myLocAddr+1] - 8*phiLoc[myLocAddr-1] + phiLoc[myLocAddr-2]); fx[globAddr] = deltaphi; // Calculate dt*(dphi/dy) if(coords == SQUARE) { deltaphi = LAMY*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + phiLoc[myLocAddr-2*GRADBLOCKX]); } if(coords == CYLINDRICAL) { // Converts d/dphi into physical distance based on R deltaphi = 
LAMY*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + phiLoc[myLocAddr-2*GRADBLOCKX])/(RINNER + myX*DELTAR); } fy[globAddr] = deltaphi; } } /* Compute the gradient of R-Z array phi with 2nd order accuracy; store the results in f_x, f_z * In cylindrical geometry, f_x -> f_r */ __global__ void cukern_findMidGradPRZ_h2(double *gas, double *em, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-2)*blockIdx.x - 1; int myY = threadIdx.y + (GRADBLOCKY-2)*blockIdx.y - 1; if((myX > arraysize.x) || (myY > arraysize.z)) return; bool IWrite = (threadIdx.x > 0) && (threadIdx.x < (GRADBLOCKX-1)) && (threadIdx.y > 0) && (threadIdx.y < (GRADBLOCKY-1)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.z) % arraysize.z; int globAddr = myX + arraysize.x*myY; double deltaP; // Store derivative of phi in one direction __shared__ double pressLoc[GRADBLOCKX*GRADBLOCKY]; pressLoc[myLocAddr] = gas[globAddr] * em[globAddr + FLUID_SLABPITCH]; __syncthreads(); // Make sure loaded P is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { em[globAddr + 3*FLUID_SLABPITCH] = 0.0; // zero phi gradient // compute dP/dr deltaP = LAMX*(pressLoc[myLocAddr+1]-pressLoc[myLocAddr-1]); #ifdef EXPO_TRAPEZOID em[globAddr+2*FLUID_SLABPITCH] = .5*(em[globAddr+2*FLUID_SLABPITCH] + deltaP); #else em[globAddr+2*FLUID_SLABPITCH] = deltaP; #endif // Calculate dP/dz deltaP = LAMZ*(pressLoc[myLocAddr+GRADBLOCKX]-pressLoc[myLocAddr-GRADBLOCKX]); #ifdef EXPO_TRAPEZOID em[globAddr+4*FLUID_SLABPITCH] = .5*(em[globAddr+4*FLUID_SLABPITCH] + deltaP); #else em[globAddr+4*FLUID_SLABPITCH] = deltaP; #endif } } /* Compute the gradient of RZ array phi with 4th order accuracy; store the results in f_x, f_y * In cylindrical geometry, f_x -> f_r, */ __global__ void cukern_findMidGradPRZ_h4(double *phi, double *fx, double *fz, int3 arraysize) { int myLocAddr = threadIdx.x + GRADBLOCKX*threadIdx.y; int myX = threadIdx.x + (GRADBLOCKX-4)*blockIdx.x - 2; int myY = threadIdx.y + (GRADBLOCKY-4)*blockIdx.y - 2; if((myX > arraysize.x) || (myY > arraysize.z)) return; bool IWrite = (threadIdx.x > 1) && (threadIdx.x < (GRADBLOCKX-2)) && (threadIdx.y > 1) && (threadIdx.y < (GRADBLOCKY-2)); IWrite = IWrite && (myX < arraysize.x) && (myY < arraysize.z); myX = (myX + arraysize.x) % arraysize.x; myY = (myY + arraysize.z) % arraysize.z; int globAddr = myX + arraysize.x*myY; double deltaphi; // Store derivative of phi in one direction __shared__ double phiLoc[GRADBLOCKX*GRADBLOCKY]; phiLoc[myLocAddr] = phi[globAddr]; __syncthreads(); // Make sure loaded phi is visible // coupling is exactly zero if rho <= rhomin if(IWrite) { // compute dt * (dphi/dx) deltaphi = LAMX*(-phiLoc[myLocAddr+2] + 8*phiLoc[myLocAddr+1] - 8*phiLoc[myLocAddr-1] + phiLoc[myLocAddr-2]); fx[globAddr] = deltaphi; // Calculate dt*(dphi/dz) deltaphi = LAMZ*(-phiLoc[myLocAddr+2*GRADBLOCKX] + 8*phiLoc[myLocAddr+1*GRADBLOCKX] - 8*phiLoc[myLocAddr-1*GRADBLOCKX] + phiLoc[myLocAddr-2*GRADBLOCKX]); fz[globAddr] = deltaphi; } }
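As a quick reference for the stencils above: the "_h4" kernels evaluate the 4-point, fourth-order-accurate centered difference in each direction, and assuming the LAMX/LAMY/LAMZ constants supply the appropriate 1/(2*delta) or 1/(12*delta) factor (plus the timestep scaling hinted at by the "compute dt * (dphi/dx)" comments), the derivative being formed is

    f'(x) ~= ( -f(x+2h) + 8 f(x+h) - 8 f(x-h) + f(x-2h) ) / (12 h)

while the "_h2" kernels use the familiar second-order form ( f(x+h) - f(x-h) ) / (2h). In cylindrical geometry both variants additionally divide the phi-derivative by r = RINNER + myX*DELTAR, so the stored quantity is the physical component (1/r) dP/dphi rather than dP/dphi itself.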
eeaf44de7a9688e9db50c4f193e3bf5720f962ee.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/GpuSubBurstNnfDistance.h> #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/impl/SubBurstPatchDistance.cuh> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/CopyUtils.cuh> #include <faiss/gpu/utils/DeviceTensor.cuh> namespace faiss { namespace gpu { template <typename T> void bfSubBurstNnfConvert(GpuResourcesProvider* prov, const GpuSubBurstNnfDistanceParams& args) { // Validate the input data // FAISS_THROW_IF_NOT_MSG( // args.k > 0 || args.k == -1, // "bfSubBurstNnf: k must be > 0 for top-k reduction, " // "or -1 for all pairwise distances"); // FAISS_THROW_IF_NOT_MSG(args.dims > 0, "bfSubBurstNnf: dims must be > 0"); // FAISS_THROW_IF_NOT_MSG( // args.numVectors > 0, "bfSubBurstNnf: numVectors must be > 0"); // FAISS_THROW_IF_NOT_MSG( // args.vectors, "bfSubBurstNnf: vectors must be provided (passed null)"); // FAISS_THROW_IF_NOT_MSG( // args.numQueries > 0, "bfSubBurstNnf: numQueries must be > 0"); // FAISS_THROW_IF_NOT_MSG( // args.queries, "bfSubBurstNnf: queries must be provided (passed null)"); FAISS_THROW_IF_NOT_MSG( args.outDistances, "bfSubBurstNnf: outDistances must be provided (passed null)"); FAISS_THROW_IF_NOT_MSG( args.outIndices || args.k == -1, "bfSubBurstNnf: outIndices must be provided (passed null)"); // Don't let the resources go out of scope // std::cout << "about to get res" << std::endl; auto resImpl = prov->getResources(); auto res = resImpl.get(); // std::cout << "res" << std::endl; auto device = getCurrentDevice(); auto stream = res->getDefaultStreamCurrentDevice(); // std::cout << "Got the Stream!" 
<< std::endl; int psHalf = ::floor(args.ps/2); int pad = ::floor(args.ps/2) + ::floor(args.nblocks/2); auto burst = toDeviceTemporary<T, 4>(res,device, const_cast<T*>(reinterpret_cast<const T*> (args.burst)), stream, {args.sub_t,args.c,args.h+2*pad,args.w+2*pad}); auto subAve = toDeviceTemporary<T, 3>(res,device, const_cast<T*>( reinterpret_cast<const T*> (args.subAve)), stream, {args.c,args.h+2*psHalf,args.w+2*psHalf}); auto blockLabels = toDeviceTemporary<int, 5>(res,device, args.blockLabels, stream, {args.nblocks_total, args.h+2*psHalf,args.w+2*psHalf, args.sub_t,2}); auto mask = toDeviceTemporary<bool, 4>(res,device, const_cast<bool*>( reinterpret_cast<const bool*> (args.mask)), stream, {args.nblocks_total, args.h+2*psHalf,args.w+2*psHalf, args.sub_t}); auto tOutDistances = toDeviceTemporary<float, 3>( res, device, args.outDistances, stream, {args.h,args.w,args.k}); if (args.outIndicesType == IndicesDataType::I64) { // The brute-force API only supports an interface for i32 indices only, // so we must create an output i32 buffer then convert back DeviceTensor<int, 5, true> tOutIntIndices(res, makeTempAlloc(AllocType::Other, stream), {args.sub_t, args.h, args.w, args.k, 2}); // Since we've guaranteed that all arguments are on device, call the // implementation bfSubBurstNnfOnDevice<T>( res, device, stream, burst, subAve, blockLabels, mask, args.k, args.t, args.h, args.w, args.c, args.ps, args.nblocks, args.valMean, args.metric, args.metricArg, tOutDistances, tOutIntIndices, args.ignoreOutDistances); // Convert and copy int indices out auto tOutIndices = toDeviceTemporary<Index::idx_t, 5>(res,device, (Index::idx_t*) args.outIndices, stream, {args.sub_t, args.h, args.w, args.k, 2}); // Convert int to idx_t convertTensor<int, Index::idx_t, 5>(stream, tOutIntIndices, tOutIndices); // Copy back if necessary fromDevice<Index::idx_t, 5>(tOutIndices, (Index::idx_t*)args.outIndices, stream); } else if (args.outIndicesType == IndicesDataType::I32) { // We can use the brute-force API directly, as it takes i32 indices // FIXME: convert to int32_t everywhere? static_assert(sizeof(int) == 4, ""); auto tOutIntIndices = toDeviceTemporary<int, 5>(res,device, (int*)args.outIndices, stream, {args.sub_t, args.h, args.w, args.k, 2}); // Since we've guaranteed that all arguments are on device, call the // implementation bfSubBurstNnfOnDevice<T>( res, device, stream, burst, subAve, blockLabels, mask, args.k, args.t, args.h, args.w, args.c, args.ps, args.nblocks, args.valMean, args.metric, args.metricArg, tOutDistances, tOutIntIndices, args.ignoreOutDistances); // Copy back if necessary fromDevice<int, 5>(tOutIntIndices, (int*)args.outIndices, stream); } else { FAISS_THROW_MSG("unknown outIndicesType"); } // Copy distances back if necessary fromDevice<float, 3>(tOutDistances, args.outDistances, stream); } void bfSubBurstNnf(GpuResourcesProvider* res, const GpuSubBurstNnfDistanceParams& args) { // For now, both vectors and queries must be of the same data type if (args.dType == DistanceDataType::F32) { bfSubBurstNnfConvert<float>(res, args); } else if (args.dType == DistanceDataType::F16) { bfSubBurstNnfConvert<half>(res, args); } else { FAISS_THROW_MSG("unknown vectorType"); } } } }
eeaf44de7a9688e9db50c4f193e3bf5720f962ee.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/GpuSubBurstNnfDistance.h> #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/impl/SubBurstPatchDistance.cuh> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/CopyUtils.cuh> #include <faiss/gpu/utils/DeviceTensor.cuh> namespace faiss { namespace gpu { template <typename T> void bfSubBurstNnfConvert(GpuResourcesProvider* prov, const GpuSubBurstNnfDistanceParams& args) { // Validate the input data // FAISS_THROW_IF_NOT_MSG( // args.k > 0 || args.k == -1, // "bfSubBurstNnf: k must be > 0 for top-k reduction, " // "or -1 for all pairwise distances"); // FAISS_THROW_IF_NOT_MSG(args.dims > 0, "bfSubBurstNnf: dims must be > 0"); // FAISS_THROW_IF_NOT_MSG( // args.numVectors > 0, "bfSubBurstNnf: numVectors must be > 0"); // FAISS_THROW_IF_NOT_MSG( // args.vectors, "bfSubBurstNnf: vectors must be provided (passed null)"); // FAISS_THROW_IF_NOT_MSG( // args.numQueries > 0, "bfSubBurstNnf: numQueries must be > 0"); // FAISS_THROW_IF_NOT_MSG( // args.queries, "bfSubBurstNnf: queries must be provided (passed null)"); FAISS_THROW_IF_NOT_MSG( args.outDistances, "bfSubBurstNnf: outDistances must be provided (passed null)"); FAISS_THROW_IF_NOT_MSG( args.outIndices || args.k == -1, "bfSubBurstNnf: outIndices must be provided (passed null)"); // Don't let the resources go out of scope // std::cout << "about to get res" << std::endl; auto resImpl = prov->getResources(); auto res = resImpl.get(); // std::cout << "res" << std::endl; auto device = getCurrentDevice(); auto stream = res->getDefaultStreamCurrentDevice(); // std::cout << "Got the Stream!" 
<< std::endl; int psHalf = std::floor(args.ps/2); int pad = std::floor(args.ps/2) + std::floor(args.nblocks/2); auto burst = toDeviceTemporary<T, 4>(res,device, const_cast<T*>(reinterpret_cast<const T*> (args.burst)), stream, {args.sub_t,args.c,args.h+2*pad,args.w+2*pad}); auto subAve = toDeviceTemporary<T, 3>(res,device, const_cast<T*>( reinterpret_cast<const T*> (args.subAve)), stream, {args.c,args.h+2*psHalf,args.w+2*psHalf}); auto blockLabels = toDeviceTemporary<int, 5>(res,device, args.blockLabels, stream, {args.nblocks_total, args.h+2*psHalf,args.w+2*psHalf, args.sub_t,2}); auto mask = toDeviceTemporary<bool, 4>(res,device, const_cast<bool*>( reinterpret_cast<const bool*> (args.mask)), stream, {args.nblocks_total, args.h+2*psHalf,args.w+2*psHalf, args.sub_t}); auto tOutDistances = toDeviceTemporary<float, 3>( res, device, args.outDistances, stream, {args.h,args.w,args.k}); if (args.outIndicesType == IndicesDataType::I64) { // The brute-force API only supports an interface for i32 indices only, // so we must create an output i32 buffer then convert back DeviceTensor<int, 5, true> tOutIntIndices(res, makeTempAlloc(AllocType::Other, stream), {args.sub_t, args.h, args.w, args.k, 2}); // Since we've guaranteed that all arguments are on device, call the // implementation bfSubBurstNnfOnDevice<T>( res, device, stream, burst, subAve, blockLabels, mask, args.k, args.t, args.h, args.w, args.c, args.ps, args.nblocks, args.valMean, args.metric, args.metricArg, tOutDistances, tOutIntIndices, args.ignoreOutDistances); // Convert and copy int indices out auto tOutIndices = toDeviceTemporary<Index::idx_t, 5>(res,device, (Index::idx_t*) args.outIndices, stream, {args.sub_t, args.h, args.w, args.k, 2}); // Convert int to idx_t convertTensor<int, Index::idx_t, 5>(stream, tOutIntIndices, tOutIndices); // Copy back if necessary fromDevice<Index::idx_t, 5>(tOutIndices, (Index::idx_t*)args.outIndices, stream); } else if (args.outIndicesType == IndicesDataType::I32) { // We can use the brute-force API directly, as it takes i32 indices // FIXME: convert to int32_t everywhere? static_assert(sizeof(int) == 4, ""); auto tOutIntIndices = toDeviceTemporary<int, 5>(res,device, (int*)args.outIndices, stream, {args.sub_t, args.h, args.w, args.k, 2}); // Since we've guaranteed that all arguments are on device, call the // implementation bfSubBurstNnfOnDevice<T>( res, device, stream, burst, subAve, blockLabels, mask, args.k, args.t, args.h, args.w, args.c, args.ps, args.nblocks, args.valMean, args.metric, args.metricArg, tOutDistances, tOutIntIndices, args.ignoreOutDistances); // Copy back if necessary fromDevice<int, 5>(tOutIntIndices, (int*)args.outIndices, stream); } else { FAISS_THROW_MSG("unknown outIndicesType"); } // Copy distances back if necessary fromDevice<float, 3>(tOutDistances, args.outDistances, stream); } void bfSubBurstNnf(GpuResourcesProvider* res, const GpuSubBurstNnfDistanceParams& args) { // For now, both vectors and queries must be of the same data type if (args.dType == DistanceDataType::F32) { bfSubBurstNnfConvert<float>(res, args); } else if (args.dType == DistanceDataType::F16) { bfSubBurstNnfConvert<half>(res, args); } else { FAISS_THROW_MSG("unknown vectorType"); } } } }
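To make the entry point above easier to follow, here is a hypothetical host-side driver. Everything in it is inferred solely from the fields the converter reads (args.burst, args.subAve, args.blockLabels, args.mask, the size fields, and the output buffers); the numeric values are placeholders, and the assumptions that GpuSubBurstNnfDistanceParams default-constructs cleanly and that StandardGpuResources is an acceptable GpuResourcesProvider here are exactly that — assumptions, not documented behavior of this fork.

#include <faiss/gpu/GpuSubBurstNnfDistance.h>
#include <faiss/gpu/StandardGpuResources.h>

// Hypothetical driver: all sizes are illustrative, all pointers are caller-provided
// buffers of the shapes implied by the toDeviceTemporary calls above.
void runSubBurstNnf(float* burst, float* subAve, int* blockLabels, bool* mask,
                    float* outDistances, int* outIndices) {
    faiss::gpu::StandardGpuResources res;            // used as the GpuResourcesProvider
    faiss::gpu::GpuSubBurstNnfDistanceParams args;   // assumed to default-construct cleanly

    args.dType          = faiss::gpu::DistanceDataType::F32;
    args.outIndicesType = faiss::gpu::IndicesDataType::I32;

    // Burst geometry (all values illustrative only).
    args.t = 8;  args.sub_t = 4;  args.c = 3;  args.h = 128;  args.w = 128;
    args.ps = 7; args.nblocks = 3; args.nblocks_total = 9; args.k = 5;
    args.valMean   = 0.f;
    args.metric    = faiss::METRIC_L2;
    args.metricArg = 0;

    args.burst        = burst;
    args.subAve       = subAve;
    args.blockLabels  = blockLabels;
    args.mask         = mask;
    args.outDistances = outDistances;
    args.outIndices   = outIndices;
    args.ignoreOutDistances = false;

    faiss::gpu::bfSubBurstNnf(&res, args);
}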
2c5c36807667aecdf6969d1c862bbb96c15dd367.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> #include <time.h> #include "CImg.h" using namespace std; using namespace cimg_library; #include <iostream> #define BLOCK_SIZE 32 #define WA 512 #define HA 512 #define HC 3 #define WC 3 #define WB (WA - WC + 1) #define HB (HA - HC + 1) using namespace std; __global__ void Convolucion(float* A, float* B, float* C, int numARows, int numACols, int numBRows, int numBCols, int numCRows, int numCCols) { int col = blockIdx.x * (BLOCK_SIZE - WC + 1) + threadIdx.x; int row = blockIdx.y * (BLOCK_SIZE - WC + 1) + threadIdx.y; int row_i = row - WC + 1; int col_i = col - WC + 1; float tmp = 0; __shared__ float shm[BLOCK_SIZE][BLOCK_SIZE]; if (row_i < WA && row_i >= 0 && col_i < WA && col_i >= 0) { shm[threadIdx.y][threadIdx.x] = A[col_i * WA + row_i]; } else { shm[threadIdx.y][threadIdx.x] = 0; } __syncthreads(); if (threadIdx.y < (BLOCK_SIZE - WC + 1) && threadIdx.x < (BLOCK_SIZE - WC + 1) && row < (WB - WC + 1) && col < (WB - WC + 1)) { for (int i = 0; i< WC;i++) for (int j = 0;j<WC;j++) tmp += shm[threadIdx.y + i][threadIdx.x + j] * C[j*WC + i]; B[col*WB + row] = tmp; } } int main(int argc, char** argv) { CImg<unsigned char> src("a.png"); int width = src.width(); int height = src.height(); unsigned long size = src.size(); int A[width][width]; hipError_t error; hipEvent_t start_G, stop_G; hipEventCreate(&start_G); hipEventCreate(&stop_G); unsigned int size_A = WA * HA; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*)malloc(mem_size_A); unsigned int size_B = WB * HB; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*)malloc(mem_size_B); unsigned int size_C = WC * HC; unsigned int mem_size_C = sizeof(float) * size_C; float* h_C = (float*)malloc(mem_size_C); //Sobel derivada en x /*h_C[0]=-1;h_C[1]=0;h_C[2]=1; h_C[3]=-2;h_C[4]=0;h_C[5]=2; h_C[6]=-1;h_C[7]=0;h_C[8]=1;*/ //Sobel derivada en y /*h_C[0]=-1;h_C[1]=-2;h_C[2]=-1; h_C[3]=0;h_C[4]=0;h_C[5]=0; h_C[6]=1;h_C[7]=2;h_C[8]=1;*/ //filtro de prewit derivada en x /*h_C[0]=-1;h_C[1]=0;h_C[2]=1; h_C[3]=-1;h_C[4]=0;h_C[5]=1; h_C[6]=-1;h_C[7]=0;h_C[8]=1;*/ //filtro de scharr derivada en x /*h_C[0]=-3;h_C[1]=0;h_C[2]=3; h_C[3]=-10;h_C[4]=0;h_C[5]=10; h_C[6]=-1;h_C[7]=0;h_C[8]=1;*/ //filtro de laplace h_C[0]=-1;h_C[1]=-1;h_C[2]=-1; h_C[3]=-1;h_C[4]=8;h_C[5]=-1; h_C[6]=-1;h_C[7]=-1;h_C[8]=-1; //int B[xF][yF]={{0,0,0},{0,1,0},{0,0,0}}; int n=0; for (int x=0;x<width;x++){ for (int y=0;y<width;y++){ unsigned char r =(float)src(x,y,0,0); unsigned char g =(float)src(x,y,0,1); unsigned char b =(float)src(x,y,0,2); int _gray = (unsigned int)((float)(r + g + b) / 3.0f + 0.5); src(x,y,0,0)=_gray; src(x,y,0,1)=_gray; src(x,y,0,2)=_gray; A[x][y] =_gray; h_A[n]=_gray; //cout<<_gray<<" "; n++; } //cout<<endl; } src.display();// imagen transformada en esacala de grises float* d_A; float* d_B; float* d_C; hipMalloc((void**)&d_A, mem_size_A); hipMalloc((void**)&d_B, mem_size_B); hipMalloc((void**)&d_C, mem_size_C); hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); hipMemcpy(d_C, h_C, mem_size_C, hipMemcpyHostToDevice); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((WB - 1) / (BLOCK_SIZE - WC + 1), (WB - 1) / (BLOCK_SIZE - WC + 1)); Convolucion << < grid, threads >> >(d_A, d_B, d_C, HA, WA, HB, WB, HC, WC); hipMemcpy(h_B, d_B, mem_size_B, hipMemcpyDeviceToHost); for (int i = 0;i < HB;i++) { for (int j = 0;j < WB;j++) { src(i,j,0,0)=h_B[i*HB + j]; src(i,j,0,1)=h_B[i*HB 
+ j]; src(i,j,0,2)=h_B[i*HB + j]; //cout<<h_B[i*HB + j]<<" "; } } free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); src.display(); return EXIT_SUCCESS; }
2c5c36807667aecdf6969d1c862bbb96c15dd367.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> #include <time.h> #include "CImg.h" using namespace std; using namespace cimg_library; #include <iostream> #define BLOCK_SIZE 32 #define WA 512 #define HA 512 #define HC 3 #define WC 3 #define WB (WA - WC + 1) #define HB (HA - HC + 1) using namespace std; __global__ void Convolucion(float* A, float* B, float* C, int numARows, int numACols, int numBRows, int numBCols, int numCRows, int numCCols) { int col = blockIdx.x * (BLOCK_SIZE - WC + 1) + threadIdx.x; int row = blockIdx.y * (BLOCK_SIZE - WC + 1) + threadIdx.y; int row_i = row - WC + 1; int col_i = col - WC + 1; float tmp = 0; __shared__ float shm[BLOCK_SIZE][BLOCK_SIZE]; if (row_i < WA && row_i >= 0 && col_i < WA && col_i >= 0) { shm[threadIdx.y][threadIdx.x] = A[col_i * WA + row_i]; } else { shm[threadIdx.y][threadIdx.x] = 0; } __syncthreads(); if (threadIdx.y < (BLOCK_SIZE - WC + 1) && threadIdx.x < (BLOCK_SIZE - WC + 1) && row < (WB - WC + 1) && col < (WB - WC + 1)) { for (int i = 0; i< WC;i++) for (int j = 0;j<WC;j++) tmp += shm[threadIdx.y + i][threadIdx.x + j] * C[j*WC + i]; B[col*WB + row] = tmp; } } int main(int argc, char** argv) { CImg<unsigned char> src("a.png"); int width = src.width(); int height = src.height(); unsigned long size = src.size(); int A[width][width]; cudaError_t error; cudaEvent_t start_G, stop_G; cudaEventCreate(&start_G); cudaEventCreate(&stop_G); unsigned int size_A = WA * HA; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*)malloc(mem_size_A); unsigned int size_B = WB * HB; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*)malloc(mem_size_B); unsigned int size_C = WC * HC; unsigned int mem_size_C = sizeof(float) * size_C; float* h_C = (float*)malloc(mem_size_C); //Sobel derivada en x /*h_C[0]=-1;h_C[1]=0;h_C[2]=1; h_C[3]=-2;h_C[4]=0;h_C[5]=2; h_C[6]=-1;h_C[7]=0;h_C[8]=1;*/ //Sobel derivada en y /*h_C[0]=-1;h_C[1]=-2;h_C[2]=-1; h_C[3]=0;h_C[4]=0;h_C[5]=0; h_C[6]=1;h_C[7]=2;h_C[8]=1;*/ //filtro de prewit derivada en x /*h_C[0]=-1;h_C[1]=0;h_C[2]=1; h_C[3]=-1;h_C[4]=0;h_C[5]=1; h_C[6]=-1;h_C[7]=0;h_C[8]=1;*/ //filtro de scharr derivada en x /*h_C[0]=-3;h_C[1]=0;h_C[2]=3; h_C[3]=-10;h_C[4]=0;h_C[5]=10; h_C[6]=-1;h_C[7]=0;h_C[8]=1;*/ //filtro de laplace h_C[0]=-1;h_C[1]=-1;h_C[2]=-1; h_C[3]=-1;h_C[4]=8;h_C[5]=-1; h_C[6]=-1;h_C[7]=-1;h_C[8]=-1; //int B[xF][yF]={{0,0,0},{0,1,0},{0,0,0}}; int n=0; for (int x=0;x<width;x++){ for (int y=0;y<width;y++){ unsigned char r =(float)src(x,y,0,0); unsigned char g =(float)src(x,y,0,1); unsigned char b =(float)src(x,y,0,2); int _gray = (unsigned int)((float)(r + g + b) / 3.0f + 0.5); src(x,y,0,0)=_gray; src(x,y,0,1)=_gray; src(x,y,0,2)=_gray; A[x][y] =_gray; h_A[n]=_gray; //cout<<_gray<<" "; n++; } //cout<<endl; } src.display();// imagen transformada en esacala de grises float* d_A; float* d_B; float* d_C; cudaMalloc((void**)&d_A, mem_size_A); cudaMalloc((void**)&d_B, mem_size_B); cudaMalloc((void**)&d_C, mem_size_C); cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); cudaMemcpy(d_C, h_C, mem_size_C, cudaMemcpyHostToDevice); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((WB - 1) / (BLOCK_SIZE - WC + 1), (WB - 1) / (BLOCK_SIZE - WC + 1)); Convolucion << < grid, threads >> >(d_A, d_B, d_C, HA, WA, HB, WB, HC, WC); cudaMemcpy(h_B, d_B, mem_size_B, cudaMemcpyDeviceToHost); for (int i = 0;i < HB;i++) { for (int j = 0;j < WB;j++) { src(i,j,0,0)=h_B[i*HB + j]; src(i,j,0,1)=h_B[i*HB + j]; src(i,j,0,2)=h_B[i*HB + j]; 
//cout<<h_B[i*HB + j]<<" "; } } free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); src.display(); return EXIT_SUCCESS; }
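Neither build of this example inspects the result of the kernel launch (the error variable declared in main is never consulted). As a side sketch — the helper name below is made up, not something from the original file — launch and execution failures can be surfaced like this:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical helper: report both launch-configuration errors (caught by
// cudaGetLastError) and asynchronous execution errors (caught by the sync).
static void checkLastLaunch(const char* what) {
    cudaError_t launchErr = cudaGetLastError();
    cudaError_t syncErr   = cudaDeviceSynchronize();
    if (launchErr != cudaSuccess || syncErr != cudaSuccess) {
        std::fprintf(stderr, "%s failed: %s / %s\n", what,
                     cudaGetErrorString(launchErr), cudaGetErrorString(syncErr));
        std::exit(EXIT_FAILURE);
    }
}

// Usage, directly after the existing launch in main():
//   Convolucion<<<grid, threads>>>(d_A, d_B, d_C, HA, WA, HB, WB, HC, WC);
//   checkLastLaunch("Convolucion");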
dc9599a7a596c3802150614016b3b13f5c646c05.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_bicubic2d_out_frame( const int num_elements, const accscalar_t height_scale, const accscalar_t width_scale, const bool align_corners, const PackedTensorAccessor64<scalar_t, 4> idata, PackedTensorAccessor64<scalar_t, 4> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int input_height = idata.size(2); const int input_width = idata.size(3); const int output_height = odata.size(2); const int output_width = odata.size(3); if (index >= num_elements) { return; } // Special case: input and output are the same size, just copy const int output_x = index % output_width; const int output_y = index / output_width; if (input_height == output_height && input_width == output_width) { for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; c++) { const scalar_t val = idata[n][c][output_y][output_x]; odata[n][c][output_y][output_x] = val; } } return; } // Interpolation kernel accscalar_t real_x = area_pixel_compute_source_index( width_scale, output_x, align_corners, /*cubic=*/true); int in_x = floorf(real_x); accscalar_t t_x = real_x - in_x; accscalar_t real_y = area_pixel_compute_source_index( height_scale, output_y, align_corners, /*cubic=*/true); int in_y = floorf(real_y); accscalar_t t_y = real_y - in_y; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; c++) { accscalar_t coefficients[4]; for (int k = 0; k < 4; k++) { coefficients[k] = cubic_interp1d( upsample_get_value_bounded<scalar_t>( idata, n, c, input_height, input_width, in_y - 1 + k, in_x - 1), upsample_get_value_bounded<scalar_t>( idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 0), upsample_get_value_bounded<scalar_t>( idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 1), upsample_get_value_bounded<scalar_t>( idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 2), t_x); } odata[n][c][output_y][output_x] = static_cast<scalar_t>(cubic_interp1d( coefficients[0], coefficients[1], coefficients[2], coefficients[3], t_y)); } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_bicubic2d_backward_out_frame( const int num_elements, const accscalar_t height_scale, const accscalar_t width_scale, const bool align_corners, PackedTensorAccessor64<scalar_t, 4> idata, const PackedTensorAccessor64<scalar_t, 4> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int input_height = idata.size(2); const int input_width = idata.size(3); const int output_height = odata.size(2); const int output_width = odata.size(3); if (index >= num_elements) { return; } const int output_x = index % output_width; const int output_y = index / output_width; // special case: output_xust copy if (input_height == output_height && input_width == output_width) { for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = 
odata[n][c][output_y][output_x]; idata[n][c][output_y][output_x] = val; } } return; } accscalar_t real_x = area_pixel_compute_source_index( width_scale, output_x, align_corners, /*cubic=*/true); int input_x = floorf(real_x); accscalar_t t_x = real_x - input_x; accscalar_t real_y = area_pixel_compute_source_index( height_scale, output_y, align_corners, /*cubic=*/true); int input_y = floorf(real_y); accscalar_t t_y = real_y - input_y; accscalar_t x_coeffs[4]; accscalar_t y_coeffs[4]; get_cubic_upsampling_coefficients(x_coeffs, t_x); get_cubic_upsampling_coefficients(y_coeffs, t_y); for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { scalar_t out_value = odata[n][c][output_y][output_x]; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { upsample_increment_value_bounded<scalar_t, accscalar_t>( idata, n, c, input_height, input_width, input_y - 1 + i, input_x - 1 + j, out_value * y_coeffs[i] * x_coeffs[j]); } } } } } static void upsample_bicubic2d_out_cuda_template( const Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input.size(0); int channels = input.size(1); int input_height = input.size(2); int input_width = input.size(3); output.zero_(); const int num_output_elements = output_height * output_width; const int max_threads = ::min( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); // Launch kernel hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_bicubic2d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor64<scalar_t, 4>(); auto odata = output.packed_accessor64<scalar_t, 4>(); // Get scaling factors const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners, scales_h); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales_w); hipLaunchKernelGGL(( upsample_bicubic2d_out_frame<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_output_elements, max_threads)), dim3(max_threads), 0, stream, num_output_elements, rheight, rwidth, align_corners, idata, odata); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } static void upsample_bicubic2d_backward_out_cuda_template( const Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg}); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input_size[0]; int channels = input_size[1]; int input_height = input_size[2]; int input_width = input_size[3]; Tensor grad_output = grad_output_.contiguous(); grad_input.zero_(); const int num_kernels = output_height * output_width; const int num_threads = ::min( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_bicubic2d_backward_out_frame", [&] { using 
accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor64<scalar_t, 4>(); auto odata = grad_output.packed_accessor64<scalar_t, 4>(); const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners, scales_h); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales_w); hipLaunchKernelGGL(( upsample_bicubic2d_backward_out_frame<scalar_t, accscalar_t>) , dim3(cuda::ATenCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rheight, rwidth, align_corners, idata, odata); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } } // namespace TORCH_IMPL_FUNC(upsample_bicubic2d_out_cuda) ( const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const Tensor& output) { upsample_bicubic2d_out_cuda_template(output, input, output_size, align_corners, scales_h, scales_w); } TORCH_IMPL_FUNC(upsample_bicubic2d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const Tensor& grad_input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("upsample_bicubic2d_backward_out_cuda"); upsample_bicubic2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w); } } // namespace native } // namespace at
dc9599a7a596c3802150614016b3b13f5c646c05.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_bicubic2d_out_frame( const int num_elements, const accscalar_t height_scale, const accscalar_t width_scale, const bool align_corners, const PackedTensorAccessor64<scalar_t, 4> idata, PackedTensorAccessor64<scalar_t, 4> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int input_height = idata.size(2); const int input_width = idata.size(3); const int output_height = odata.size(2); const int output_width = odata.size(3); if (index >= num_elements) { return; } // Special case: input and output are the same size, just copy const int output_x = index % output_width; const int output_y = index / output_width; if (input_height == output_height && input_width == output_width) { for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; c++) { const scalar_t val = idata[n][c][output_y][output_x]; odata[n][c][output_y][output_x] = val; } } return; } // Interpolation kernel accscalar_t real_x = area_pixel_compute_source_index( width_scale, output_x, align_corners, /*cubic=*/true); int in_x = floorf(real_x); accscalar_t t_x = real_x - in_x; accscalar_t real_y = area_pixel_compute_source_index( height_scale, output_y, align_corners, /*cubic=*/true); int in_y = floorf(real_y); accscalar_t t_y = real_y - in_y; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; c++) { accscalar_t coefficients[4]; for (int k = 0; k < 4; k++) { coefficients[k] = cubic_interp1d( upsample_get_value_bounded<scalar_t>( idata, n, c, input_height, input_width, in_y - 1 + k, in_x - 1), upsample_get_value_bounded<scalar_t>( idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 0), upsample_get_value_bounded<scalar_t>( idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 1), upsample_get_value_bounded<scalar_t>( idata, n, c, input_height, input_width, in_y - 1 + k, in_x + 2), t_x); } odata[n][c][output_y][output_x] = static_cast<scalar_t>(cubic_interp1d( coefficients[0], coefficients[1], coefficients[2], coefficients[3], t_y)); } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_bicubic2d_backward_out_frame( const int num_elements, const accscalar_t height_scale, const accscalar_t width_scale, const bool align_corners, PackedTensorAccessor64<scalar_t, 4> idata, const PackedTensorAccessor64<scalar_t, 4> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int input_height = idata.size(2); const int input_width = idata.size(3); const int output_height = odata.size(2); const int output_width = odata.size(3); if (index >= num_elements) { return; } const int output_x = index % output_width; const int output_y = index / output_width; // special case: output_xust copy if (input_height == output_height && input_width == output_width) { for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][output_y][output_x]; idata[n][c][output_y][output_x] = val; } } return; } 
accscalar_t real_x = area_pixel_compute_source_index( width_scale, output_x, align_corners, /*cubic=*/true); int input_x = floorf(real_x); accscalar_t t_x = real_x - input_x; accscalar_t real_y = area_pixel_compute_source_index( height_scale, output_y, align_corners, /*cubic=*/true); int input_y = floorf(real_y); accscalar_t t_y = real_y - input_y; accscalar_t x_coeffs[4]; accscalar_t y_coeffs[4]; get_cubic_upsampling_coefficients(x_coeffs, t_x); get_cubic_upsampling_coefficients(y_coeffs, t_y); for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { scalar_t out_value = odata[n][c][output_y][output_x]; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { upsample_increment_value_bounded<scalar_t, accscalar_t>( idata, n, c, input_height, input_width, input_y - 1 + i, input_x - 1 + j, out_value * y_coeffs[i] * x_coeffs[j]); } } } } } static void upsample_bicubic2d_out_cuda_template( const Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input.size(0); int channels = input.size(1); int input_height = input.size(2); int input_width = input.size(3); output.zero_(); const int num_output_elements = output_height * output_width; const int max_threads = std::min( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); // Launch kernel cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_bicubic2d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor64<scalar_t, 4>(); auto odata = output.packed_accessor64<scalar_t, 4>(); // Get scaling factors const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners, scales_h); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales_w); upsample_bicubic2d_out_frame<scalar_t, accscalar_t> <<<cuda::ATenCeilDiv(num_output_elements, max_threads), max_threads, 0, stream>>>( num_output_elements, rheight, rwidth, align_corners, idata, odata); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } static void upsample_bicubic2d_backward_out_cuda_template( const Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg}); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input_size[0]; int channels = input_size[1]; int input_height = input_size[2]; int input_width = input_size[3]; Tensor grad_output = grad_output_.contiguous(); grad_input.zero_(); const int num_kernels = output_height * output_width; const int num_threads = std::min( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_bicubic2d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor64<scalar_t, 4>(); auto odata = 
grad_output.packed_accessor64<scalar_t, 4>(); const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( input_height, output_height, align_corners, scales_h); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales_w); upsample_bicubic2d_backward_out_frame<scalar_t, accscalar_t> <<<cuda::ATenCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>( num_kernels, rheight, rwidth, align_corners, idata, odata); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } // namespace TORCH_IMPL_FUNC(upsample_bicubic2d_out_cuda) ( const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const Tensor& output) { upsample_bicubic2d_out_cuda_template(output, input, output_size, align_corners, scales_h, scales_w); } TORCH_IMPL_FUNC(upsample_bicubic2d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const Tensor& grad_input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("upsample_bicubic2d_backward_out_cuda"); upsample_bicubic2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w); } } // namespace native } // namespace at
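The forward and backward kernels above lean on get_cubic_upsampling_coefficients / cubic_interp1d from UpSample.cuh without showing what the weights are. As a host-side sketch only — the spline parameter a = -0.75 and the exact polynomial forms are assumptions based on the common Keys cubic-convolution kernel, not taken from this file — the four 1-D weights for a fractional offset t in [0, 1) can be written as:

// Hypothetical host-side reference for the four cubic-convolution weights that
// the device helpers are understood to produce (a = -0.75 is an assumption).
static void cubic_weights_ref(double t, double w[4]) {
    const double a = -0.75;   // assumed spline parameter
    double x;
    x = t + 1.0;              // distance to the sample at offset -1 (1 < x < 2)
    w[0] = ((a * x - 5.0 * a) * x + 8.0 * a) * x - 4.0 * a;
    x = t;                    // offset 0 (0 <= x <= 1)
    w[1] = ((a + 2.0) * x - (a + 3.0)) * x * x + 1.0;
    x = 1.0 - t;              // offset +1
    w[2] = ((a + 2.0) * x - (a + 3.0)) * x * x + 1.0;
    x = 2.0 - t;              // offset +2 (1 < x < 2)
    w[3] = ((a * x - 5.0 * a) * x + 8.0 * a) * x - 4.0 * a;
}
// cubic_interp1d(p0, p1, p2, p3, t) then amounts to w[0]*p0 + w[1]*p1 + w[2]*p2 + w[3]*p3;
// the 2-D result interpolates four such row values with the same weights in y.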
d326a470a483fe5e171614a73252f76ae7256a28.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define N 5 void addMatrices(float *h_A, float *h_B, float *h_C); void fillMatrix(float *h_A); void printMatrix(float *A); int main(int argc, char const *argv[]) { float *h_A = (float *) malloc(N * N * sizeof(float)); float *h_B = (float *) malloc(N * N * sizeof(float)); float *h_C = (float *) malloc(N * N * sizeof(float)); fillMatrix(h_A); fillMatrix(h_B); addMatrices(h_A, h_B, h_C); printMatrix(h_C); free(h_A); free(h_B); free(h_C); return 0; } __global__ void matAddKernel(float *d_A, float *d_B, float *d_C, int size) { int i = threadIdx.x + blockDim.x * blockIdx.x; int element; if (i < size) { for (int j = 0; j < size; j++) { element = j * size + i; d_C[element] = d_A[element] + d_B[element]; //printf("Element %d from thread %d\n", element, i); } } } void fillMatrix(float *h_A) { int size = N * N; for (int i = 0; i < size; i++) { h_A[i] = i + 1; } } void addMatrices(float *h_A, float *h_B, float *h_C) { int size = N * N; int d_size = size * sizeof(float); float *d_A, *d_B, *d_C; // Allocate device memory for A, B, and C // copy h_A and h_B to device memory hipError_t err = hipMalloc((void**) &d_A, d_size); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } hipMemcpy(d_A, h_A, d_size, hipMemcpyHostToDevice); err = hipMalloc((void**) &d_B, d_size); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } hipMemcpy(d_B, h_B, d_size, hipMemcpyHostToDevice); err = hipMalloc((void**) &d_C, d_size); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } // Kernel launch code - to have the device to perform the actual matrix addition hipLaunchKernelGGL(( matAddKernel), dim3(ceil((N)/256.0)), dim3(256), 0, 0, d_A, d_B, d_C, N); // copy C from the device memory hipMemcpy(h_C, d_C, d_size, hipMemcpyDeviceToHost); // Free device vector (which represents our matrices) hipFree(d_A); hipFree(d_B); hipFree(d_C); } void printMatrix(float *A) { int size = N * N; for (int i = 0; i < size; i++) { if (i % N == 0 && i != 0) printf("\n"); printf("%d\t", (int)A[i]); } }
d326a470a483fe5e171614a73252f76ae7256a28.cu
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #define N 5 void addMatrices(float *h_A, float *h_B, float *h_C); void fillMatrix(float *h_A); void printMatrix(float *A); int main(int argc, char const *argv[]) { float *h_A = (float *) malloc(N * N * sizeof(float)); float *h_B = (float *) malloc(N * N * sizeof(float)); float *h_C = (float *) malloc(N * N * sizeof(float)); fillMatrix(h_A); fillMatrix(h_B); addMatrices(h_A, h_B, h_C); printMatrix(h_C); free(h_A); free(h_B); free(h_C); return 0; } __global__ void matAddKernel(float *d_A, float *d_B, float *d_C, int size) { int i = threadIdx.x + blockDim.x * blockIdx.x; int element; if (i < size) { for (int j = 0; j < size; j++) { element = j * size + i; d_C[element] = d_A[element] + d_B[element]; //printf("Element %d from thread %d\n", element, i); } } } void fillMatrix(float *h_A) { int size = N * N; for (int i = 0; i < size; i++) { h_A[i] = i + 1; } } void addMatrices(float *h_A, float *h_B, float *h_C) { int size = N * N; int d_size = size * sizeof(float); float *d_A, *d_B, *d_C; // Allocate device memory for A, B, and C // copy h_A and h_B to device memory cudaError_t err = cudaMalloc((void**) &d_A, d_size); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } cudaMemcpy(d_A, h_A, d_size, cudaMemcpyHostToDevice); err = cudaMalloc((void**) &d_B, d_size); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } cudaMemcpy(d_B, h_B, d_size, cudaMemcpyHostToDevice); err = cudaMalloc((void**) &d_C, d_size); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } // Kernel launch code - to have the device to perform the actual matrix addition matAddKernel<<<ceil((N)/256.0), 256>>>(d_A, d_B, d_C, N); // copy C from the device memory cudaMemcpy(h_C, d_C, d_size, cudaMemcpyDeviceToHost); // Free device vector (which represents our matrices) cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } void printMatrix(float *A) { int size = N * N; for (int i = 0; i < size; i++) { if (i % N == 0 && i != 0) printf("\n"); printf("%d\t", (int)A[i]); } }
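The kernel above assigns one thread per index i and loops over the other dimension inside the thread. As an alternative sketch — this is not the file's own kernel, just a common variant — one thread per element with a 2-D grid scales more naturally when N grows beyond a single block:

// Alternative sketch: one thread per matrix element, 2-D launch.
__global__ void matAddKernel2D(const float* d_A, const float* d_B, float* d_C, int size) {
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (row < size && col < size) {
        int element = row * size + col;   // element-wise add, so any consistent linear indexing works
        d_C[element] = d_A[element] + d_B[element];
    }
}

// Possible launch, mirroring the existing ceil-style sizing:
//   dim3 block(16, 16);
//   dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
//   matAddKernel2D<<<grid, block>>>(d_A, d_B, d_C, N);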
eb662aed724bb309c78db28f1a1f8fe42f4dfa64.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #define BLOCKSIZE 1024 #define MAXIT 1 #define TOTROWS (BLOCKSIZE*8) #define TOTCOLS (BLOCKSIZE*8) #define NOTSETLOC -1 // for cells that are not fixed #define QMAX(x,y) (((x) > (y))? (x): (y)) int *lkeepgoing; float *iplate; float *oplate; float *fixed; float *tmp; int ncols, nrows; double When(); void Compute(); int main(int argc, char *argv[]) { double t0, tottime; ncols = TOTCOLS; nrows = TOTROWS; hipMalloc((void **) &lkeepgoing, nrows * ncols * sizeof(int)); hipMalloc((void **) &iplate, nrows * ncols * sizeof(float)); hipMalloc((void **) &oplate, nrows * ncols * sizeof(float)); hipMalloc((void **) &fixed, nrows * ncols * sizeof(float)); fprintf(stderr,"Memory allocated\n"); t0 = When(); /* Now proceed with the Jacobi algorithm */ Compute(); tottime = When() - t0; printf("Total Time is: %lf sec.\n", tottime); return 0; } __global__ void InitArrays(float *ip, float *op, float *fp, int *kp, int ncols) { int i; float *fppos, *oppos, *ippos; int *kppos; int blockOffset; int rowStartPos; int colsPerThread; // Each block gets a row, each thread will fill part of a row // Calculate the offset of the row blockOffset = blockIdx.x * ncols; // Calculate our offset into the row rowStartPos = threadIdx.x * (ncols/blockDim.x); // The number of cols per thread colsPerThread = ncols/blockDim.x; ippos = ip + blockOffset+ rowStartPos; fppos = fp + blockOffset+ rowStartPos; oppos = op + blockOffset+ rowStartPos; kppos = kp + blockOffset+ rowStartPos; for (i = 0; i < colsPerThread; i++) { fppos[i] = NOTSETLOC; // Not Fixed ippos[i] = 50; oppos[i] = 50; kppos[i] = 1; // Keep Going } // Insert code to set the rest of the boundary and fixed positions } __global__ void doCalc(float *iplate, float *oplate, int ncols) { /* Compute the 5 point stencil for my region */ } __global__ void doCheck(float *iplate, float *oplate, float *fixed, int *lkeepgoing, int ncols) { // Calculate keepgoing array } __global__ void reduceSingle(int *idata, int *single, int nrows) { // Reduce rows to the first element in each row int i; int rowStartPos; int colsPerThread; int *mypart; extern __shared__ int parts[]; // Each block gets a row, each thread will reduce part of a row // Calculate our offset into the row rowStartPos = threadIdx.x * (nrows/blockDim.x); // The number of cols per thread colsPerThread = nrows/blockDim.x; //mypart = idata + blockOffset + rowStartPos; // sum my part of one dim array parts[threadIdx.x] = 0; for (i = rowStartPos; i < colsPerThread + start; i++) { parts[threadIdx.x] += idata[i]; } int tid = threadIdx.x if(tid <512) { parts[tid] += parts[tid+512];} __syncthreads(); if(tid <256) { parts[tid] += parts[tid+256];} __syncthreads(); if(tid <128) { parts[tid] += parts[tid+128];} __syncthreads(); if(tid <64) { parts[tid] += parts[tid+64];} __syncthreads(); if(tid <32) { parts[tid] += parts[tid+32];} __syncthreads(); if(tid == 0) { for(i=0;i<32;i++) { *single += parts[i]; } } } __global__ void reduceSum(int *idata, int *odata, unsigned int ncols) { // Reduce rows to the first element in each row int i; int blockOffset; int rowStartPos; int colsPerThread; int *mypart; // Each block gets a row, each thread will reduce part of a row // Calculate the offset of the row blockOffset = blockIdx.x * ncols; // Calculate our offset into the row rowStartPos = threadIdx.x * (ncols/blockDim.x); // The number of cols per thread colsPerThread = ncols/blockDim.x; mypart = idata 
+ blockOffset + rowStartPos; // Sum all of the elements in my thread block and put them // into the first column spot for (i = 1; i < colsPerThread; i++) { mypart[0] += mypart[i]; } __syncthreads(); // Wait for everyone to complete // Now reduce all of the threads in my block into the first spot for my row if(threadIdx.x == 0) { odata[blockIdx.x] = 0; for(i = 0; i < blockDim.x; i++) { odata[blockIdx.x] += mypart[i*colsPerThread]; } } // We cant synchronize between blocks, so we will have to start another kernel } void Compute() { int *keepgoing_single; int *keepgoing_sums; int keepgoing; int blocksize = BLOCKSIZE; int iteration; ncols = TOTCOLS; nrows = TOTROWS; // One block per row hipLaunchKernelGGL(( InitArrays), dim3(nrows), dim3(blocksize) , 0, 0, iplate, oplate, fixed, lkeepgoing, ncols); hipMalloc((void **)&keepgoing_single, 1 * sizeof(int)); keepgoing = 1; hipMalloc((void **)&keepgoing_sums, nrows * sizeof(int)); int *peek = (int *)malloc(nrows*sizeof(int)); for (iteration = 0; (iteration < MAXIT) && keepgoing; iteration++) { hipLaunchKernelGGL(( doCalc), dim3(nrows), dim3(blocksize) , 0, 0, iplate, oplate, ncols); hipLaunchKernelGGL(( doCheck), dim3(nrows), dim3(blocksize) , 0, 0, iplate, oplate, fixed, lkeepgoing, ncols); //reduce value to first cell in each row` hipLaunchKernelGGL(( reduceSum), dim3(nrows), dim3(blocksize), 0, 0, lkeepgoing, keepgoing_sums, ncols); // hipMemcpy(peek, keepgoing_sums, nrows*sizeof(int), hipMemcpyDeviceToHost); // fprintf(stderr, "after hipMemcpy \n"); // int i; // for(i = 0; i < nrows; i++) { // fprintf(stderr, "%d, ",peek[i]); // } // Now we have the sum for each row in the first column, // reduce to one value double t0 = When(); int singleLoop; for(singleLoop = 0; singleLoop < 1000; singleLoop++) {} hipLaunchKernelGGL(( reduceSingle), dim3(1), dim3(blocksize), blocksize*sizeof(int), 0, keepgoing_sums, keepgoing_single); } printf("reduce single:%f\n", When() - t0); keepgoing = 0; hipMemcpy(&keepgoing, keepgoing_single, 1 * sizeof(int), hipMemcpyDeviceToHost); fprintf(stderr, "keepgoing = %d\n", keepgoing); /* swap the new value pointer with the old value pointer */ tmp = oplate; oplate = iplate; iplate = tmp; } free(peek); hipFree(keepgoing_single); hipFree(keepgoing_sums); fprintf(stderr,"Finished in %d iterations\n", iteration); } /* Return the current time in seconds, using a double precision number. */ double When() { struct timeval tp; gettimeofday(&tp, NULL); return ((double) tp.tv_sec + (double) tp.tv_usec * 1e-6); }
eb662aed724bb309c78db28f1a1f8fe42f4dfa64.cu
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define BLOCKSIZE 1024
#define MAXIT 1
#define TOTROWS (BLOCKSIZE*8)
#define TOTCOLS (BLOCKSIZE*8)
#define NOTSETLOC -1 // for cells that are not fixed
#define QMAX(x,y) (((x) > (y))? (x): (y))

int *lkeepgoing;
float *iplate;
float *oplate;
float *fixed;
float *tmp;
int ncols, nrows;

double When();
void Compute();

int main(int argc, char *argv[])
{
    double t0, tottime;

    ncols = TOTCOLS;
    nrows = TOTROWS;
    cudaMalloc((void **) &lkeepgoing, nrows * ncols * sizeof(int));
    cudaMalloc((void **) &iplate, nrows * ncols * sizeof(float));
    cudaMalloc((void **) &oplate, nrows * ncols * sizeof(float));
    cudaMalloc((void **) &fixed, nrows * ncols * sizeof(float));
    fprintf(stderr, "Memory allocated\n");

    t0 = When();
    /* Now proceed with the Jacobi algorithm */
    Compute();
    tottime = When() - t0;

    printf("Total Time is: %lf sec.\n", tottime);
    return 0;
}

__global__ void InitArrays(float *ip, float *op, float *fp, int *kp, int ncols)
{
    int i;
    float *fppos, *oppos, *ippos;
    int *kppos;
    int blockOffset;
    int rowStartPos;
    int colsPerThread;

    // Each block gets a row, each thread will fill part of a row
    // Calculate the offset of the row
    blockOffset = blockIdx.x * ncols;
    // Calculate our offset into the row
    rowStartPos = threadIdx.x * (ncols/blockDim.x);
    // The number of cols per thread
    colsPerThread = ncols/blockDim.x;

    ippos = ip + blockOffset + rowStartPos;
    fppos = fp + blockOffset + rowStartPos;
    oppos = op + blockOffset + rowStartPos;
    kppos = kp + blockOffset + rowStartPos;

    for (i = 0; i < colsPerThread; i++) {
        fppos[i] = NOTSETLOC; // Not Fixed
        ippos[i] = 50;
        oppos[i] = 50;
        kppos[i] = 1;         // Keep Going
    }
    // Insert code to set the rest of the boundary and fixed positions
}

__global__ void doCalc(float *iplate, float *oplate, int ncols)
{
    /* Compute the 5 point stencil for my region */
}

__global__ void doCheck(float *iplate, float *oplate, float *fixed, int *lkeepgoing, int ncols)
{
    // Calculate keepgoing array
}

__global__ void reduceSingle(int *idata, int *single, int nrows)
{
    // Reduce rows to the first element in each row
    int i;
    int rowStartPos;
    int colsPerThread;
    extern __shared__ int parts[];

    // Each block gets a row, each thread will reduce part of a row
    // Calculate our offset into the row
    rowStartPos = threadIdx.x * (nrows/blockDim.x);
    // The number of cols per thread
    colsPerThread = nrows/blockDim.x;
    //mypart = idata + blockOffset + rowStartPos;

    // sum my part of one dim array
    parts[threadIdx.x] = 0;
    for (i = rowStartPos; i < rowStartPos + colsPerThread; i++) {
        parts[threadIdx.x] += idata[i];
    }
    __syncthreads(); // make all partial sums visible before the tree reduction

    int tid = threadIdx.x;
    if (tid < 512) { parts[tid] += parts[tid+512]; } __syncthreads();
    if (tid < 256) { parts[tid] += parts[tid+256]; } __syncthreads();
    if (tid < 128) { parts[tid] += parts[tid+128]; } __syncthreads();
    if (tid < 64)  { parts[tid] += parts[tid+64];  } __syncthreads();
    if (tid < 32)  { parts[tid] += parts[tid+32];  } __syncthreads();
    if (tid == 0) {
        int total = 0;
        for (i = 0; i < 32; i++) {
            total += parts[i];
        }
        *single = total; // write the block total (the output is not pre-zeroed)
    }
}

__global__ void reduceSum(int *idata, int *odata, unsigned int ncols)
{
    // Reduce rows to the first element in each row
    int i;
    int blockOffset;
    int rowStartPos;
    int colsPerThread;
    int *mypart;

    // Each block gets a row, each thread will reduce part of a row
    // Calculate the offset of the row
    blockOffset = blockIdx.x * ncols;
    // Calculate our offset into the row
    rowStartPos = threadIdx.x * (ncols/blockDim.x);
    // The number of cols per thread
    colsPerThread = ncols/blockDim.x;
    mypart = idata + blockOffset + rowStartPos;

    // Sum all of the elements in my thread block and put them
    // into the first column spot
    for (i = 1; i < colsPerThread; i++) {
        mypart[0] += mypart[i];
    }
    __syncthreads(); // Wait for everyone to complete

    // Now reduce all of the threads in my block into the first spot for my row
    if (threadIdx.x == 0) {
        odata[blockIdx.x] = 0;
        for (i = 0; i < blockDim.x; i++) {
            odata[blockIdx.x] += mypart[i*colsPerThread];
        }
    }
    // We cant synchronize between blocks, so we will have to start another kernel
}

void Compute()
{
    int *keepgoing_single;
    int *keepgoing_sums;
    int keepgoing;
    int blocksize = BLOCKSIZE;
    int iteration;

    ncols = TOTCOLS;
    nrows = TOTROWS;

    // One block per row
    InitArrays<<< nrows, blocksize >>>(iplate, oplate, fixed, lkeepgoing, ncols);

    cudaMalloc((void **)&keepgoing_single, 1 * sizeof(int));
    keepgoing = 1;
    cudaMalloc((void **)&keepgoing_sums, nrows * sizeof(int));
    int *peek = (int *)malloc(nrows*sizeof(int));

    for (iteration = 0; (iteration < MAXIT) && keepgoing; iteration++) {
        doCalc<<< nrows, blocksize >>>(iplate, oplate, ncols);
        doCheck<<< nrows, blocksize >>>(iplate, oplate, fixed, lkeepgoing, ncols);

        // reduce value to first cell in each row
        reduceSum<<< nrows, blocksize >>>(lkeepgoing, keepgoing_sums, ncols);
        // cudaMemcpy(peek, keepgoing_sums, nrows*sizeof(int), cudaMemcpyDeviceToHost);
        // fprintf(stderr, "after cudaMemcpy \n");
        // int i;
        // for(i = 0; i < nrows; i++) {
        //     fprintf(stderr, "%d, ", peek[i]);
        // }

        // Now we have the sum for each row in the first column,
        // reduce to one value
        double t0 = When();
        int singleLoop;
        for (singleLoop = 0; singleLoop < 1000; singleLoop++) {}
        reduceSingle<<< 1, blocksize, blocksize*sizeof(int) >>>(keepgoing_sums, keepgoing_single, nrows);
        printf("reduce single:%f\n", When() - t0);

        keepgoing = 0;
        cudaMemcpy(&keepgoing, keepgoing_single, 1 * sizeof(int), cudaMemcpyDeviceToHost);
        fprintf(stderr, "keepgoing = %d\n", keepgoing);

        /* swap the new value pointer with the old value pointer */
        tmp = oplate;
        oplate = iplate;
        iplate = tmp;
    }
    free(peek);
    cudaFree(keepgoing_single);
    cudaFree(keepgoing_sums);
    fprintf(stderr, "Finished in %d iterations\n", iteration);
}

/* Return the current time in seconds, using a double precision number. */
double When()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double) tp.tv_sec + (double) tp.tv_usec * 1e-6);
}
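The skeleton above intentionally leaves InitArrays' boundary setup and the doCalc/doCheck bodies as exercises. Below is a minimal sketch, assuming the same one-block-per-row launch with BLOCKSIZE threads, of how the 5-point stencil and the per-cell convergence flags could be computed; the names doCalcSketch/doCheckSketch, the extra fixed/nrows parameters, and the 0.1f steady-state threshold are illustrative assumptions, not part of the original handout.

// Hypothetical fill-in for the empty doCalc kernel: one block per row,
// each thread updates a contiguous slice of ncols/blockDim.x cells.
__global__ void doCalcSketch(const float *iplate, float *oplate, const float *fixed,
                             int ncols, int nrows)
{
    int row = blockIdx.x;
    int colsPerThread = ncols / blockDim.x;
    int colStart = threadIdx.x * colsPerThread;
    for (int c = colStart; c < colStart + colsPerThread; c++) {
        int idx = row * ncols + c;
        // Boundary and fixed cells keep their current value
        if (row == 0 || row == nrows-1 || c == 0 || c == ncols-1 || fixed[idx] != NOTSETLOC)
            oplate[idx] = iplate[idx];
        else
            oplate[idx] = 0.25f * (iplate[idx-1] + iplate[idx+1] +
                                   iplate[idx-ncols] + iplate[idx+ncols]);
    }
}

// Hypothetical fill-in for doCheck: mark a cell 1 while it is still changing
// by more than an assumed 0.1f tolerance, 0 once it has settled.
__global__ void doCheckSketch(const float *iplate, const float *oplate,
                              int *lkeepgoing, int ncols)
{
    int colsPerThread = ncols / blockDim.x;
    int idx = blockIdx.x * ncols + threadIdx.x * colsPerThread;
    for (int c = 0; c < colsPerThread; c++, idx++)
        lkeepgoing[idx] = (fabsf(oplate[idx] - iplate[idx]) > 0.1f) ? 1 : 0;
}

With this layout each thread walks a contiguous slice of its block's row, so the east/west neighbour loads stay coalesced, and the resulting lkeepgoing array can be collapsed to a single host-visible flag by the reduceSum/reduceSingle pair above.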
199294b64ddd473407e89693e3d2ee39342efdbe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "utils.h" __global__ void reduce_max_min(const float* const d_in, float* d_out, bool is_max=true) { extern __shared__ float partial[]; int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + tid; partial[tid] = d_in[idx]; // make sure all data in this block has loaded into shared memory __syncthreads(); for(unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1){ if(tid < stride){ if(is_max) partial[tid] = max(partial[tid], partial[tid+stride]); else partial[tid] = min(partial[tid], partial[tid+stride]); } // make sure all operations at one stage are done! __syncthreads(); } if(tid == 0) d_out[blockIdx.x] = partial[tid]; } void reduce(const float* const d_in,float &min_logLum,float &max_logLum,const size_t numRows,const size_t numCols) { const int BLOCK_SIZE = numCols; const int GRID_SIZE = numRows; // declare GPU memory pointers float * d_intermediate, *d_max, *d_min; // allocate GPU memory hipMalloc((void **) &d_intermediate, GRID_SIZE*sizeof(float)); hipMalloc((void **) &d_max, sizeof(float)); hipMalloc((void **) &d_min, sizeof(float)); // find maximum; // firstly, find the maximum in each block hipLaunchKernelGGL(( reduce_max_min), dim3(GRID_SIZE),dim3(BLOCK_SIZE), BLOCK_SIZE*sizeof(float), 0, d_in, d_intermediate, true); // then, find the global maximum hipLaunchKernelGGL(( reduce_max_min), dim3(1), dim3(GRID_SIZE), GRID_SIZE*sizeof(float), 0, d_intermediate, d_max, true); checkCudaErrors(hipMemset(d_intermediate,0,GRID_SIZE*sizeof(float))); // find minimum; // firstly, find the minimum in each block hipLaunchKernelGGL(( reduce_max_min), dim3(GRID_SIZE),dim3(BLOCK_SIZE), BLOCK_SIZE*sizeof(float), 0, d_in, d_intermediate,false); // then, find the global minimum hipLaunchKernelGGL(( reduce_max_min), dim3(1), dim3(GRID_SIZE), GRID_SIZE*sizeof(float), 0, d_intermediate, d_min, false); // transfer the output to CPU checkCudaErrors(hipMemcpy(&max_logLum, d_max, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&min_logLum, d_min, sizeof(float), hipMemcpyDeviceToHost)); // free GPU memory location checkCudaErrors(hipFree(d_intermediate)); checkCudaErrors(hipFree(d_max)); checkCudaErrors(hipFree(d_min)); return; } __global__ void hist(const float* const d_in, unsigned int * const d_out, const float logLumRange, const int min_logLum, const int numBins) { int idx = blockIdx.x * blockDim.x + threadIdx.x; float num = d_in[idx]; int bin_idx = (num - min_logLum)/logLumRange*numBins; if(bin_idx >= numBins) bin_idx--; atomicAdd(&(d_out[bin_idx]),1); } __global__ void prefixSum_HS(const unsigned int * const d_in, unsigned int * const d_out) { /* Hillis Steele Scan for d := 1 to log2n do forall k in parallel do if k 2^d then x[out][k] := x[in][k 2^d-1] + x[in][k] else x[out][k] := x[in][k] swap(in,out) This version can handle arrays only as large as can be processed by a single thread block running on one multiprocessor of a GPU */ extern __shared__ unsigned int temp[]; int tid = threadIdx.x; int pout = 0, pin = 1; // exclusicve scan temp[tid] = tid > 0? d_in[tid-1]:0; // make sure all data in this block are loaded into shared shared memory __syncthreads(); for(unsigned int stride = 1; stride < blockDim.x; stride <<= 1){ // swap double buffer indices pout = 1 - pout; pin = 1 - pout; if(tid >= stride) temp[pout*blockDim.x+tid] = temp[pin*blockDim.x+tid] + temp[pin*blockDim.x+tid - stride]; else temp[pout*blockDim.x+tid] = temp[pin*blockDim.x+tid]; // make sure all operations at one stage are done! 
__syncthreads(); } d_out[tid] = temp[pout*blockDim.x + tid]; } __global__ void prefixSum_BL(const unsigned int * const d_in, unsigned int * const d_out, const int nums) { /* Blelloch Scan : Up-Sweep(reduce) + Down-Sweep Up-Sweep: for d := 0 to log2n - 1 do for k from 0 to n 1 by 2^(d+1) in parallel do x[k + 2^(d + 1) - 1] := x[k + 2^d - 1] + x [k + 2^(d+1) - 1] Down-Sweep: x[n - 1] := 0 for d := log2n down to 0 do for k from 0 to n 1 by 2^(d+1) in parallel do t := x[k + 2^d- 1] x[k + 2^d - 1] := x [k + 2^(d+1) - 1] x[k + 2^(d+1) - 1] := t + x [k + 2^(d+1) - 1] */ extern __shared__ unsigned int temp[]; int tid = threadIdx.x; // exclusicve scan temp[2*tid] = d_in[2*tid]; if(2*tid+1 < nums) temp[2*tid+1] = d_in[2*tid+1]; else temp[2*tid+1] = 0; // make sure all data in this block are loaded into shared memory __syncthreads(); int stride = 1; // reduce step for(unsigned int d = blockDim.x; d > 0; d >>= 1){ if(tid < d){ int idx1 = (2*tid+1)*stride - 1; int idx2 = (2*tid+2)*stride - 1; temp[idx2] += temp[idx1]; } stride *= 2; // make sure all operations at one stage are done! __syncthreads(); } // Downsweep Step // set identity value if(tid == 0) temp[nums-1] = 0; for(unsigned int d = 1; d < nums; d <<= 1){ stride >>= 1; // make sure all operations at one stage are done! __syncthreads(); if( tid < d){ int idx1 = (2*tid+1)*stride - 1; int idx2 = (2*tid+2)*stride - 1; unsigned int tmp = temp[idx1]; temp[idx1] = temp[idx2]; temp[idx2] += tmp; } } // make sure all operations at the last stage are done! __syncthreads(); d_out[2*tid] = temp[2*tid]; if(2*tid+1 < nums) d_out[2*tid+1] = temp[2*tid+1]; } // Scan algorithm from Course : Hetergeneous Parallel Programming __global__ void prefixSum_HPP(const unsigned int * const d_in, unsigned int * const d_out, const int nums) { extern __shared__ unsigned int temp[]; int tid = threadIdx.x; // exclusicve scan if(tid == 0){ temp[2*tid] = 0; temp[2*tid+1] = d_in[2*tid]; } else{ temp[2*tid] = d_in[2*tid-1]; if(2*tid+1 < nums) temp[2*tid+1] = d_in[2*tid]; else temp[2*tid+1] = 0; } // make sure all data in this block are loaded into shared shared memory __syncthreads(); // Reduction Phase for(unsigned int stride = 1; stride <= blockDim.x; stride <<= 1){ // first update all idx == 2n-1, then 4n-1, then 8n-1 ... // finaly 2(blockDim.x/2) * n - 1(only 1 value will be updated partial[blockDim.x-1]) int idx = (tid+1)*stride*2 - 1; if( idx < 2*blockDim.x) temp[idx] += temp[idx-stride]; // make sure all operations at one stage are done! __syncthreads(); } // Example: // After reduction phase , position at 0, 1, 3, 7, ... has their final values (blockDim.x == 8) // then we update values reversely. // first use position 3's value to update position 5(stride == 2 == blockDim.x/4, idx == 3 == (0+1)*2*2-1, only 1 thread do calculation) // then use position 1 to update postion 2 , position 3 to update position 4, position 5 to update position 6 // (stride == 1 == blockDim.x/8, idx == (0+1)*1*2-1=1,(1+1)*1*2-1=3, (2+1)*1*2-1=5, 3 threads do calculation) // Post Reduction Reverse Phase for(unsigned int stride = blockDim.x/2; stride > 0; stride >>= 1){ // first update all idx == 2(blockDim.x/4) * n - 1 + blockDim.x/4, // then 2(blockDim.x/8)n-1+blockDim.x/8, then 2(blockDim.x/16)n-1 + blockDim.x/16... // finaly 2 * n - 1 int idx = (tid+1)*stride*2 - 1; if( idx + stride < 2*blockDim.x) temp[idx + stride] += temp[idx]; // make sure all operations at one stage are done! 
__syncthreads(); } // exclusive scan d_out[2*tid] = temp[2*tid]; if(2*tid+1 < nums) d_out[2*tid+1] = temp[2*tid+1]; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ // Step 1 : find minimum and maximum value reduce(d_logLuminance, min_logLum, max_logLum, numRows, numCols); // Step 2: find the range float logLumRange = max_logLum - min_logLum; // Step 3 : generate a histogram of all the values // declare GPU memory pointers unsigned int *d_bins; // allocate GPU memory checkCudaErrors(hipMalloc((void **) &d_bins, numBins*sizeof(unsigned int))); checkCudaErrors(hipMemset(d_bins,0,numBins*sizeof(unsigned int))); hipLaunchKernelGGL(( hist), dim3(numRows), dim3(numCols), 0, 0, d_logLuminance, d_bins, logLumRange, min_logLum, numBins); // Step 4 : prefix sum //prefixSum_HS<<<1, numBins, numBins*sizeof(unsigned int)>>>(d_bins, d_cdf); //prefixSum_HPP<<<1, ceil(numBins/2), numBins*sizeof(unsigned int)>>>(d_bins, d_cdf, numBins); hipLaunchKernelGGL(( prefixSum_BL), dim3(1), dim3(ceil(numBins/2)), numBins*sizeof(unsigned int), 0, d_bins, d_cdf, numBins); // free GPU memory allocation checkCudaErrors(hipFree(d_bins)); }
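This .hip file is the hipify-generated counterpart of the .cu source that follows; apart from the added hip_runtime.h include and the hip-prefixed runtime calls, the systematic difference is that triple-chevron launches become hipLaunchKernelGGL calls whose leading arguments are the kernel, grid, block, dynamic shared-memory size, and stream. As an illustration, one launch taken from these two files side by side:

// CUDA form (from the .cu twin below):
//   reduce_max_min<<<GRID_SIZE, BLOCK_SIZE, BLOCK_SIZE*sizeof(float)>>>(d_in, d_intermediate, true);
// HIP form emitted by hipify:
//   hipLaunchKernelGGL((reduce_max_min), dim3(GRID_SIZE), dim3(BLOCK_SIZE),
//                      BLOCK_SIZE*sizeof(float), 0, d_in, d_intermediate, true);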
199294b64ddd473407e89693e3d2ee39342efdbe.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "utils.h" __global__ void reduce_max_min(const float* const d_in, float* d_out, bool is_max=true) { extern __shared__ float partial[]; int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + tid; partial[tid] = d_in[idx]; // make sure all data in this block has loaded into shared memory __syncthreads(); for(unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1){ if(tid < stride){ if(is_max) partial[tid] = max(partial[tid], partial[tid+stride]); else partial[tid] = min(partial[tid], partial[tid+stride]); } // make sure all operations at one stage are done! __syncthreads(); } if(tid == 0) d_out[blockIdx.x] = partial[tid]; } void reduce(const float* const d_in,float &min_logLum,float &max_logLum,const size_t numRows,const size_t numCols) { const int BLOCK_SIZE = numCols; const int GRID_SIZE = numRows; // declare GPU memory pointers float * d_intermediate, *d_max, *d_min; // allocate GPU memory cudaMalloc((void **) &d_intermediate, GRID_SIZE*sizeof(float)); cudaMalloc((void **) &d_max, sizeof(float)); cudaMalloc((void **) &d_min, sizeof(float)); // find maximum; // firstly, find the maximum in each block reduce_max_min<<<GRID_SIZE,BLOCK_SIZE, BLOCK_SIZE*sizeof(float)>>>(d_in, d_intermediate, true); // then, find the global maximum reduce_max_min<<<1, GRID_SIZE, GRID_SIZE*sizeof(float)>>>(d_intermediate, d_max, true); checkCudaErrors(cudaMemset(d_intermediate,0,GRID_SIZE*sizeof(float))); // find minimum; // firstly, find the minimum in each block reduce_max_min<<<GRID_SIZE,BLOCK_SIZE, BLOCK_SIZE*sizeof(float)>>>(d_in, d_intermediate,false); // then, find the global minimum reduce_max_min<<<1, GRID_SIZE, GRID_SIZE*sizeof(float)>>>(d_intermediate, d_min, false); // transfer the output to CPU checkCudaErrors(cudaMemcpy(&max_logLum, d_max, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&min_logLum, d_min, sizeof(float), cudaMemcpyDeviceToHost)); // free GPU memory location checkCudaErrors(cudaFree(d_intermediate)); checkCudaErrors(cudaFree(d_max)); checkCudaErrors(cudaFree(d_min)); return; } __global__ void hist(const float* const d_in, unsigned int * const d_out, const float logLumRange, const int min_logLum, const int numBins) { int idx = blockIdx.x * blockDim.x + threadIdx.x; float num = d_in[idx]; int bin_idx = (num - min_logLum)/logLumRange*numBins; if(bin_idx >= numBins) bin_idx--; atomicAdd(&(d_out[bin_idx]),1); } __global__ void prefixSum_HS(const unsigned int * const d_in, unsigned int * const d_out) { /* Hillis Steele Scan for d := 1 to log2n do forall k in parallel do if k ≥ 2^d then x[out][k] := x[in][k − 2^d-1] + x[in][k] else x[out][k] := x[in][k] swap(in,out) This version can handle arrays only as large as can be processed by a single thread block running on one multiprocessor of a GPU */ extern __shared__ unsigned int temp[]; int tid = threadIdx.x; int pout = 0, pin = 1; // exclusicve scan temp[tid] = tid > 0? d_in[tid-1]:0; // make sure all data in this block are loaded into shared shared memory __syncthreads(); for(unsigned int stride = 1; stride < blockDim.x; stride <<= 1){ // swap double buffer indices pout = 1 - pout; pin = 1 - pout; if(tid >= stride) temp[pout*blockDim.x+tid] = temp[pin*blockDim.x+tid] + temp[pin*blockDim.x+tid - stride]; else temp[pout*blockDim.x+tid] = temp[pin*blockDim.x+tid]; // make sure all operations at one stage are done! 
__syncthreads(); } d_out[tid] = temp[pout*blockDim.x + tid]; } __global__ void prefixSum_BL(const unsigned int * const d_in, unsigned int * const d_out, const int nums) { /* Blelloch Scan : Up-Sweep(reduce) + Down-Sweep Up-Sweep: for d := 0 to log2n - 1 do for k from 0 to n – 1 by 2^(d+1) in parallel do x[k + 2^(d + 1) - 1] := x[k + 2^d - 1] + x [k + 2^(d+1) - 1] Down-Sweep: x[n - 1] := 0 for d := log2n down to 0 do for k from 0 to n – 1 by 2^(d+1) in parallel do t := x[k + 2^d- 1] x[k + 2^d - 1] := x [k + 2^(d+1) - 1] x[k + 2^(d+1) - 1] := t + x [k + 2^(d+1) - 1] */ extern __shared__ unsigned int temp[]; int tid = threadIdx.x; // exclusicve scan temp[2*tid] = d_in[2*tid]; if(2*tid+1 < nums) temp[2*tid+1] = d_in[2*tid+1]; else temp[2*tid+1] = 0; // make sure all data in this block are loaded into shared memory __syncthreads(); int stride = 1; // reduce step for(unsigned int d = blockDim.x; d > 0; d >>= 1){ if(tid < d){ int idx1 = (2*tid+1)*stride - 1; int idx2 = (2*tid+2)*stride - 1; temp[idx2] += temp[idx1]; } stride *= 2; // make sure all operations at one stage are done! __syncthreads(); } // Downsweep Step // set identity value if(tid == 0) temp[nums-1] = 0; for(unsigned int d = 1; d < nums; d <<= 1){ stride >>= 1; // make sure all operations at one stage are done! __syncthreads(); if( tid < d){ int idx1 = (2*tid+1)*stride - 1; int idx2 = (2*tid+2)*stride - 1; unsigned int tmp = temp[idx1]; temp[idx1] = temp[idx2]; temp[idx2] += tmp; } } // make sure all operations at the last stage are done! __syncthreads(); d_out[2*tid] = temp[2*tid]; if(2*tid+1 < nums) d_out[2*tid+1] = temp[2*tid+1]; } // Scan algorithm from Course : Hetergeneous Parallel Programming __global__ void prefixSum_HPP(const unsigned int * const d_in, unsigned int * const d_out, const int nums) { extern __shared__ unsigned int temp[]; int tid = threadIdx.x; // exclusicve scan if(tid == 0){ temp[2*tid] = 0; temp[2*tid+1] = d_in[2*tid]; } else{ temp[2*tid] = d_in[2*tid-1]; if(2*tid+1 < nums) temp[2*tid+1] = d_in[2*tid]; else temp[2*tid+1] = 0; } // make sure all data in this block are loaded into shared shared memory __syncthreads(); // Reduction Phase for(unsigned int stride = 1; stride <= blockDim.x; stride <<= 1){ // first update all idx == 2n-1, then 4n-1, then 8n-1 ... // finaly 2(blockDim.x/2) * n - 1(only 1 value will be updated partial[blockDim.x-1]) int idx = (tid+1)*stride*2 - 1; if( idx < 2*blockDim.x) temp[idx] += temp[idx-stride]; // make sure all operations at one stage are done! __syncthreads(); } // Example: // After reduction phase , position at 0, 1, 3, 7, ... has their final values (blockDim.x == 8) // then we update values reversely. // first use position 3's value to update position 5(stride == 2 == blockDim.x/4, idx == 3 == (0+1)*2*2-1, only 1 thread do calculation) // then use position 1 to update postion 2 , position 3 to update position 4, position 5 to update position 6 // (stride == 1 == blockDim.x/8, idx == (0+1)*1*2-1=1,(1+1)*1*2-1=3, (2+1)*1*2-1=5, 3 threads do calculation) // Post Reduction Reverse Phase for(unsigned int stride = blockDim.x/2; stride > 0; stride >>= 1){ // first update all idx == 2(blockDim.x/4) * n - 1 + blockDim.x/4, // then 2(blockDim.x/8)n-1+blockDim.x/8, then 2(blockDim.x/16)n-1 + blockDim.x/16... // finaly 2 * n - 1 int idx = (tid+1)*stride*2 - 1; if( idx + stride < 2*blockDim.x) temp[idx + stride] += temp[idx]; // make sure all operations at one stage are done! 
__syncthreads(); } // exclusive scan d_out[2*tid] = temp[2*tid]; if(2*tid+1 < nums) d_out[2*tid+1] = temp[2*tid+1]; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ // Step 1 : find minimum and maximum value reduce(d_logLuminance, min_logLum, max_logLum, numRows, numCols); // Step 2: find the range float logLumRange = max_logLum - min_logLum; // Step 3 : generate a histogram of all the values // declare GPU memory pointers unsigned int *d_bins; // allocate GPU memory checkCudaErrors(cudaMalloc((void **) &d_bins, numBins*sizeof(unsigned int))); checkCudaErrors(cudaMemset(d_bins,0,numBins*sizeof(unsigned int))); hist<<<numRows, numCols>>>(d_logLuminance, d_bins, logLumRange, min_logLum, numBins); // Step 4 : prefix sum //prefixSum_HS<<<1, numBins, numBins*sizeof(unsigned int)>>>(d_bins, d_cdf); //prefixSum_HPP<<<1, ceil(numBins/2), numBins*sizeof(unsigned int)>>>(d_bins, d_cdf, numBins); prefixSum_BL<<<1, ceil(numBins/2), numBins*sizeof(unsigned int)>>>(d_bins, d_cdf, numBins); // free GPU memory allocation checkCudaErrors(cudaFree(d_bins)); }
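Note that both copies run the scan with a single block (prefixSum_BL<<<1, ceil(numBins/2), ...>>>), so the CDF computation only works while the histogram fits in one block's shared memory. A small host-side exclusive-scan reference is handy for checking the d_cdf buffer after copying it back; the helper name exclusiveScanCPU below is my own and not part of the handout.

// Reference exclusive scan on the host: cdf[b] = sum of histo[0..b-1].
static void exclusiveScanCPU(const unsigned int *histo, unsigned int *cdf, size_t numBins)
{
    unsigned int running = 0;
    for (size_t b = 0; b < numBins; ++b) {
        cdf[b] = running;   // exclusive: does not include the current bin
        running += histo[b];
    }
}

Copying d_bins and d_cdf back with cudaMemcpy and comparing against this reference separates scan bugs from histogram bugs.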
26ff138152d4bfbc59dfd088e728be997cba21f8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include <math.h> #include <time.h> #include <hiprand/hiprand_kernel.h> #include <sys/time.h> #define TRUE 1 unsigned int EMPTY = UINT_MAX; char str[200]; int i, j, k, e; FILE *fp; int line=1; int nnos, idx_ni, nfol; // numero de nos, indice de nos internos, numero de folhas int hnnos; // tamanho da tabela hash int ennos; // tamanho do vetor com as distancias entre as especies (matriz triangular superior) int pos_ins, n_ins; // posicao de insercoes e numero de insercoes int *nz; // contem indice do no; para os nos a serem inseridos, contem o indice do ponto de insercao // para os nos internos a serem usados na insercao, contem -2 float *nz_br; // distancia do ramo (branch) float *nz_dr; // distancias ate o no raiz float *nz_de; // distancias entre especies int *nz_qf; // altura do no int *nz_qe; // quantidade de especies abaixo do no int *nz_p; // pai do no int *nz_f1; // filho da esquerda do no int *nz_f2; // filho da direita do no float *nz_trait; //caracterstica a ser comparada com cada espcie float *nz_class_range; //Faixa para as classe de distncia float *nz_class_value; //Valores medios de I de Moran por classe de distncia float *nz_class_media; //Valores medios de I de Moran por classe de distncia float *nz_class_variance; //Variancia para cada classe de distncia unsigned int *nz_sig; // assinatura do no - da o caminho em bits ate o raiz unsigned int *nz_hsig; // hash da assinatura do no unsigned int *nz_hval; // indice do no na tabela hash long long GPU_start_time; long long GPU_time; // pointers to GPU memory int *nz_d; float *nz_br_d; float *nz_dr_d; float *nz_de_d; int *nz_qf_d; int *nz_qe_d; int *nz_p_d; int *nz_f1_d; int *nz_f2_d; float *nz_trait_d; float *nz_class_range_d; float *nz_class_value_d; unsigned int *nz_sig_d; unsigned int *nz_hsig_d; unsigned int *nz_hval_d; //int pos_ins_d, idx_ni_d; // char *symb, **nz_sy; char str_tmp[100]; char str_float[30]; int nbint, nbuint, nbhuint, nbfloat, nbefloat; // tamanho em bytes dos tipos basicos hiprandState_t *seed_d; float zero = 0.0; // para facilitar impressao da matriz de distancias // Forward function declarations long long start_timer(); long long stop_timer(long long start_time, char *name); // print tree in newick format char *toNewick(int raiz, int base); // find next prime number greater than n int nextprime( int n ); // kernel __global__ void Load_memory_global_Gpu(int nnos, int *nz, float *nz_br, float *nz_dr, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2); __global__ void Load_memory_shared_Gpu(int nnos, int *nz, float *nz_br, float *nz_dr, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2); __global__ void Insert_tree_Gpu(int nnos, int hnnos, int pos_ins, int idx_ni, int *nz, float *nz_br, int *nz_qf, int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2, hiprandState_t *states, unsigned long seed); __global__ void Matrix_distance_Gpu(int nnos, int hnnos, int *nz, float *nz_br, float *nz_dr, float *nz_de, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2, unsigned int *nz_sig, unsigned int *nz_hsig, unsigned int *nz_hval); __global__ void I_moran_Gpu(int nnos, int nrClass, float *nz_de, float *nz_trait, float *nz_class_range, float *nz_class_value, float MeanY, float Variance); // auxiliary kernel functions __device__ int quadratic_probing_insert(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, int val, int hnnos); __device__ int 
quadratic_probing_search(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, int hnnos); __device__ inline void atomicFloatAdd(float *address, float val); // Main program int main(int argc, char *argv[]) { int qtdArvores = 1; int qtdBlock = 1; int qtdThreadsPerBlock = 1; int qualArvore = 0; int tipoTransferencia = 1; long long *vetorTempo; // printf("\nSyntax: newick <#qtdBlocos #qtdThreads <#numeroArvoreImprimir <#tipoTransferencia >>>"); // printf("\n tipoTransferencia:"); // printf("\n 1: replicao feita na GPU, utilizando a memria GLOBAL como origem da copia"); // printf("\n 2: replicao feita na GPU, utilizando a memria COMPARTILHADA para acelerar transferncia"); // printf("\n 3: replicao feita na CPU, em seguida, todos os dados so copiados para a GPU\n\n"); if (argc >= 2) sscanf(argv[1], "%d", &qtdBlock); if (argc >= 3) sscanf(argv[2], "%d", &qtdThreadsPerBlock); if (argc >= 4) sscanf(argv[3], "%d", &qualArvore); if (argc >= 5) sscanf(argv[4], "%d", &tipoTransferencia); // printf("qtdBlock: %d qtdThreadsPerBlock: %d => qtdArvores: %d) \n", qtdBlock, qtdThreadsPerBlock, qtdBlock*qtdThreadsPerBlock); // printf("qualArvore: %d\n", qualArvore); vetorTempo = (long long *) malloc(5 * sizeof(long long)); GPU_start_time = start_timer(); dim3 grid(qtdBlock), block(qtdThreadsPerBlock); //Total de threads a serem criadas, conforme a quantidade de blocos e threads por bloco qtdArvores = grid.x * block.x; // printf("qtdArvores %d. \n", qtdArvores); // fp = fopen("wellParser.out", "r"); // if (fp == NULL) { // printf("\nCannot open file\n"); // exit(0); // } fscanf(fp,"%d %d", &nnos, &idx_ni); // printf("No nos: %d, Indice no interno: %d\n", nnos, idx_ni); nfol = nnos / 2; fscanf(fp,"%d %d", &pos_ins, &n_ins); // printf("Inserir %d especies a partir de %d\n", n_ins, pos_ins); //printf("Arvore: "); nz = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", &nz[i]); //printf("%d ", nz[i]); // } // printf("\n"); // printf("Simbolos: "); symb = (char *) malloc(50); nz_sy = (char **) malloc(nnos * sizeof(char *)); for(i=0; i<nnos; i++) { fscanf(fp,"%s", symb); nz_sy[i] = (char *) malloc(50); strcpy(nz_sy[i], symb); //printf("%s ", nz_sy[i]); } //printf("\n"); nz_dr = (float *) malloc(nnos * sizeof(float) * qtdArvores); ennos = (nfol * (nfol - 1)) / 2; size_t tamanho_ui = (unsigned int) (ennos * sizeof(float) * qtdArvores) ; nz_de = (float *) malloc( tamanho_ui ); //printf("\n\nennos * sizeof(float) * qtdArvores(%d): %u\n\n", qtdArvores, tamanho_ui ); //printf("Ramos: "); nz_br = (float *) malloc(nnos * sizeof(float) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%f", &nz_br[i]); // printf("%.2f ", nz_br[i]); } // printf("\n"); // printf("No Filhos: "); nz_qf = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", &nz_qf[i]); // printf("%d ", nz_qf[i]); } //printf("\n"); // printf("No Especies: "); nz_qe = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", &nz_qe[i]); // printf("%d ", nz_qe[i]); } // printf("\n"); // printf("Pais: "); nz_p = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", &nz_p[i]); //printf("%d ", nz_p[i]); } //printf("\n"); //printf("Filhos 1: "); nz_f1 = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", &nz_f1[i]); //printf("%d ", nz_f1[i]); } //printf("\n"); //printf("Filhos 2: "); nz_f2 = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", 
&nz_f2[i]); // printf("%d ", nz_f2[i]); } //printf("\n"); //printf("Traits: "); nz_trait = (float *) malloc(nnos * sizeof(float) ); for(i=0; i<nnos; i++) { fscanf(fp,"%f", &nz_trait[i]); //printf("%f ", nz_trait[i]); } //printf("\n"); hnnos = nextprime(2*nnos); //printf("\nPrimo/hnnos = %d, qtdArvores = %d, hnnos*qtdArvores = %d\n", hnnos, qtdArvores, hnnos * qtdArvores); nz_sig = (unsigned int *) malloc(hnnos * sizeof(unsigned int) * qtdArvores); for(i=0; i<(hnnos * qtdArvores); i++) { nz_sig[i] = 0; } nz_hsig = (unsigned int *) malloc(hnnos * sizeof(unsigned int) * qtdArvores); nz_hval = (unsigned int *) malloc(hnnos * sizeof(unsigned int) * qtdArvores); for(i=0; i<(hnnos * qtdArvores); i++) { nz_hsig[i] = (unsigned int) EMPTY; nz_hval[i] = (unsigned int) EMPTY; } fclose(fp); toNewick(nnos-1, 0); //printf(";\n"); // move data to GPU nbint = nnos * sizeof(int); nbuint = nnos * sizeof(unsigned int); nbhuint = hnnos * sizeof(unsigned int); nbfloat = nnos * sizeof(float); nbefloat = ennos * sizeof(float); GPU_time = stop_timer(GPU_start_time, "\t Tempo: Preencher dados nas estruturas da CPU"); GPU_start_time = start_timer(); //hipMalloc((void **)&pos_ins_d, sizeof(int)); //hipMalloc((void **)&idx_ni_d, sizeof(int)); /* :printf("\n ________ \t tipos \t\t tipos*qtdArvores, nnos %d ", nnos); printf("\n nbint \t %d \t\t %d ", nbint, nbint * qtdArvores); printf("\n nbfloat \t %d \t\t %d ", nbfloat, nbfloat * qtdArvores); printf("\n nbefloat \t %d \t\t %d ", nbefloat, nbefloat * qtdArvores); printf("\n nbuint \t %d \t\t %d ", nbuint, nbuint * qtdArvores); printf("\n "); */ hipDeviceReset(); //printf("\ncurandState: %d\n", sizeof(hiprandState_t)); hipMalloc((void **)&nz_d, nbint * qtdArvores); hipMalloc((void **)&nz_br_d, nbfloat * qtdArvores); hipMalloc((void **)&nz_dr_d, nbfloat * qtdArvores); hipMalloc((void **)&nz_qf_d, nbint * qtdArvores); hipMalloc((void **)&nz_qe_d, nbint * qtdArvores); hipMalloc((void **)&nz_p_d, nbint * qtdArvores); hipMalloc((void **)&nz_f1_d, nbint * qtdArvores); hipMalloc((void **)&nz_f2_d, nbint * qtdArvores); hipMalloc((void **)&seed_d, nnos*sizeof(hiprandState_t)*qtdArvores); GPU_time = stop_timer(GPU_start_time, "\t Tempo: Alocar memria na GPU"); if( nz_d==0 ) { printf("couldn't allocate memory nz_d\n"); return 1; } if( nz_br_d==0 ) { printf("couldn't allocate memory nz_br_d\n"); return 1; } if( nz_dr_d==0 ) { printf("couldn't allocate memory nz_dr_d\n"); return 1; } if( nz_qf_d==0 ) { printf("couldn't allocate memory nz_qf_d\n"); return 1; } if( nz_qe_d==0 ) { printf("couldn't allocate memory nz_qe_d\n"); return 1; } if( nz_p_d==0 || nz_f1_d==0 || nz_f2_d==0 ) { printf("couldn't allocate memory 2\n"); return 1; } if(seed_d ==0 ) { printf("couldn't allocate memory seed_d\n"); return 1; } GPU_start_time = start_timer(); if (tipoTransferencia == 1 || tipoTransferencia == 2){ hipMemcpy(nz_d, nz, nbint, hipMemcpyHostToDevice); hipMemcpy(nz_br_d, nz_br, nbfloat, hipMemcpyHostToDevice); hipMemcpy(nz_qf_d, nz_qf, nbint, hipMemcpyHostToDevice); hipMemcpy(nz_qe_d, nz_qe, nbint, hipMemcpyHostToDevice); hipMemcpy(nz_p_d, nz_p, nbint, hipMemcpyHostToDevice); hipMemcpy(nz_f1_d, nz_f1, nbint, hipMemcpyHostToDevice); hipMemcpy(nz_f2_d, nz_f2, nbint, hipMemcpyHostToDevice); /* //hipMemcpy(pos_ins_d, pos_ins, sizeof(int), hipMemcpyHostToDevice); //hipMemcpy(idx_ni_d, idx_ni, sizeof(int), hipMemcpyHostToDevice); */ } GPU_time = stop_timer(GPU_start_time, "\t Tempo para copiar dados (bases) para memria"); vetorTempo[0] = GPU_time; int aux = sizeof(int)*nnos; /* OPES PARA GERAR OS 
DADOS NA GPU: 1. Copiar os elementos das estruturas e replica-los na gpu, utilizando memria GLOBAL 2. Copiar os elementos das estruturas e replica-los na gpu, utilizando memria COMPARTILHADA 3. Replicar os elementos na CPU e copia-los para A GPU (memria global) */ if (tipoTransferencia == 1) { GPU_start_time = start_timer(); hipLaunchKernelGGL(( Load_memory_global_Gpu), dim3(grid), dim3(block), aux, 0, nnos, nz_d, nz_br_d, nz_dr_d, nz_qf_d, nz_qe_d, nz_p_d, nz_f1_d, nz_f2_d); hipDeviceSynchronize(); GPU_time = stop_timer(GPU_start_time, "\t Tempo para copiar memria GPU (transferencia via memoria global)"); vetorTempo[0] += GPU_time; }else{ if (tipoTransferencia == 2){ GPU_start_time = start_timer(); hipLaunchKernelGGL(( Load_memory_shared_Gpu), dim3(grid), dim3(block), aux, 0, nnos, nz_d, nz_br_d, nz_dr_d, nz_qf_d, nz_qe_d, nz_p_d, nz_f1_d, nz_f2_d); hipDeviceSynchronize(); GPU_time = stop_timer(GPU_start_time, "\t Tempo para copiar memria GPU (transferencia via memoria compartilhada)"); vetorTempo[0] += GPU_time; }else{ GPU_start_time = start_timer(); int base = 0; for(i = 0; i < qtdArvores; i++) { if (i > 0) { for(j = 0; j < nnos; j++) { base = i * nnos; nz[base+j] = nz[j] + (nz[j] >= 0 ? base : 0); nz_br[base+j] = nz_br[j]; nz_dr[base+j] = 0; //nz_de[base+j] = nz_de[j]; nz_qf[base+j] = nz_qf[j]; nz_qe[base+j] = nz_qe[j]; nz_p[base+j] = nz_p[j] + (nz_p[j] >= 0 ? base : 0); nz_f1[base+j] = nz_f1[j] + (nz_f1[j] >= 0 ? base : 0); nz_f2[base+j] = nz_f2[j] + (nz_f2[j] >= 0 ? base : 0); } nz[nfol] = -i; } } hipMemcpy(nz_d, nz, nbint * qtdArvores, hipMemcpyHostToDevice); hipMemcpy(nz_br_d, nz_br, nbfloat * qtdArvores, hipMemcpyHostToDevice); hipMemcpy(nz_dr_d, nz_dr, nbfloat * qtdArvores, hipMemcpyHostToDevice); //hipMemcpy(nz_de_d, nz_de, nbefloat * qtdArvores, hipMemcpyHostToDevice); hipMemcpy(nz_qf_d, nz_qf, nbint * qtdArvores, hipMemcpyHostToDevice); hipMemcpy(nz_qe_d, nz_qe, nbint * qtdArvores, hipMemcpyHostToDevice); hipMemcpy(nz_p_d, nz_p, nbint * qtdArvores, hipMemcpyHostToDevice); hipMemcpy(nz_f1_d, nz_f1, nbint * qtdArvores, hipMemcpyHostToDevice); hipMemcpy(nz_f2_d, nz_f2, nbint * qtdArvores, hipMemcpyHostToDevice); GPU_time = stop_timer(GPU_start_time, "\t Tempo para copiar da CPU->GPU (carregar dados)"); vetorTempo[0] += GPU_time; } } hipDeviceSynchronize(); /************************************************** * * I N S E R I R E S P E C I E S P E R D I D A S * ******************************************************/ // call kernel GPU_start_time = start_timer(); if (n_ins > 0){ //se houver ns a inserir hipLaunchKernelGGL(( Insert_tree_Gpu), dim3(grid), dim3(block), aux, 0, nnos, hnnos, pos_ins, idx_ni, nz_d, nz_br_d, nz_qf_d, nz_qe_d, nz_p_d, nz_f1_d, nz_f2_d, seed_d, time(NULL)); printf("Erro (inserir): %s\n", hipGetErrorString( hipGetLastError() ) ); } hipDeviceSynchronize(); GPU_time = stop_timer(GPU_start_time, "\t Tempo para incluir ns na rvore"); vetorTempo[1] = GPU_time; //alocar memoria para outras vetores hipMalloc((void **)&nz_de_d, nbefloat * qtdArvores); hipMalloc((void **)&nz_sig_d, nbhuint * qtdArvores); hipMalloc((void **)&nz_hsig_d, nbhuint * qtdArvores); hipMalloc((void **)&nz_hval_d, nbhuint * qtdArvores); if( nz_de_d==0 ) { printf("couldn't allocate memory nz_de_d\n"); return 1; } if( nz_sig_d==0) { printf("couldn't allocate memory nz_sig_d\n"); return 1; } if( nz_hsig_d==0 ) { printf("couldn't allocate memory nz_hsig_d\n"); return 1; } if( nz_hval_d==0 ) { printf("couldn't allocate memory nz_hval_d\n"); return 1; } hipMemcpy(nz_sig_d, nz_sig, nbhuint * 
qtdArvores, hipMemcpyHostToDevice); hipMemcpy(nz_hsig_d, nz_hsig, nbhuint * qtdArvores, hipMemcpyHostToDevice); hipMemcpy(nz_hval_d, nz_hval, nbhuint * qtdArvores, hipMemcpyHostToDevice); /************************************************** * * C A L C U L A R A M A T R I Z D E D I S T A N C I A * ******************************************************/ hipDeviceSynchronize(); GPU_start_time = start_timer(); // int nb = qtdArvores; hipLaunchKernelGGL(( Matrix_distance_Gpu), dim3(qtdArvores), dim3(nfol), 0, 0, nnos, hnnos, nz_d, nz_br_d, nz_dr_d, nz_de_d, nz_qf_d, nz_qe_d, nz_p_d, nz_f1_d, nz_f2_d, nz_sig_d, nz_hsig_d, nz_hval_d); hipDeviceSynchronize(); printf("Erro (matrix distancia): %s\n", hipGetErrorString( hipGetLastError() ) ); GPU_time = stop_timer(GPU_start_time, "\t Tempo total para calcular a matriz de distncia"); vetorTempo[2] = GPU_time; GPU_start_time = start_timer(); // copy data back to the CPU //hipMemcpy(pos_ins, pos_ins_d, sizeof(int), hipMemcpyDeviceToHost); //hipMemcpy(idx_ni, idx_ni_d, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(nz, nz_d, nbint * qtdArvores, hipMemcpyDeviceToHost); hipMemcpy(nz_br, nz_br_d, nbfloat * qtdArvores, hipMemcpyDeviceToHost); hipMemcpy(nz_dr, nz_dr_d, nbfloat * qtdArvores, hipMemcpyDeviceToHost); hipMemcpy(nz_de, nz_de_d, nbefloat * qtdArvores, hipMemcpyDeviceToHost); hipMemcpy(nz_qf, nz_qf_d, nbint * qtdArvores, hipMemcpyDeviceToHost); hipMemcpy(nz_qe, nz_qe_d, nbint * qtdArvores, hipMemcpyDeviceToHost); hipMemcpy(nz_p, nz_p_d, nbint * qtdArvores, hipMemcpyDeviceToHost); hipMemcpy(nz_f1, nz_f1_d, nbint * qtdArvores, hipMemcpyDeviceToHost); hipMemcpy(nz_f2, nz_f2_d, nbint * qtdArvores, hipMemcpyDeviceToHost); hipMemcpy(nz_sig, nz_sig_d, nbhuint * qtdArvores, hipMemcpyDeviceToHost); hipMemcpy(nz_hsig, nz_hsig_d, nbhuint * qtdArvores, hipMemcpyDeviceToHost); hipMemcpy(nz_hval, nz_hval_d, nbhuint * qtdArvores , hipMemcpyDeviceToHost); GPU_time = stop_timer(GPU_start_time, "\t Tempo copiar dados de volta (GPU -> cpu): "); vetorTempo[3] = GPU_time; //Copiar dados da Gpu para cpu /* printf("\n\nImprimir uma arvore: \n"); toNewick(nnos-1, 0); printf(";\n"); */ hipDeviceSynchronize(); //Desalocar memoria da GPU para utilizar no prximo kernel GPU_start_time = start_timer(); hipFree(nz_d); hipFree(nz_br_d); hipFree(nz_dr_d); hipFree(nz_qf_d); hipFree(nz_qe_d); hipFree(nz_p_d); hipFree(nz_f1_d); hipFree(nz_f2_d); hipFree(nz_sig_d); hipFree(nz_hsig_d); hipFree(nz_hval_d); hipFree(seed_d); free(nz_qf); free(nz_qe); free(nz_f1); free(nz_f2); free(nz_p); free(nz_dr); free(nz_sig); free(nz_hsig); free(nz_hval); GPU_time = stop_timer(GPU_start_time, "\t Tempo: Liberar memria GPU"); /************************************************** * * C A L C U L A R I D E M O R A N * ******************************************************/ //Aloca posicoes em memoria para armazenar as classes de distncia int nrClass = 4; float maiorDistancia=0, menorDistancia = nz_de[0], salto; nz_class_range = (float *) malloc((nrClass+1) * sizeof(float)); nz_class_value = (float *) malloc(nrClass * sizeof(float) * qtdArvores); nz_class_media = (float *) malloc(nrClass * sizeof(float) ); nz_class_variance = (float *) malloc(nrClass * sizeof(float) ); //As classes so definidas de forma igual, entre o maior e menor valor for (i=0;i<ennos;i++){ if (maiorDistancia < nz_de[i]) maiorDistancia = nz_de[i]; if (menorDistancia > nz_de[i]) menorDistancia = nz_de[i]; } //nz_class_range[0] = menorDistancia; salto = (maiorDistancia - menorDistancia)/nrClass; // salto a "media" entre maior e menor, 
representando o salto entre as classes for(i=0;i<nrClass;i++){ nz_class_range[i] = menorDistancia; nz_class_value[i] = 0.0; menorDistancia += salto; } nz_class_range[0] -= nz_class_range[0]/2; //para incluir distancias iguais ao menor valor nz_class_range[i] = maiorDistancia; //realiza uma cpia do vetor de caractersticas (so as mesmas para todas as especies, independente da posio na rvore) hipMalloc((void **)&nz_trait_d, nbfloat); hipMalloc((void **)&nz_class_range_d, sizeof(float) * (nrClass+1)); //+1 para guardar a faixa final da classe hipMalloc((void **)&nz_class_value_d, sizeof(float) * nrClass * qtdArvores); hipMemcpy(nz_trait_d, nz_trait, nbfloat, hipMemcpyHostToDevice); hipMemcpy(nz_class_range_d, nz_class_range, sizeof(float) * (nrClass+1), hipMemcpyHostToDevice); hipMemcpy(nz_class_value_d, nz_class_value, sizeof(float) * nrClass * qtdArvores, hipMemcpyHostToDevice); float Variance, MeanY, SumW; SumW = 0; Variance = 0; for (int d=0;d<nfol;d++){ SumW = SumW + nz_trait[d]; Variance = Variance + pow(nz_trait[d],2); } MeanY = SumW / nfol; Variance = Variance - (pow(SumW, 2) / nfol); hipDeviceSynchronize(); GPU_start_time = start_timer(); aux = sizeof(float)*(nrClass+1); hipLaunchKernelGGL(( I_moran_Gpu), dim3(qtdArvores), dim3(nfol), aux, 0, nnos, nrClass, nz_de_d, nz_trait_d, nz_class_range_d, nz_class_value_d, MeanY, Variance); hipDeviceSynchronize(); printf("Erro (I_moran_Gpu): %s\n", hipGetErrorString( hipGetLastError() ) ); GPU_time = stop_timer(GPU_start_time, "\t Tempo para calcular o Indice de Moran): "); vetorTempo[4] = GPU_time; //Copiar dados da Gpu para cpu //Traz os resultados de volta (GPU para Host), as medias so armazenadas no incio do vetor hipMemcpy(nz_class_value, nz_class_value_d, nrClass * sizeof(float) * qtdArvores, hipMemcpyDeviceToHost); //Calcula a media por classe e a variancia float media; int nrArvore; for(i=0;i<nrClass;i++){ media = 0; for (nrArvore=i;nrArvore<(qtdArvores*nrClass);nrArvore+=nrClass){ media += nz_class_value[nrArvore]; } nz_class_media[i] = media / qtdArvores; } //calculo da variancia for(i=0;i<nrClass;i++){ media = 0; for (nrArvore=i;nrArvore<(qtdArvores*nrClass);nrArvore+=nrClass){ media += pow((nz_class_value[nrArvore] - nz_class_media[i]), 2); } nz_class_variance[i] = media / qtdArvores; } GPU_start_time = start_timer(); /************************************************** * * E X I B I R R E S U L T A D O S * ******************************************************/ /* printf("\nnz_sy, "); for (int jx=0;jx<qtdArvores;jx++) for(i=0;i<nnos;i++){ printf("%s,", nz_sy[i]); } printf("\nnz, "); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz[i]); } */ // printf("\nnz_br,"); // for(i=0;i<(qtdArvores*nnos);i++){ // printf("%f,", nz_br[i]); // if (i == 10000) //limitar impressao para nao deixar os arquivos muito grandes // break; // } /* printf("\nnz_dr,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%f,", nz_dr[i]); } printf("\nnz_qf,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz_qf[i]); } printf("\nnz_qe,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz_qe[i]); } printf("\nnz_p,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz_p[i]); // if (i == 10000) // break; } printf("\nnz_f1,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz_f1[i]); if (i == 10000) break; } printf("\nnz_f2,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz_f2[i]); if (i == 10000) break; } */ // printf("\nnz_class,"); // for(i=0;i<(nrClass);i++){ // printf("\t\n [%d] %f => value: %f ; media: %f ; variance: %f ", i, nz_class_range[i], 
nz_class_value[i], nz_class_media[i], nz_class_variance[i]); // } // printf("\n"); // printf("\n"); /* for(i=1;i<=qtdArvores;i++){ //toNewick((nnos)-1); toNewick((i*nnos)-1, (nnos*(i-1))); printf(";\n"); } printf("Pais: "); for(i=0; i<(nnos*qtdArvores); i++) { printf("%d ", nz_p[i]); } printf("\n"); printf("Dst Raiz: "); for(i=0; i<(nnos*qtdArvores); i++) { if ((i-((i/nnos)*nnos)) == nfol) continue; // desconta o no da posicao nfol printf("%.2f ", nz_dr[i]); // pois este nao e usado } printf("\n"); printf("Assinatura: "); for(i=0; (i<(hnnos * qtdArvores)); i++) { if ((i-((i/nnos)*nnos)) == nfol) continue; // desconta o no da posicao nfol if (i == nfol) continue; printf("%u ", nz_sig[i]); } printf("\n"); printf("Hash Sign: "); for(i=0; (i<(hnnos * qtdArvores)); i++) { if (i == nfol) continue; if ((i-((i/nnos)*nnos)) == nfol) continue; // desconta o no da posicao nfol printf("%u ", nz_hsig[i]); } printf("\n"); printf("Hash Val: "); for(i=0; (i<(hnnos * qtdArvores)); i++) { if ((i-((i/nnos)*nnos)) == nfol) continue; // desconta o no da posicao nfol if (i == nfol) continue; printf("%u ", nz_hval[i]); } printf("\n"); // */ // e = 0; // indexa a matriz triangular superior (representada num array) que contem a distancia // // entre as especies // printf("Distancias: \n"); // printf("%7s ", nz_sy[0]); // for(i=1; i<nfol; i++) // printf("%4s ", nz_sy[i]); // printf("\n"); // for(i=0; i<nfol; i++) { // printf("%3s ", nz_sy[i]); // // if (i >= (nfol-3)){ // for(j=0; j<=i; j++) // printf("%.2f ", zero); // for(k=i+1; k<nfol; k++) { // printf("%.2f ", nz_de[e+(qualArvore*ennos)]); // e++; // } // // } // printf("\n"); // } /* printf("\n\nnz_de: "); for(k=0; k<(ennos*qtdArvores); k++) { printf("%.2f ", nz_de[k]); } */ GPU_time = stop_timer(GPU_start_time, "\t Tempo mostrar dados em tela"); GPU_start_time = start_timer(); printf("\n"); free(nz); free(nz_br); free(nz_de); free(symb); free(nz_sy); GPU_time = stop_timer(GPU_start_time, "\t Tempo: Liberar memoria da CPU"); GPU_start_time = start_timer(); hipFree(nz_de_d); GPU_time = stop_timer(GPU_start_time, "\t Tempo: Liberar memria GPU (matriz de distancia)"); printf("\n\n=================== R e s u m o d o s T e m p o s ======================="); printf("\nCPU -> GPU\tIncluir Esp. 
\tMatriz dist.\tGPU-> CPU\tI de Moran (em sec)"); printf("\n%.5f \t", ((float) vetorTempo[0]) / (1000 * 1000)); printf("%.5f \t", ((float) vetorTempo[1]) / (1000 * 1000)); printf("%.5f \t", ((float) vetorTempo[2]) / (1000 * 1000)); printf("%.5f \t", ((float) vetorTempo[3]) / (1000 * 1000)); printf("%.5f \t\n", ((float) vetorTempo[4]) / (1000 * 1000)); free(vetorTempo); return 0; } // Returns the current time in microseconds long long start_timer() { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000 + tv.tv_usec; } // Prints the time elapsed since the specified time long long stop_timer(long long start_time, char *name) { struct timeval tv; gettimeofday(&tv, NULL); long long end_time = tv.tv_sec * 1000000 + tv.tv_usec; //printf("%s: %.5f sec\n", name, ((float) (end_time - start_time)) / (1000 * 1000)); return end_time - start_time; } char *toNewick(int idRaiz, int base) { strcpy(str_tmp,""); strcpy(str_float,""); if (nz_f1[idRaiz] < 0) { // No tem filhos if ((idRaiz-base) < 0 || (idRaiz-base) > (nnos-1)) //printf("ERRO %d\n", (idRaiz-base)); else strcat (str_tmp, nz_sy[idRaiz-base]); strcat (str_tmp, ":"); //sprintf(str_float,"%0.2f", nz_br[idRaiz]); strcat (str_tmp, str_float); return str_tmp; } else { // Tem filhos // printf("("); // printf("%s", toNewick(nz_f1[idRaiz], base)); // printf(","); // printf("%s", toNewick(nz_f2[idRaiz], base)); // printf(")"); // printf("%s", nz_sy[idRaiz-base]); // printf(":"); // sprintf(str_float,"%0.2f", nz_br[idRaiz]); // printf("%s", str_float); return ""; } } int nextprime( int n ) { int Divisor, PossiblePrime; int FoundPrime; PossiblePrime = n; if( PossiblePrime <= 2 ) PossiblePrime = 2; else if( PossiblePrime != 3 ) { if( PossiblePrime % 2 == 0 ) PossiblePrime++; /* Need An Odd Number */ for( ; ; PossiblePrime += 2 ) { FoundPrime = !TRUE; for( Divisor = 3; PossiblePrime % Divisor; Divisor += 2 ) if( Divisor * Divisor > PossiblePrime ) { FoundPrime = TRUE; break; } if( FoundPrime ) break; } } return PossiblePrime; } __device__ int quadratic_probing_insert(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, int val, int hnnos) { unsigned int j, hk, old; int ib = blockIdx.x; // identificador do bloco j = 0; hk = sig % hnnos; while(j < hnnos) { old = atomicCAS(&nz_hsig[hk+ib*hnnos], UINT_MAX, sig); // se posicao estiver vazia (UINT_MAX = EMPTY) if (old == UINT_MAX) { nz_hval[hk+ib*hnnos] = val; return (hk+ib*hnnos); } j++; hk = (hk + j * j) % hnnos; // hk = (hk + j) % hnnos; } return (-1); } __device__ int quadratic_probing_search(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, int hnnos) { unsigned int j, hk; int ib = blockIdx.x; // identificador do bloco j = 0; hk = sig % hnnos; while(j < hnnos) { if (nz_hsig[hk+ib*hnnos] == sig) { return (nz_hval[hk+ib*hnnos]); } j++; hk = (hk + j * j) % hnnos; // hk = (hk + j) % hnnos; } return (-1); } // estas duas funcoes sao usada para mapear os indices de um array para uma matriz triangular // superior correspondente (sem a diagonal). 
para uma matriz nxn, o array ter n(n-1)/2 elementos __host__ __device__ int row_index( int i, int M ){ // retorna o indice da linha M--; float m = M; float row = (-2*m - 1 + sqrt( (4*m*(m+1) - 8*(float)i - 7) )) / -2; if( row == (float)(int) row ) row -= 1; return (int) row; } __host__ __device__ int column_index( int i, int M ){ // retorna o indice da coluna int row = row_index( i, M); M--; return 1 + (i - M * row + row*(row+1) / 2); } __global__ void Load_memory_global_Gpu(int nnos, int *nz, float *nz_br, float *nz_dr, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2) { int i = threadIdx.x; // identificador da thread int index, new_index; int base = (blockIdx.x * blockDim.x * nnos) + nnos * i; //Todas as threads copiam os dados para suas respectivas reas for(index = 0; index < nnos; index++){ new_index = base+index; nz[new_index] = nz[index] + (nz[index] >= 0 ? base : 0); nz_br[new_index] = nz_br[index]; nz_dr[new_index] = 0; nz_qf[new_index] = nz_qf[index]; nz_qe[new_index] = nz_qe[index]; nz_p[new_index] = nz_p[index] + (nz_p[index] >= 0 ? base : 0); nz_f1[new_index] = nz_f1[index] + (nz_f1[index] >= 0 ? base : 0); nz_f2[new_index] = nz_f2[index] + (nz_f2[index] >= 0 ? base : 0); } } __global__ void Load_memory_shared_Gpu(int nnos, int *nz, float *nz_br, float *nz_dr, int *nz_qf, int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2) { extern __shared__ float nzTemp[]; int i = threadIdx.x; // identificador da thread int index; int base = (blockIdx.x * blockDim.x * nnos) + nnos * i; //Copiar dados do vetor NZ if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz[base+index] = (int) (nzTemp[index] + (nzTemp[index] >= 0 ? base : 0)); __syncthreads(); //Copiar dados do vetor BR if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_br[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz_br[base+index] = nzTemp[index]; __syncthreads(); //Copiar dados do vetor QF if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_qf[index]; __syncthreads(); for(index = 0; index < nnos; index++){ nz_dr[base+index] = 0; nz_qf[base+index] = nzTemp[index]; } __syncthreads(); //Copiar dados do vetor QE if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_qe[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz_qe[base+index] = nzTemp[index]; __syncthreads(); //Copiar dados do vetor P if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_p[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz_p[base+index] = (int) (nzTemp[index] + (nzTemp[index] >= 0 ? base : 0)); __syncthreads(); //Copiar dados do vetor F1 if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_f1[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz_f1[base+index] = (int) (nzTemp[index] + (nzTemp[index] >= 0 ? base : 0)); __syncthreads(); //Copiar dados do vetor F2 if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_f2[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz_f2[base+index] = (int) (nzTemp[index] + (nzTemp[index] >= 0 ? 
base : 0)); // __syncthreads(); } __global__ void Insert_tree_Gpu(int nnos, int hnnos, int pos_ins, int idx_ni, int *nz, float *nz_br, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2, hiprandState_t *states, unsigned long seed) { int i = threadIdx.x; // identificador da thread float x; // valor gerado aleatoriamente unsigned int valor2; // numero entre 1 e maximo inteiro sem sinal unsigned int valor1; // numero entre 1 e altura da sub-arvore unsigned int shift = 8*sizeof(unsigned int)-1; // bits estao na faixa 0-31, e nao em 1-32 unsigned int mask=1<<shift; // recebe 1 deslocado 31 vezes p/ direita // (10000000 00000000 00000000 00000000) __shared__ int nfol; // numero de folhas da arvore int indMdcc; // no a partir do qual sera inserido uma especie int indNewNode; // aponta para o no internto a ser inserido junto com a especie a ser inserida // idx_ni e o indice inicial dos nos internos a serem inseridos. Este indice // cresce da direita para a esquerda. Veja que pos_ins aponta para a primeira // especie a ser inserida. Serao inseridas nfol-pos_ins+1 especies. int indSisterSpecies; int index; int indNewSpecies; int base = (blockIdx.x * blockDim.x * nnos) + nnos * i; index = 0; indSisterSpecies = 0; nfol = nnos / 2; // folhas estao na metade inferior hiprand_init(seed+i, base, 0, &states[base]); // Initialize CURAND for(indNewSpecies=(base+pos_ins);indNewSpecies < (base+nfol);indNewSpecies++){ hiprand(&states[base]); x = hiprand_uniform (&states[base]); // gera numero aleatorio indNewNode = base + (idx_ni - ((indNewSpecies-base) - pos_ins)); // recebe um no interno a ser usado na insercao das especies indMdcc = nz[indNewSpecies]; // a posicao species [pos_ins <= species < nfol] contem o indice do no interno que // sera usado para inserir a especie, i.e., ponto inicial de insercao (MDCC-most derived consensus clade) valor1 = (int) (1 + x*nz_qf[indMdcc]); // numero entre 1 e altura da sub-arvore valor2 = (unsigned int) (1 + x*UINT_MAX); // numero entre 1 e maximo inteiro sem sinal // a insercao e feita a partir do ponto de insercao mas seguindo os bits de valor2 // se o bit for 1 avanca para a esquerda (f1) e se for 0 avanca para a direita (f2) if (indMdcc <= (base+nfol)) //Se o ponto de insero for uma folha, ento sobe um nvel indMdcc = nz_p[indMdcc]; else while (valor1 > 0) { // faca enquando nao alcancar a altura do no em questao ou um no // folha seja alcancado. 
if(valor2 & mask) if (nz_f1[indMdcc] <= (base+nfol)) break; else indMdcc = nz_f1[indMdcc]; // avanca para proximo filho else if (nz_f2[indMdcc] <= (base+nfol)) break; else indMdcc = nz_f2[indMdcc]; // avanca para proximo filho valor2 <<= 1; // avanca para proximo bit valor1--; // diminui altura da arvore } // // convencao: f1 aa esquerda e f2 aa direita // x = hiprand_uniform (&states[base]); // gera numero aleatorio - reuso de x if(valor2 & mask) { // insere no aa direita (f2) do no folha (especie) atual (f1) indSisterSpecies = nz_f1[indMdcc]; //n a partir do qual o calculo do brach para a nova especie ser realizado nz_f1[indNewNode] = nz_f1[indMdcc]; nz_f2[indNewNode] = indNewSpecies; nz_p[nz_f1[indNewNode]] = indNewNode; nz_f1[indMdcc] = indNewNode; nz_br[indNewNode] = x * nz_br[nz_f1[indNewNode]]; nz_br[nz_f1[indNewNode]] -= nz_br[indNewNode]; nz_qf[indNewNode] = nz_qf[nz_f1[indNewNode]]++; nz_qe[indNewNode] = nz_qe[nz_f1[indNewNode]]++; } else { // insere no aa esquerda (f1) do no folha (especie) atual (f2) indSisterSpecies = nz_f2[indMdcc]; //n a partir do qual o calculo do brach para a nova especie ser realizado nz_f1[indNewNode] = indNewSpecies; nz_f2[indNewNode] = nz_f2[indMdcc]; nz_p[nz_f2[indNewNode]] = indNewNode; nz_f2[indMdcc] = indNewNode; //Dividir o branch do n "quebrado", de forma proporcional para o novo n PAI (indNewNode) nz_br[indNewNode] = x * nz_br[nz_f2[indNewNode]]; nz_br[nz_f2[indNewNode]] -= nz_br[indNewNode]; nz_qf[indNewNode] = nz_qf[nz_f2[indNewNode]]++; //Atualizar informacoes de quantidade de especies nz_qe[indNewNode] = nz_qe[nz_f2[indNewNode]]++; } //atualiza vetor de pais nz_p[indNewSpecies] = indNewNode; nz_p[indNewNode] = indMdcc; nz_qe[indNewSpecies] = 1; //atualizar a qtde de especies e qtd de filhos index = nz_p[indNewNode]; x = nnos/2; while( index > -1 || x <= 0 ){ nz_qe[index] += 1; // if (nz_f1[index] == -2 || nz_f2[index] == -2) break; if ( (nz_f1[index] >= (base+nfol) && nz_qf[nz_f1[index]] >= nz_qf[index]) || (nz_f2[index] >= (base+nfol) && nz_qf[nz_f2[index]] >= nz_qf[index])) nz_qf[index] += 1; index = nz_p[index]; x--; } //Calcular distancia para o n inserido x = hiprand_uniform (&states[base]); // gera numero aleatorio - reuso de x if (indSisterSpecies < (base+nfol)) //se irma eh folha, ento branch deve possuir tamanho igual a irma nz_br[indNewSpecies] = nz_br[indSisterSpecies]; else { valor2 = (unsigned int) (1 + x*UINT_MAX); // numero entre 1 e maximo inteiro sem sinal nz_br[indNewSpecies] = 0.0; index = indSisterSpecies; while (true){ nz_br[indNewSpecies] += nz_br[index]; if (valor2 & mask){ if (nz_f1[index] == -2) break; index = nz_f1[index]; }else{ if (nz_f2[index] == -2) break; index = nz_f2[index]; } } } } } __global__ void Matrix_distance_Gpu(int nnos, int hnnos, int *nz, float *nz_br, float *nz_dr, float *nz_de, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2, unsigned int *nz_sig, unsigned int *nz_hsig, unsigned int *nz_hval) { float y; // acumula soma das arestas __shared__ int nfol; // numero de folhas da arvore int j; // indice para thread ativa int a, b; // usados no calculo da faixa de elementos (da matriz triangular) a serem considerados unsigned int sig1, sig2, sig3, sig4; // assinaturas de tres nos - da o caminho em bits ate o raiz int bit; // contem bit sendo analizado int ancc; // indice do ancestral comum int nthreads; // numero de threads ativas int r, c; // linha e coluna da matriz triangular superior int bits; // conta quantos bits sao iguais int i = threadIdx.x; // identificador da thread int ib = 
blockIdx.x; // identificador do bloco int it; // indice de acesso global das threads int ennos; // tamanho da matriz de distancias nfol = nnos / 2; // folhas estao na metade inferior ennos = (nfol * (nfol - 1)) / 2; it = i + ib*nnos; if (i < nfol) { // nos folhos calculam distancia ate a raiz e armazena o caminho (assinatura // em bits) at a raiz y = 0; j = it; // associa threads com nos folhas nz_sig[it] = 1; while (j != -1) { y = y + nz_br[j]; // acumula a distancia if (nz_p[j] == -1) break; nz_sig[it] <<= 1; // acumula o caminho if (nz_f1[nz_p[j]] == j) // acrescenta 0 se vier da direita (f2) nz_sig[it]++; // ou 1 se vier da esquerda (f1) j = nz_p[j]; } quadratic_probing_insert(nz_hsig, nz_hval, nz_sig[it], it, hnnos); nz_dr[it] = y; } __syncthreads(); // espera todas as threads chegarem at aqui if (i < (nfol-1)) { // nos internos calculam distancia ate a raiz e armazena o caminho // (assinatura em bits) at o raiz y = 0; j = it+nfol+1; // associa threads com os nos internos nz_sig[j] = 1; if (nz_p[j] == -1) j = -1; while (j != -1) { y = y + nz_br[j]; // acumula a distancia if (nz_p[j] == -1) break; nz_sig[it+nfol+1] <<= 1; // acumula o caminho if (nz_f1[nz_p[j]] == j) // acrescenta 0 se vier da direita (f2) nz_sig[it+nfol+1]++; // ou 1 se vier da esquerda (f1) j = nz_p[j]; } quadratic_probing_insert(nz_hsig, nz_hval, nz_sig[it+nfol+1], (it+nfol+1), hnnos); nz_dr[it+nfol+1] = y; } __syncthreads(); // espera todas as threads chegarem at aqui // se nfol (numero de especies) for impar, usamos nfol threads // se nfol (numero de especies) for par, usamos nfol-1 threads // isso evita termos que tratar de elementos restantes if ( (nfol % 2) == 0) { nthreads = nfol - 1; // nfol par: cada thread calcula nfol/2 distancias a = nfol / 2; // quantidade de elementos por thread } else { nthreads = nfol; // nfol mpar: cada thread calcula (nfol-1)/2 distancias a = (nfol - 1) / 2; // quantidade de elementos por thread } if (i < nthreads) { for( b = i*a; b < a+(i*a); b++) { r = row_index(b, nfol); c = column_index(b, nfol); sig1 = nz_sig[r+ib*nnos]; sig2 = nz_sig[c+ib*nnos]; sig3 = 1; // inicia com 1 para diferenciar das demais assinaturas, i.e., 10, 100 etc bits = 0; // conta quantos bits sao iguais sig4 = 1; // recebe assinatura invertida while ( (sig1 & 1) == (sig2 & 1) && bits < 32) { // compara bit menos significativo bit = (sig1 & 1); bits++; sig1 >>= 1; // avanca para proximo bit sig2 >>= 1; // avanca para proximo bit sig3 <<= 1; // armazena bits coincidentes - caminho do ancestral comum if (bit) sig3++; // soma 1 ou 0 } while (bits>0) { // inverte a assinatura coincidente incluindo um 1 mais a esquerda sig4 <<= 1; if (sig3 & 1) sig4++; sig3 >>= 1; bits--; } ancc = quadratic_probing_search(nz_hsig, nz_hval, sig4, hnnos); nz_de[b+ib*ennos] = nz_dr[r+ib*nnos] + nz_dr[c+ib*nnos] - 2*nz_dr[ancc]; } } } /* Calcular o I de Moran para cada classe. Sao diversas arvores, cada uma tera o I de Moran para cada classe (nz_class), em seguida faz-se a media e calcula a variancia entre elas. Return: I de Moran por classe e a variancia para cada classe. 
*/
__global__ void I_moran_Gpu(int nnos, int nrClass, float *nz_de, float *nz_trait, float *nz_class_range, float *nz_class_value, float MeanY, float Variance){
    int d, r, c, a, b;
    int nfol, nthreads;
    float SumProdCross, SumW, w;
    short int p;
    int i = threadIdx.x;  // thread identifier
    int ib = blockIdx.x;  // block identifier
    int ennos;
    int base;
    __shared__ float sumTotal, sumTotalProdCross;
    extern __shared__ float nzClass[];
    for(d=0;d<nrClass;d++){ nzClass[d] = nz_class_range[d]; }
    nfol = nnos/2;
    ennos = (nfol * (nfol - 1)) / 2;
    base = ib * ennos;
    SumW = 0;
    if ( (nfol % 2) == 0) {
        nthreads = nfol - 1;  // nfol is even: each thread computes nfol/2 distances
        a = nfol / 2;         // number of elements per thread
    } else {
        nthreads = nfol;      // nfol is odd: each thread computes (nfol-1)/2 distances
        a = (nfol - 1) / 2;   // number of elements per thread
    }
    w = 1;
    p = 2; //Symmetric
    //Initialize shared variables
    sumTotalProdCross = 0;
    sumTotal = 0;
    __syncthreads(); //wait for the shared variables to be initialized before continuing
    if (i < nthreads) {
        for(d=0;d<nrClass;d++){
            SumProdCross = 0;
            SumW = 0;
            for( b = i*a; b < a+(i*a); b++) {
                if (nz_de[b+base] > nzClass[d] && nz_de[b+base] <= nzClass[d+1]){
                    r = row_index(b, nfol);
                    c = column_index(b, nfol);
                    SumW += (w*p);
                    SumProdCross += (((nz_trait[r] - MeanY) * (nz_trait[c] - MeanY))*p);
                }
            }
            //Use atomic operations
            atomicFloatAdd(&sumTotalProdCross, SumProdCross);
            atomicFloatAdd(&sumTotal, SumW);
            __syncthreads(); // wait for all threads to reach this point
            //only one thread computes Moran's I
            if (threadIdx.x == 0){
                nz_class_value[(ib*nrClass)+d] = (nfol / sumTotal) * (sumTotalProdCross / Variance); // Moran's I
                sumTotalProdCross = 0;
                sumTotal = 0;
                //printf("Teste %.2f\n", nz_class_value[(ib*nrClass)+d] );
            }
            __syncthreads(); // wait for all threads to reach this point
        }
    }
}

__device__ inline void atomicFloatAdd(float *address, float val) {
    int tmp0 = *address;
    int i_val = __float_as_int(val + __int_as_float(tmp0));
    int tmp1;
    // compare and swap v = (old == tmp0) ? i_val : old; // returns old
    while( (tmp1 = atomicCAS((int *)address, tmp0, i_val)) != tmp0 ) {
        tmp0 = tmp1;
        i_val = __float_as_int(val + __int_as_float(tmp1));
    }
}
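/*
 * A minimal host-only sketch of the row_index()/column_index() mapping used above:
 * the strict upper triangle of an n x n distance matrix is flattened into an array of
 * n(n-1)/2 elements, and the closed-form functions recover (row, column) from a linear
 * index. The _h-suffixed helpers and the check in main() are illustrative names, not part
 * of the original sources; the harness builds with any C++ compiler.
 */
#include <cstdio>
#include <cmath>

static int row_index_h(int i, int M) {          // same formula as row_index() above
    M--;
    float m = M;
    float row = (-2*m - 1 + sqrtf(4*m*(m+1) - 8*(float)i - 7)) / -2;
    if (row == (float)(int)row) row -= 1;
    return (int)row;
}

static int column_index_h(int i, int M) {       // same formula as column_index() above
    int row = row_index_h(i, M);
    M--;
    return 1 + (i - M*row + row*(row+1)/2);
}

int main() {
    const int nfol = 5;                         // 5 species -> 5*4/2 = 10 pairwise distances
    int e = 0;                                  // linear index into the flattened upper triangle
    for (int r = 0; r < nfol; ++r)
        for (int c = r + 1; c < nfol; ++c, ++e)
            printf("i=%2d -> (%d,%d)  expected (%d,%d)\n",
                   e, row_index_h(e, nfol), column_index_h(e, nfol), r, c);
    return 0;
}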
26ff138152d4bfbc59dfd088e728be997cba21f8.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include <math.h> #include <time.h> #include <curand_kernel.h> #include <sys/time.h> #define TRUE 1 unsigned int EMPTY = UINT_MAX; char str[200]; int i, j, k, e; FILE *fp; int line=1; int nnos, idx_ni, nfol; // numero de nos, indice de nos internos, numero de folhas int hnnos; // tamanho da tabela hash int ennos; // tamanho do vetor com as distancias entre as especies (matriz triangular superior) int pos_ins, n_ins; // posicao de insercoes e numero de insercoes int *nz; // contem indice do no; para os nos a serem inseridos, contem o indice do ponto de insercao // para os nos internos a serem usados na insercao, contem -2 float *nz_br; // distancia do ramo (branch) float *nz_dr; // distancias ate o no raiz float *nz_de; // distancias entre especies int *nz_qf; // altura do no int *nz_qe; // quantidade de especies abaixo do no int *nz_p; // pai do no int *nz_f1; // filho da esquerda do no int *nz_f2; // filho da direita do no float *nz_trait; //característica a ser comparada com cada espécie float *nz_class_range; //Faixa para as classe de distância float *nz_class_value; //Valores medios de I de Moran por classe de distância float *nz_class_media; //Valores medios de I de Moran por classe de distância float *nz_class_variance; //Variancia para cada classe de distância unsigned int *nz_sig; // assinatura do no - da o caminho em bits ate o raiz unsigned int *nz_hsig; // hash da assinatura do no unsigned int *nz_hval; // indice do no na tabela hash long long GPU_start_time; long long GPU_time; // pointers to GPU memory int *nz_d; float *nz_br_d; float *nz_dr_d; float *nz_de_d; int *nz_qf_d; int *nz_qe_d; int *nz_p_d; int *nz_f1_d; int *nz_f2_d; float *nz_trait_d; float *nz_class_range_d; float *nz_class_value_d; unsigned int *nz_sig_d; unsigned int *nz_hsig_d; unsigned int *nz_hval_d; //int pos_ins_d, idx_ni_d; // char *symb, **nz_sy; char str_tmp[100]; char str_float[30]; int nbint, nbuint, nbhuint, nbfloat, nbefloat; // tamanho em bytes dos tipos basicos curandState *seed_d; float zero = 0.0; // para facilitar impressao da matriz de distancias // Forward function declarations long long start_timer(); long long stop_timer(long long start_time, char *name); // print tree in newick format char *toNewick(int raiz, int base); // find next prime number greater than n int nextprime( int n ); // kernel __global__ void Load_memory_global_Gpu(int nnos, int *nz, float *nz_br, float *nz_dr, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2); __global__ void Load_memory_shared_Gpu(int nnos, int *nz, float *nz_br, float *nz_dr, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2); __global__ void Insert_tree_Gpu(int nnos, int hnnos, int pos_ins, int idx_ni, int *nz, float *nz_br, int *nz_qf, int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2, curandState *states, unsigned long seed); __global__ void Matrix_distance_Gpu(int nnos, int hnnos, int *nz, float *nz_br, float *nz_dr, float *nz_de, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2, unsigned int *nz_sig, unsigned int *nz_hsig, unsigned int *nz_hval); __global__ void I_moran_Gpu(int nnos, int nrClass, float *nz_de, float *nz_trait, float *nz_class_range, float *nz_class_value, float MeanY, float Variance); // auxiliary kernel functions __device__ int quadratic_probing_insert(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, int val, int hnnos); __device__ int quadratic_probing_search(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, 
int hnnos); __device__ inline void atomicFloatAdd(float *address, float val); // Main program int main(int argc, char *argv[]) { int qtdArvores = 1; int qtdBlock = 1; int qtdThreadsPerBlock = 1; int qualArvore = 0; int tipoTransferencia = 1; long long *vetorTempo; // printf("\nSyntax: newick <#qtdBlocos #qtdThreads <#numeroArvoreImprimir <#tipoTransferencia >>>"); // printf("\n tipoTransferencia:"); // printf("\n 1: replicação feita na GPU, utilizando a memória GLOBAL como origem da copia"); // printf("\n 2: replicação feita na GPU, utilizando a memória COMPARTILHADA para acelerar transferência"); // printf("\n 3: replicação feita na CPU, em seguida, todos os dados são copiados para a GPU\n\n"); if (argc >= 2) sscanf(argv[1], "%d", &qtdBlock); if (argc >= 3) sscanf(argv[2], "%d", &qtdThreadsPerBlock); if (argc >= 4) sscanf(argv[3], "%d", &qualArvore); if (argc >= 5) sscanf(argv[4], "%d", &tipoTransferencia); // printf("qtdBlock: %d qtdThreadsPerBlock: %d => qtdArvores: %d) \n", qtdBlock, qtdThreadsPerBlock, qtdBlock*qtdThreadsPerBlock); // printf("qualArvore: %d\n", qualArvore); vetorTempo = (long long *) malloc(5 * sizeof(long long)); GPU_start_time = start_timer(); dim3 grid(qtdBlock), block(qtdThreadsPerBlock); //Total de threads a serem criadas, conforme a quantidade de blocos e threads por bloco qtdArvores = grid.x * block.x; // printf("qtdArvores %d. \n", qtdArvores); // fp = fopen("wellParser.out", "r"); // if (fp == NULL) { // printf("\nCannot open file\n"); // exit(0); // } fscanf(fp,"%d %d", &nnos, &idx_ni); // printf("No nos: %d, Indice no interno: %d\n", nnos, idx_ni); nfol = nnos / 2; fscanf(fp,"%d %d", &pos_ins, &n_ins); // printf("Inserir %d especies a partir de %d\n", n_ins, pos_ins); //printf("Arvore: "); nz = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", &nz[i]); //printf("%d ", nz[i]); // } // printf("\n"); // printf("Simbolos: "); symb = (char *) malloc(50); nz_sy = (char **) malloc(nnos * sizeof(char *)); for(i=0; i<nnos; i++) { fscanf(fp,"%s", symb); nz_sy[i] = (char *) malloc(50); strcpy(nz_sy[i], symb); //printf("%s ", nz_sy[i]); } //printf("\n"); nz_dr = (float *) malloc(nnos * sizeof(float) * qtdArvores); ennos = (nfol * (nfol - 1)) / 2; size_t tamanho_ui = (unsigned int) (ennos * sizeof(float) * qtdArvores) ; nz_de = (float *) malloc( tamanho_ui ); //printf("\n\nennos * sizeof(float) * qtdArvores(%d): %u\n\n", qtdArvores, tamanho_ui ); //printf("Ramos: "); nz_br = (float *) malloc(nnos * sizeof(float) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%f", &nz_br[i]); // printf("%.2f ", nz_br[i]); } // printf("\n"); // printf("No Filhos: "); nz_qf = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", &nz_qf[i]); // printf("%d ", nz_qf[i]); } //printf("\n"); // printf("No Especies: "); nz_qe = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", &nz_qe[i]); // printf("%d ", nz_qe[i]); } // printf("\n"); // printf("Pais: "); nz_p = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", &nz_p[i]); //printf("%d ", nz_p[i]); } //printf("\n"); //printf("Filhos 1: "); nz_f1 = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", &nz_f1[i]); //printf("%d ", nz_f1[i]); } //printf("\n"); //printf("Filhos 2: "); nz_f2 = (int *) malloc(nnos * sizeof(int) * qtdArvores); for(i=0; i<nnos; i++) { fscanf(fp,"%d", &nz_f2[i]); // printf("%d ", nz_f2[i]); } //printf("\n"); //printf("Traits: "); 
nz_trait = (float *) malloc(nnos * sizeof(float) ); for(i=0; i<nnos; i++) { fscanf(fp,"%f", &nz_trait[i]); //printf("%f ", nz_trait[i]); } //printf("\n"); hnnos = nextprime(2*nnos); //printf("\nPrimo/hnnos = %d, qtdArvores = %d, hnnos*qtdArvores = %d\n", hnnos, qtdArvores, hnnos * qtdArvores); nz_sig = (unsigned int *) malloc(hnnos * sizeof(unsigned int) * qtdArvores); for(i=0; i<(hnnos * qtdArvores); i++) { nz_sig[i] = 0; } nz_hsig = (unsigned int *) malloc(hnnos * sizeof(unsigned int) * qtdArvores); nz_hval = (unsigned int *) malloc(hnnos * sizeof(unsigned int) * qtdArvores); for(i=0; i<(hnnos * qtdArvores); i++) { nz_hsig[i] = (unsigned int) EMPTY; nz_hval[i] = (unsigned int) EMPTY; } fclose(fp); toNewick(nnos-1, 0); //printf(";\n"); // move data to GPU nbint = nnos * sizeof(int); nbuint = nnos * sizeof(unsigned int); nbhuint = hnnos * sizeof(unsigned int); nbfloat = nnos * sizeof(float); nbefloat = ennos * sizeof(float); GPU_time = stop_timer(GPU_start_time, "\t Tempo: Preencher dados nas estruturas da CPU"); GPU_start_time = start_timer(); //cudaMalloc((void **)&pos_ins_d, sizeof(int)); //cudaMalloc((void **)&idx_ni_d, sizeof(int)); /* :printf("\n ________ \t tipos \t\t tipos*qtdArvores, nnos %d ", nnos); printf("\n nbint \t %d \t\t %d ", nbint, nbint * qtdArvores); printf("\n nbfloat \t %d \t\t %d ", nbfloat, nbfloat * qtdArvores); printf("\n nbefloat \t %d \t\t %d ", nbefloat, nbefloat * qtdArvores); printf("\n nbuint \t %d \t\t %d ", nbuint, nbuint * qtdArvores); printf("\n "); */ cudaDeviceReset(); //printf("\ncurandState: %d\n", sizeof(curandState)); cudaMalloc((void **)&nz_d, nbint * qtdArvores); cudaMalloc((void **)&nz_br_d, nbfloat * qtdArvores); cudaMalloc((void **)&nz_dr_d, nbfloat * qtdArvores); cudaMalloc((void **)&nz_qf_d, nbint * qtdArvores); cudaMalloc((void **)&nz_qe_d, nbint * qtdArvores); cudaMalloc((void **)&nz_p_d, nbint * qtdArvores); cudaMalloc((void **)&nz_f1_d, nbint * qtdArvores); cudaMalloc((void **)&nz_f2_d, nbint * qtdArvores); cudaMalloc((void **)&seed_d, nnos*sizeof(curandState)*qtdArvores); GPU_time = stop_timer(GPU_start_time, "\t Tempo: Alocar memória na GPU"); if( nz_d==0 ) { printf("couldn't allocate memory nz_d\n"); return 1; } if( nz_br_d==0 ) { printf("couldn't allocate memory nz_br_d\n"); return 1; } if( nz_dr_d==0 ) { printf("couldn't allocate memory nz_dr_d\n"); return 1; } if( nz_qf_d==0 ) { printf("couldn't allocate memory nz_qf_d\n"); return 1; } if( nz_qe_d==0 ) { printf("couldn't allocate memory nz_qe_d\n"); return 1; } if( nz_p_d==0 || nz_f1_d==0 || nz_f2_d==0 ) { printf("couldn't allocate memory 2\n"); return 1; } if(seed_d ==0 ) { printf("couldn't allocate memory seed_d\n"); return 1; } GPU_start_time = start_timer(); if (tipoTransferencia == 1 || tipoTransferencia == 2){ cudaMemcpy(nz_d, nz, nbint, cudaMemcpyHostToDevice); cudaMemcpy(nz_br_d, nz_br, nbfloat, cudaMemcpyHostToDevice); cudaMemcpy(nz_qf_d, nz_qf, nbint, cudaMemcpyHostToDevice); cudaMemcpy(nz_qe_d, nz_qe, nbint, cudaMemcpyHostToDevice); cudaMemcpy(nz_p_d, nz_p, nbint, cudaMemcpyHostToDevice); cudaMemcpy(nz_f1_d, nz_f1, nbint, cudaMemcpyHostToDevice); cudaMemcpy(nz_f2_d, nz_f2, nbint, cudaMemcpyHostToDevice); /* //cudaMemcpy(pos_ins_d, pos_ins, sizeof(int), cudaMemcpyHostToDevice); //cudaMemcpy(idx_ni_d, idx_ni, sizeof(int), cudaMemcpyHostToDevice); */ } GPU_time = stop_timer(GPU_start_time, "\t Tempo para copiar dados (bases) para memória"); vetorTempo[0] = GPU_time; int aux = sizeof(int)*nnos; /* OPÇÕES PARA GERAR OS DADOS NA GPU: 1. 
Copiar os elementos das estruturas e replica-los na gpu, utilizando memória GLOBAL 2. Copiar os elementos das estruturas e replica-los na gpu, utilizando memória COMPARTILHADA 3. Replicar os elementos na CPU e copia-los para A GPU (memória global) */ if (tipoTransferencia == 1) { GPU_start_time = start_timer(); Load_memory_global_Gpu<<<grid, block, aux>>>(nnos, nz_d, nz_br_d, nz_dr_d, nz_qf_d, nz_qe_d, nz_p_d, nz_f1_d, nz_f2_d); cudaDeviceSynchronize(); GPU_time = stop_timer(GPU_start_time, "\t Tempo para copiar memória GPU (transferencia via memoria global)"); vetorTempo[0] += GPU_time; }else{ if (tipoTransferencia == 2){ GPU_start_time = start_timer(); Load_memory_shared_Gpu<<<grid, block, aux>>>(nnos, nz_d, nz_br_d, nz_dr_d, nz_qf_d, nz_qe_d, nz_p_d, nz_f1_d, nz_f2_d); cudaDeviceSynchronize(); GPU_time = stop_timer(GPU_start_time, "\t Tempo para copiar memória GPU (transferencia via memoria compartilhada)"); vetorTempo[0] += GPU_time; }else{ GPU_start_time = start_timer(); int base = 0; for(i = 0; i < qtdArvores; i++) { if (i > 0) { for(j = 0; j < nnos; j++) { base = i * nnos; nz[base+j] = nz[j] + (nz[j] >= 0 ? base : 0); nz_br[base+j] = nz_br[j]; nz_dr[base+j] = 0; //nz_de[base+j] = nz_de[j]; nz_qf[base+j] = nz_qf[j]; nz_qe[base+j] = nz_qe[j]; nz_p[base+j] = nz_p[j] + (nz_p[j] >= 0 ? base : 0); nz_f1[base+j] = nz_f1[j] + (nz_f1[j] >= 0 ? base : 0); nz_f2[base+j] = nz_f2[j] + (nz_f2[j] >= 0 ? base : 0); } nz[nfol] = -i; } } cudaMemcpy(nz_d, nz, nbint * qtdArvores, cudaMemcpyHostToDevice); cudaMemcpy(nz_br_d, nz_br, nbfloat * qtdArvores, cudaMemcpyHostToDevice); cudaMemcpy(nz_dr_d, nz_dr, nbfloat * qtdArvores, cudaMemcpyHostToDevice); //cudaMemcpy(nz_de_d, nz_de, nbefloat * qtdArvores, cudaMemcpyHostToDevice); cudaMemcpy(nz_qf_d, nz_qf, nbint * qtdArvores, cudaMemcpyHostToDevice); cudaMemcpy(nz_qe_d, nz_qe, nbint * qtdArvores, cudaMemcpyHostToDevice); cudaMemcpy(nz_p_d, nz_p, nbint * qtdArvores, cudaMemcpyHostToDevice); cudaMemcpy(nz_f1_d, nz_f1, nbint * qtdArvores, cudaMemcpyHostToDevice); cudaMemcpy(nz_f2_d, nz_f2, nbint * qtdArvores, cudaMemcpyHostToDevice); GPU_time = stop_timer(GPU_start_time, "\t Tempo para copiar da CPU->GPU (carregar dados)"); vetorTempo[0] += GPU_time; } } cudaDeviceSynchronize(); /************************************************** * * I N S E R I R E S P E C I E S P E R D I D A S * ******************************************************/ // call kernel GPU_start_time = start_timer(); if (n_ins > 0){ //se houver nós a inserir Insert_tree_Gpu<<<grid, block, aux>>>(nnos, hnnos, pos_ins, idx_ni, nz_d, nz_br_d, nz_qf_d, nz_qe_d, nz_p_d, nz_f1_d, nz_f2_d, seed_d, time(NULL)); printf("Erro (inserir): %s\n", cudaGetErrorString( cudaGetLastError() ) ); } cudaDeviceSynchronize(); GPU_time = stop_timer(GPU_start_time, "\t Tempo para incluir nós na árvore"); vetorTempo[1] = GPU_time; //alocar memoria para outras vetores cudaMalloc((void **)&nz_de_d, nbefloat * qtdArvores); cudaMalloc((void **)&nz_sig_d, nbhuint * qtdArvores); cudaMalloc((void **)&nz_hsig_d, nbhuint * qtdArvores); cudaMalloc((void **)&nz_hval_d, nbhuint * qtdArvores); if( nz_de_d==0 ) { printf("couldn't allocate memory nz_de_d\n"); return 1; } if( nz_sig_d==0) { printf("couldn't allocate memory nz_sig_d\n"); return 1; } if( nz_hsig_d==0 ) { printf("couldn't allocate memory nz_hsig_d\n"); return 1; } if( nz_hval_d==0 ) { printf("couldn't allocate memory nz_hval_d\n"); return 1; } cudaMemcpy(nz_sig_d, nz_sig, nbhuint * qtdArvores, cudaMemcpyHostToDevice); cudaMemcpy(nz_hsig_d, nz_hsig, nbhuint * qtdArvores, 
cudaMemcpyHostToDevice); cudaMemcpy(nz_hval_d, nz_hval, nbhuint * qtdArvores, cudaMemcpyHostToDevice); /************************************************** * * C A L C U L A R A M A T R I Z D E D I S T A N C I A * ******************************************************/ cudaDeviceSynchronize(); GPU_start_time = start_timer(); // int nb = qtdArvores; Matrix_distance_Gpu<<<qtdArvores, nfol>>>(nnos, hnnos, nz_d, nz_br_d, nz_dr_d, nz_de_d, nz_qf_d, nz_qe_d, nz_p_d, nz_f1_d, nz_f2_d, nz_sig_d, nz_hsig_d, nz_hval_d); cudaDeviceSynchronize(); printf("Erro (matrix distancia): %s\n", cudaGetErrorString( cudaGetLastError() ) ); GPU_time = stop_timer(GPU_start_time, "\t Tempo total para calcular a matriz de distância"); vetorTempo[2] = GPU_time; GPU_start_time = start_timer(); // copy data back to the CPU //cudaMemcpy(pos_ins, pos_ins_d, sizeof(int), cudaMemcpyDeviceToHost); //cudaMemcpy(idx_ni, idx_ni_d, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(nz, nz_d, nbint * qtdArvores, cudaMemcpyDeviceToHost); cudaMemcpy(nz_br, nz_br_d, nbfloat * qtdArvores, cudaMemcpyDeviceToHost); cudaMemcpy(nz_dr, nz_dr_d, nbfloat * qtdArvores, cudaMemcpyDeviceToHost); cudaMemcpy(nz_de, nz_de_d, nbefloat * qtdArvores, cudaMemcpyDeviceToHost); cudaMemcpy(nz_qf, nz_qf_d, nbint * qtdArvores, cudaMemcpyDeviceToHost); cudaMemcpy(nz_qe, nz_qe_d, nbint * qtdArvores, cudaMemcpyDeviceToHost); cudaMemcpy(nz_p, nz_p_d, nbint * qtdArvores, cudaMemcpyDeviceToHost); cudaMemcpy(nz_f1, nz_f1_d, nbint * qtdArvores, cudaMemcpyDeviceToHost); cudaMemcpy(nz_f2, nz_f2_d, nbint * qtdArvores, cudaMemcpyDeviceToHost); cudaMemcpy(nz_sig, nz_sig_d, nbhuint * qtdArvores, cudaMemcpyDeviceToHost); cudaMemcpy(nz_hsig, nz_hsig_d, nbhuint * qtdArvores, cudaMemcpyDeviceToHost); cudaMemcpy(nz_hval, nz_hval_d, nbhuint * qtdArvores , cudaMemcpyDeviceToHost); GPU_time = stop_timer(GPU_start_time, "\t Tempo copiar dados de volta (GPU -> cpu): "); vetorTempo[3] = GPU_time; //Copiar dados da Gpu para cpu /* printf("\n\nImprimir uma arvore: \n"); toNewick(nnos-1, 0); printf(";\n"); */ cudaDeviceSynchronize(); //Desalocar memoria da GPU para utilizar no próximo kernel GPU_start_time = start_timer(); cudaFree(nz_d); cudaFree(nz_br_d); cudaFree(nz_dr_d); cudaFree(nz_qf_d); cudaFree(nz_qe_d); cudaFree(nz_p_d); cudaFree(nz_f1_d); cudaFree(nz_f2_d); cudaFree(nz_sig_d); cudaFree(nz_hsig_d); cudaFree(nz_hval_d); cudaFree(seed_d); free(nz_qf); free(nz_qe); free(nz_f1); free(nz_f2); free(nz_p); free(nz_dr); free(nz_sig); free(nz_hsig); free(nz_hval); GPU_time = stop_timer(GPU_start_time, "\t Tempo: Liberar memória GPU"); /************************************************** * * C A L C U L A R I D E M O R A N * ******************************************************/ //Aloca posicoes em memoria para armazenar as classes de distância int nrClass = 4; float maiorDistancia=0, menorDistancia = nz_de[0], salto; nz_class_range = (float *) malloc((nrClass+1) * sizeof(float)); nz_class_value = (float *) malloc(nrClass * sizeof(float) * qtdArvores); nz_class_media = (float *) malloc(nrClass * sizeof(float) ); nz_class_variance = (float *) malloc(nrClass * sizeof(float) ); //As classes são definidas de forma igual, entre o maior e menor valor for (i=0;i<ennos;i++){ if (maiorDistancia < nz_de[i]) maiorDistancia = nz_de[i]; if (menorDistancia > nz_de[i]) menorDistancia = nz_de[i]; } //nz_class_range[0] = menorDistancia; salto = (maiorDistancia - menorDistancia)/nrClass; // salto é a "media" entre maior e menor, representando o salto entre as classes for(i=0;i<nrClass;i++){ 
nz_class_range[i] = menorDistancia; nz_class_value[i] = 0.0; menorDistancia += salto; } nz_class_range[0] -= nz_class_range[0]/2; //para incluir distancias iguais ao menor valor nz_class_range[i] = maiorDistancia; //realiza uma cópia do vetor de características (são as mesmas para todas as especies, independente da posição na árvore) cudaMalloc((void **)&nz_trait_d, nbfloat); cudaMalloc((void **)&nz_class_range_d, sizeof(float) * (nrClass+1)); //+1 para guardar a faixa final da classe cudaMalloc((void **)&nz_class_value_d, sizeof(float) * nrClass * qtdArvores); cudaMemcpy(nz_trait_d, nz_trait, nbfloat, cudaMemcpyHostToDevice); cudaMemcpy(nz_class_range_d, nz_class_range, sizeof(float) * (nrClass+1), cudaMemcpyHostToDevice); cudaMemcpy(nz_class_value_d, nz_class_value, sizeof(float) * nrClass * qtdArvores, cudaMemcpyHostToDevice); float Variance, MeanY, SumW; SumW = 0; Variance = 0; for (int d=0;d<nfol;d++){ SumW = SumW + nz_trait[d]; Variance = Variance + pow(nz_trait[d],2); } MeanY = SumW / nfol; Variance = Variance - (pow(SumW, 2) / nfol); cudaDeviceSynchronize(); GPU_start_time = start_timer(); aux = sizeof(float)*(nrClass+1); I_moran_Gpu<<<qtdArvores, nfol, aux>>>(nnos, nrClass, nz_de_d, nz_trait_d, nz_class_range_d, nz_class_value_d, MeanY, Variance); cudaDeviceSynchronize(); printf("Erro (I_moran_Gpu): %s\n", cudaGetErrorString( cudaGetLastError() ) ); GPU_time = stop_timer(GPU_start_time, "\t Tempo para calcular o Indice de Moran): "); vetorTempo[4] = GPU_time; //Copiar dados da Gpu para cpu //Traz os resultados de volta (GPU para Host), as medias são armazenadas no início do vetor cudaMemcpy(nz_class_value, nz_class_value_d, nrClass * sizeof(float) * qtdArvores, cudaMemcpyDeviceToHost); //Calcula a media por classe e a variancia float media; int nrArvore; for(i=0;i<nrClass;i++){ media = 0; for (nrArvore=i;nrArvore<(qtdArvores*nrClass);nrArvore+=nrClass){ media += nz_class_value[nrArvore]; } nz_class_media[i] = media / qtdArvores; } //calculo da variancia for(i=0;i<nrClass;i++){ media = 0; for (nrArvore=i;nrArvore<(qtdArvores*nrClass);nrArvore+=nrClass){ media += pow((nz_class_value[nrArvore] - nz_class_media[i]), 2); } nz_class_variance[i] = media / qtdArvores; } GPU_start_time = start_timer(); /************************************************** * * E X I B I R R E S U L T A D O S * ******************************************************/ /* printf("\nnz_sy, "); for (int jx=0;jx<qtdArvores;jx++) for(i=0;i<nnos;i++){ printf("%s,", nz_sy[i]); } printf("\nnz, "); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz[i]); } */ // printf("\nnz_br,"); // for(i=0;i<(qtdArvores*nnos);i++){ // printf("%f,", nz_br[i]); // if (i == 10000) //limitar impressao para nao deixar os arquivos muito grandes // break; // } /* printf("\nnz_dr,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%f,", nz_dr[i]); } printf("\nnz_qf,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz_qf[i]); } printf("\nnz_qe,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz_qe[i]); } printf("\nnz_p,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz_p[i]); // if (i == 10000) // break; } printf("\nnz_f1,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz_f1[i]); if (i == 10000) break; } printf("\nnz_f2,"); for(i=0;i<(qtdArvores*nnos);i++){ printf("%d,", nz_f2[i]); if (i == 10000) break; } */ // printf("\nnz_class,"); // for(i=0;i<(nrClass);i++){ // printf("\t\n [%d] %f => value: %f ; media: %f ; variance: %f ", i, nz_class_range[i], nz_class_value[i], nz_class_media[i], nz_class_variance[i]); // } // printf("\n"); // 
printf("\n"); /* for(i=1;i<=qtdArvores;i++){ //toNewick((nnos)-1); toNewick((i*nnos)-1, (nnos*(i-1))); printf(";\n"); } printf("Pais: "); for(i=0; i<(nnos*qtdArvores); i++) { printf("%d ", nz_p[i]); } printf("\n"); printf("Dst Raiz: "); for(i=0; i<(nnos*qtdArvores); i++) { if ((i-((i/nnos)*nnos)) == nfol) continue; // desconta o no da posicao nfol printf("%.2f ", nz_dr[i]); // pois este nao e usado } printf("\n"); printf("Assinatura: "); for(i=0; (i<(hnnos * qtdArvores)); i++) { if ((i-((i/nnos)*nnos)) == nfol) continue; // desconta o no da posicao nfol if (i == nfol) continue; printf("%u ", nz_sig[i]); } printf("\n"); printf("Hash Sign: "); for(i=0; (i<(hnnos * qtdArvores)); i++) { if (i == nfol) continue; if ((i-((i/nnos)*nnos)) == nfol) continue; // desconta o no da posicao nfol printf("%u ", nz_hsig[i]); } printf("\n"); printf("Hash Val: "); for(i=0; (i<(hnnos * qtdArvores)); i++) { if ((i-((i/nnos)*nnos)) == nfol) continue; // desconta o no da posicao nfol if (i == nfol) continue; printf("%u ", nz_hval[i]); } printf("\n"); // */ // e = 0; // indexa a matriz triangular superior (representada num array) que contem a distancia // // entre as especies // printf("Distancias: \n"); // printf("%7s ", nz_sy[0]); // for(i=1; i<nfol; i++) // printf("%4s ", nz_sy[i]); // printf("\n"); // for(i=0; i<nfol; i++) { // printf("%3s ", nz_sy[i]); // // if (i >= (nfol-3)){ // for(j=0; j<=i; j++) // printf("%.2f ", zero); // for(k=i+1; k<nfol; k++) { // printf("%.2f ", nz_de[e+(qualArvore*ennos)]); // e++; // } // // } // printf("\n"); // } /* printf("\n\nnz_de: "); for(k=0; k<(ennos*qtdArvores); k++) { printf("%.2f ", nz_de[k]); } */ GPU_time = stop_timer(GPU_start_time, "\t Tempo mostrar dados em tela"); GPU_start_time = start_timer(); printf("\n"); free(nz); free(nz_br); free(nz_de); free(symb); free(nz_sy); GPU_time = stop_timer(GPU_start_time, "\t Tempo: Liberar memoria da CPU"); GPU_start_time = start_timer(); cudaFree(nz_de_d); GPU_time = stop_timer(GPU_start_time, "\t Tempo: Liberar memória GPU (matriz de distancia)"); printf("\n\n=================== R e s u m o d o s T e m p o s ======================="); printf("\nCPU -> GPU\tIncluir Esp. 
\tMatriz dist.\tGPU-> CPU\tI de Moran (em sec)"); printf("\n%.5f \t", ((float) vetorTempo[0]) / (1000 * 1000)); printf("%.5f \t", ((float) vetorTempo[1]) / (1000 * 1000)); printf("%.5f \t", ((float) vetorTempo[2]) / (1000 * 1000)); printf("%.5f \t", ((float) vetorTempo[3]) / (1000 * 1000)); printf("%.5f \t\n", ((float) vetorTempo[4]) / (1000 * 1000)); free(vetorTempo); return 0; } // Returns the current time in microseconds long long start_timer() { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000 + tv.tv_usec; } // Prints the time elapsed since the specified time long long stop_timer(long long start_time, char *name) { struct timeval tv; gettimeofday(&tv, NULL); long long end_time = tv.tv_sec * 1000000 + tv.tv_usec; //printf("%s: %.5f sec\n", name, ((float) (end_time - start_time)) / (1000 * 1000)); return end_time - start_time; } char *toNewick(int idRaiz, int base) { strcpy(str_tmp,""); strcpy(str_float,""); if (nz_f1[idRaiz] < 0) { // Não tem filhos if ((idRaiz-base) < 0 || (idRaiz-base) > (nnos-1)) //printf("ERRO %d\n", (idRaiz-base)); else strcat (str_tmp, nz_sy[idRaiz-base]); strcat (str_tmp, ":"); //sprintf(str_float,"%0.2f", nz_br[idRaiz]); strcat (str_tmp, str_float); return str_tmp; } else { // Tem filhos // printf("("); // printf("%s", toNewick(nz_f1[idRaiz], base)); // printf(","); // printf("%s", toNewick(nz_f2[idRaiz], base)); // printf(")"); // printf("%s", nz_sy[idRaiz-base]); // printf(":"); // sprintf(str_float,"%0.2f", nz_br[idRaiz]); // printf("%s", str_float); return ""; } } int nextprime( int n ) { int Divisor, PossiblePrime; int FoundPrime; PossiblePrime = n; if( PossiblePrime <= 2 ) PossiblePrime = 2; else if( PossiblePrime != 3 ) { if( PossiblePrime % 2 == 0 ) PossiblePrime++; /* Need An Odd Number */ for( ; ; PossiblePrime += 2 ) { FoundPrime = !TRUE; for( Divisor = 3; PossiblePrime % Divisor; Divisor += 2 ) if( Divisor * Divisor > PossiblePrime ) { FoundPrime = TRUE; break; } if( FoundPrime ) break; } } return PossiblePrime; } __device__ int quadratic_probing_insert(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, int val, int hnnos) { unsigned int j, hk, old; int ib = blockIdx.x; // identificador do bloco j = 0; hk = sig % hnnos; while(j < hnnos) { old = atomicCAS(&nz_hsig[hk+ib*hnnos], UINT_MAX, sig); // se posicao estiver vazia (UINT_MAX = EMPTY) if (old == UINT_MAX) { nz_hval[hk+ib*hnnos] = val; return (hk+ib*hnnos); } j++; hk = (hk + j * j) % hnnos; // hk = (hk + j) % hnnos; } return (-1); } __device__ int quadratic_probing_search(unsigned int *nz_hsig, unsigned int *nz_hval, unsigned int sig, int hnnos) { unsigned int j, hk; int ib = blockIdx.x; // identificador do bloco j = 0; hk = sig % hnnos; while(j < hnnos) { if (nz_hsig[hk+ib*hnnos] == sig) { return (nz_hval[hk+ib*hnnos]); } j++; hk = (hk + j * j) % hnnos; // hk = (hk + j) % hnnos; } return (-1); } // estas duas funcoes sao usada para mapear os indices de um array para uma matriz triangular // superior correspondente (sem a diagonal). 
para uma matriz nxn, o array terá n(n-1)/2 elementos __host__ __device__ int row_index( int i, int M ){ // retorna o indice da linha M--; float m = M; float row = (-2*m - 1 + sqrt( (4*m*(m+1) - 8*(float)i - 7) )) / -2; if( row == (float)(int) row ) row -= 1; return (int) row; } __host__ __device__ int column_index( int i, int M ){ // retorna o indice da coluna int row = row_index( i, M); M--; return 1 + (i - M * row + row*(row+1) / 2); } __global__ void Load_memory_global_Gpu(int nnos, int *nz, float *nz_br, float *nz_dr, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2) { int i = threadIdx.x; // identificador da thread int index, new_index; int base = (blockIdx.x * blockDim.x * nnos) + nnos * i; //Todas as threads copiam os dados para suas respectivas áreas for(index = 0; index < nnos; index++){ new_index = base+index; nz[new_index] = nz[index] + (nz[index] >= 0 ? base : 0); nz_br[new_index] = nz_br[index]; nz_dr[new_index] = 0; nz_qf[new_index] = nz_qf[index]; nz_qe[new_index] = nz_qe[index]; nz_p[new_index] = nz_p[index] + (nz_p[index] >= 0 ? base : 0); nz_f1[new_index] = nz_f1[index] + (nz_f1[index] >= 0 ? base : 0); nz_f2[new_index] = nz_f2[index] + (nz_f2[index] >= 0 ? base : 0); } } __global__ void Load_memory_shared_Gpu(int nnos, int *nz, float *nz_br, float *nz_dr, int *nz_qf, int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2) { extern __shared__ float nzTemp[]; int i = threadIdx.x; // identificador da thread int index; int base = (blockIdx.x * blockDim.x * nnos) + nnos * i; //Copiar dados do vetor NZ if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz[base+index] = (int) (nzTemp[index] + (nzTemp[index] >= 0 ? base : 0)); __syncthreads(); //Copiar dados do vetor BR if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_br[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz_br[base+index] = nzTemp[index]; __syncthreads(); //Copiar dados do vetor QF if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_qf[index]; __syncthreads(); for(index = 0; index < nnos; index++){ nz_dr[base+index] = 0; nz_qf[base+index] = nzTemp[index]; } __syncthreads(); //Copiar dados do vetor QE if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_qe[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz_qe[base+index] = nzTemp[index]; __syncthreads(); //Copiar dados do vetor P if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_p[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz_p[base+index] = (int) (nzTemp[index] + (nzTemp[index] >= 0 ? base : 0)); __syncthreads(); //Copiar dados do vetor F1 if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_f1[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz_f1[base+index] = (int) (nzTemp[index] + (nzTemp[index] >= 0 ? base : 0)); __syncthreads(); //Copiar dados do vetor F2 if (threadIdx.x == 0) for(index = 0; index < nnos; index++) nzTemp[index] = nz_f2[index]; __syncthreads(); for(index = 0; index < nnos; index++) nz_f2[base+index] = (int) (nzTemp[index] + (nzTemp[index] >= 0 ? 
base : 0)); // __syncthreads(); } __global__ void Insert_tree_Gpu(int nnos, int hnnos, int pos_ins, int idx_ni, int *nz, float *nz_br, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2, curandState *states, unsigned long seed) { int i = threadIdx.x; // identificador da thread float x; // valor gerado aleatoriamente unsigned int valor2; // numero entre 1 e maximo inteiro sem sinal unsigned int valor1; // numero entre 1 e altura da sub-arvore unsigned int shift = 8*sizeof(unsigned int)-1; // bits estao na faixa 0-31, e nao em 1-32 unsigned int mask=1<<shift; // recebe 1 deslocado 31 vezes p/ direita // (10000000 00000000 00000000 00000000) __shared__ int nfol; // numero de folhas da arvore int indMdcc; // no a partir do qual sera inserido uma especie int indNewNode; // aponta para o no internto a ser inserido junto com a especie a ser inserida // idx_ni e o indice inicial dos nos internos a serem inseridos. Este indice // cresce da direita para a esquerda. Veja que pos_ins aponta para a primeira // especie a ser inserida. Serao inseridas nfol-pos_ins+1 especies. int indSisterSpecies; int index; int indNewSpecies; int base = (blockIdx.x * blockDim.x * nnos) + nnos * i; index = 0; indSisterSpecies = 0; nfol = nnos / 2; // folhas estao na metade inferior curand_init(seed+i, base, 0, &states[base]); // Initialize CURAND for(indNewSpecies=(base+pos_ins);indNewSpecies < (base+nfol);indNewSpecies++){ curand(&states[base]); x = curand_uniform (&states[base]); // gera numero aleatorio indNewNode = base + (idx_ni - ((indNewSpecies-base) - pos_ins)); // recebe um no interno a ser usado na insercao das especies indMdcc = nz[indNewSpecies]; // a posicao species [pos_ins <= species < nfol] contem o indice do no interno que // sera usado para inserir a especie, i.e., ponto inicial de insercao (MDCC-most derived consensus clade) valor1 = (int) (1 + x*nz_qf[indMdcc]); // numero entre 1 e altura da sub-arvore valor2 = (unsigned int) (1 + x*UINT_MAX); // numero entre 1 e maximo inteiro sem sinal // a insercao e feita a partir do ponto de insercao mas seguindo os bits de valor2 // se o bit for 1 avanca para a esquerda (f1) e se for 0 avanca para a direita (f2) if (indMdcc <= (base+nfol)) //Se o ponto de inserção for uma folha, então sobe um nível indMdcc = nz_p[indMdcc]; else while (valor1 > 0) { // faca enquando nao alcancar a altura do no em questao ou um no // folha seja alcancado. 
if(valor2 & mask) if (nz_f1[indMdcc] <= (base+nfol)) break; else indMdcc = nz_f1[indMdcc]; // avanca para proximo filho else if (nz_f2[indMdcc] <= (base+nfol)) break; else indMdcc = nz_f2[indMdcc]; // avanca para proximo filho valor2 <<= 1; // avanca para proximo bit valor1--; // diminui altura da arvore } // // convencao: f1 aa esquerda e f2 aa direita // x = curand_uniform (&states[base]); // gera numero aleatorio - reuso de x if(valor2 & mask) { // insere no aa direita (f2) do no folha (especie) atual (f1) indSisterSpecies = nz_f1[indMdcc]; //nó a partir do qual o calculo do brach para a nova especie será realizado nz_f1[indNewNode] = nz_f1[indMdcc]; nz_f2[indNewNode] = indNewSpecies; nz_p[nz_f1[indNewNode]] = indNewNode; nz_f1[indMdcc] = indNewNode; nz_br[indNewNode] = x * nz_br[nz_f1[indNewNode]]; nz_br[nz_f1[indNewNode]] -= nz_br[indNewNode]; nz_qf[indNewNode] = nz_qf[nz_f1[indNewNode]]++; nz_qe[indNewNode] = nz_qe[nz_f1[indNewNode]]++; } else { // insere no aa esquerda (f1) do no folha (especie) atual (f2) indSisterSpecies = nz_f2[indMdcc]; //nó a partir do qual o calculo do brach para a nova especie será realizado nz_f1[indNewNode] = indNewSpecies; nz_f2[indNewNode] = nz_f2[indMdcc]; nz_p[nz_f2[indNewNode]] = indNewNode; nz_f2[indMdcc] = indNewNode; //Dividir o branch do nó "quebrado", de forma proporcional para o novo nó PAI (indNewNode) nz_br[indNewNode] = x * nz_br[nz_f2[indNewNode]]; nz_br[nz_f2[indNewNode]] -= nz_br[indNewNode]; nz_qf[indNewNode] = nz_qf[nz_f2[indNewNode]]++; //Atualizar informacoes de quantidade de especies nz_qe[indNewNode] = nz_qe[nz_f2[indNewNode]]++; } //atualiza vetor de pais nz_p[indNewSpecies] = indNewNode; nz_p[indNewNode] = indMdcc; nz_qe[indNewSpecies] = 1; //atualizar a qtde de especies e qtd de filhos index = nz_p[indNewNode]; x = nnos/2; while( index > -1 || x <= 0 ){ nz_qe[index] += 1; // if (nz_f1[index] == -2 || nz_f2[index] == -2) break; if ( (nz_f1[index] >= (base+nfol) && nz_qf[nz_f1[index]] >= nz_qf[index]) || (nz_f2[index] >= (base+nfol) && nz_qf[nz_f2[index]] >= nz_qf[index])) nz_qf[index] += 1; index = nz_p[index]; x--; } //Calcular distancia para o nó inserido x = curand_uniform (&states[base]); // gera numero aleatorio - reuso de x if (indSisterSpecies < (base+nfol)) //se irma eh folha, então branch deve possuir tamanho igual a irma nz_br[indNewSpecies] = nz_br[indSisterSpecies]; else { valor2 = (unsigned int) (1 + x*UINT_MAX); // numero entre 1 e maximo inteiro sem sinal nz_br[indNewSpecies] = 0.0; index = indSisterSpecies; while (true){ nz_br[indNewSpecies] += nz_br[index]; if (valor2 & mask){ if (nz_f1[index] == -2) break; index = nz_f1[index]; }else{ if (nz_f2[index] == -2) break; index = nz_f2[index]; } } } } } __global__ void Matrix_distance_Gpu(int nnos, int hnnos, int *nz, float *nz_br, float *nz_dr, float *nz_de, int *nz_qf,int *nz_qe, int *nz_p, int *nz_f1, int *nz_f2, unsigned int *nz_sig, unsigned int *nz_hsig, unsigned int *nz_hval) { float y; // acumula soma das arestas __shared__ int nfol; // numero de folhas da arvore int j; // indice para thread ativa int a, b; // usados no calculo da faixa de elementos (da matriz triangular) a serem considerados unsigned int sig1, sig2, sig3, sig4; // assinaturas de tres nos - da o caminho em bits ate o raiz int bit; // contem bit sendo analizado int ancc; // indice do ancestral comum int nthreads; // numero de threads ativas int r, c; // linha e coluna da matriz triangular superior int bits; // conta quantos bits sao iguais int i = threadIdx.x; // identificador da thread int ib = 
blockIdx.x; // identificador do bloco int it; // indice de acesso global das threads int ennos; // tamanho da matriz de distancias nfol = nnos / 2; // folhas estao na metade inferior ennos = (nfol * (nfol - 1)) / 2; it = i + ib*nnos; if (i < nfol) { // nos folhos calculam distancia ate a raiz e armazena o caminho (assinatura // em bits) até a raiz y = 0; j = it; // associa threads com nos folhas nz_sig[it] = 1; while (j != -1) { y = y + nz_br[j]; // acumula a distancia if (nz_p[j] == -1) break; nz_sig[it] <<= 1; // acumula o caminho if (nz_f1[nz_p[j]] == j) // acrescenta 0 se vier da direita (f2) nz_sig[it]++; // ou 1 se vier da esquerda (f1) j = nz_p[j]; } quadratic_probing_insert(nz_hsig, nz_hval, nz_sig[it], it, hnnos); nz_dr[it] = y; } __syncthreads(); // espera todas as threads chegarem até aqui if (i < (nfol-1)) { // nos internos calculam distancia ate a raiz e armazena o caminho // (assinatura em bits) até o raiz y = 0; j = it+nfol+1; // associa threads com os nos internos nz_sig[j] = 1; if (nz_p[j] == -1) j = -1; while (j != -1) { y = y + nz_br[j]; // acumula a distancia if (nz_p[j] == -1) break; nz_sig[it+nfol+1] <<= 1; // acumula o caminho if (nz_f1[nz_p[j]] == j) // acrescenta 0 se vier da direita (f2) nz_sig[it+nfol+1]++; // ou 1 se vier da esquerda (f1) j = nz_p[j]; } quadratic_probing_insert(nz_hsig, nz_hval, nz_sig[it+nfol+1], (it+nfol+1), hnnos); nz_dr[it+nfol+1] = y; } __syncthreads(); // espera todas as threads chegarem até aqui // se nfol (numero de especies) for impar, usamos nfol threads // se nfol (numero de especies) for par, usamos nfol-1 threads // isso evita termos que tratar de elementos restantes if ( (nfol % 2) == 0) { nthreads = nfol - 1; // nfol é par: cada thread calcula nfol/2 distancias a = nfol / 2; // quantidade de elementos por thread } else { nthreads = nfol; // nfol é ímpar: cada thread calcula (nfol-1)/2 distancias a = (nfol - 1) / 2; // quantidade de elementos por thread } if (i < nthreads) { for( b = i*a; b < a+(i*a); b++) { r = row_index(b, nfol); c = column_index(b, nfol); sig1 = nz_sig[r+ib*nnos]; sig2 = nz_sig[c+ib*nnos]; sig3 = 1; // inicia com 1 para diferenciar das demais assinaturas, i.e., 10, 100 etc bits = 0; // conta quantos bits sao iguais sig4 = 1; // recebe assinatura invertida while ( (sig1 & 1) == (sig2 & 1) && bits < 32) { // compara bit menos significativo bit = (sig1 & 1); bits++; sig1 >>= 1; // avanca para proximo bit sig2 >>= 1; // avanca para proximo bit sig3 <<= 1; // armazena bits coincidentes - caminho do ancestral comum if (bit) sig3++; // soma 1 ou 0 } while (bits>0) { // inverte a assinatura coincidente incluindo um 1 mais a esquerda sig4 <<= 1; if (sig3 & 1) sig4++; sig3 >>= 1; bits--; } ancc = quadratic_probing_search(nz_hsig, nz_hval, sig4, hnnos); nz_de[b+ib*ennos] = nz_dr[r+ib*nnos] + nz_dr[c+ib*nnos] - 2*nz_dr[ancc]; } } } /* Calcular o I de Moran para cada classe. Sao diversas arvores, cada uma tera o I de Moran para cada classe (nz_class), em seguida faz-se a media e calcula a variancia entre elas. Return: I de Moran por classe e a variancia para cada classe. 
*/ __global__ void I_moran_Gpu(int nnos, int nrClass, float *nz_de, float *nz_trait, float *nz_class_range, float *nz_class_value, float MeanY, float Variance){ int d, r, c, a, b; int nfol, nthreads; float SumProdCross, SumW, w; short int p; int i = threadIdx.x; // identificador da thread int ib = blockIdx.x; // identificador do bloco int ennos; int base; __shared__ float sumTotal, sumTotalProdCross; extern __shared__ float nzClass[]; for(d=0;d<nrClass;d++){ nzClass[d] = nz_class_range[d]; } nfol = nnos/2; ennos = (nfol * (nfol - 1)) / 2; base = ib * ennos; SumW = 0; if ( (nfol % 2) == 0) { nthreads = nfol - 1; // nfol é par: cada thread calcula nfol/2 distancias a = nfol / 2; // quantidade de elementos por thread } else { nthreads = nfol; // nfol é ímpar: cada thread calcula (nfol-1)/2 distancias a = (nfol - 1) / 2; // quantidade de elementos por thread } w = 1; p = 2; //Symetric //Inicializa variaveis compartilhadas sumTotalProdCross = 0; sumTotal = 0; __syncthreads();//aguarda inicializacao das variaveis para continuar execução if (i < nthreads) { for(d=0;d<nrClass;d++){ SumProdCross = 0; SumW = 0; for( b = i*a; b < a+(i*a); b++) { if (nz_de[b+base] > nzClass[d] && nz_de[b+base] <= nzClass[d+1]){ r = row_index(b, nfol); c = column_index(b, nfol); SumW += (w*p); SumProdCross += (((nz_trait[r] - MeanY) * (nz_trait[c] - MeanY))*p); } } //Utilizar operacao atomica atomicFloatAdd(&sumTotalProdCross, SumProdCross); atomicFloatAdd(&sumTotal, SumW); __syncthreads(); // espera todas as threads chegarem até aqui //apenas uma thread calcula o I de Moran if (threadIdx.x == 0){ nz_class_value[(ib*nrClass)+d] = (nfol / sumTotal) * (sumTotalProdCross / Variance); // I de Moran sumTotalProdCross = 0; sumTotal = 0; //printf("Teste %.2f\n", nz_class_value[(ib*nrClass)+d] ); } __syncthreads(); // espera todas as threads chegarem até aqui } } } __device__ inline void atomicFloatAdd(float *address, float val) { int tmp0 = *address; int i_val = __float_as_int(val + __int_as_float(tmp0)); int tmp1; // compare and swap v = (old == tmp0) ? i_val : old; // returns old while( (tmp1 = atomicCAS((int *)address, tmp0, i_val)) != tmp0 ) { tmp0 = tmp1; i_val = __float_as_int(val + __int_as_float(tmp1)); } }
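/*
 * A self-contained sketch of the compare-and-swap accumulation that atomicFloatAdd()
 * above relies on, assuming a CUDA toolchain (nvcc). The names atomicFloatAddSketch and
 * addOne are illustrative, not part of the original file. One deliberate difference: the
 * initial load is reinterpreted with __float_as_int() instead of the implicit float-to-int
 * conversion in "int tmp0 = *address;", so the first atomicCAS can already succeed rather
 * than only converging on the retry.
 */
#include <cstdio>
#include <cuda_runtime.h>

// Reinterpret the float bits as an int, then retry atomicCAS until no other
// thread has updated the value in between.
__device__ void atomicFloatAddSketch(float *address, float val) {
    int old = __float_as_int(*address);   // bitwise reinterpretation, not a value cast
    int assumed;
    do {
        assumed = old;
        old = atomicCAS((int *)address, assumed,
                        __float_as_int(val + __int_as_float(assumed)));
    } while (old != assumed);
}

__global__ void addOne(float *sum) {
    atomicFloatAddSketch(sum, 1.0f);      // every thread adds 1.0f to the same float
}

int main() {
    float h_sum = 0.0f, *d_sum = nullptr;
    cudaMalloc(&d_sum, sizeof(float));
    cudaMemcpy(d_sum, &h_sum, sizeof(float), cudaMemcpyHostToDevice);
    addOne<<<32, 256>>>(d_sum);           // 32 blocks x 256 threads = 8192 increments
    cudaMemcpy(&h_sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %.1f (expected 8192.0)\n", h_sum);
    cudaFree(d_sum);
    return 0;
}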
c0e13f807c5d0a68311bc75401d38eaa539f0f91.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** @file fil.cu implements forest inference */ #include <omp.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <treelite/c_api.h> #include <treelite/tree.h> #include <algorithm> #include <cmath> #include <iomanip> #include <limits> #include <stack> #include <utility> #include <cuml/fil/fil.h> #include <cuml/fil/fnv_hash.h> #include <raft/cudart_utils.h> #include <cuml/common/logger.hpp> #include <raft/handle.hpp> #include <raft/mr/device/allocator.hpp> #include <raft/mr/host/allocator.hpp> #include "common_hip.cuh" namespace ML { namespace fil { namespace tl = treelite; __host__ __device__ float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } /** performs additional transformations on the array of forest predictions (preds) of size n; the transformations are defined by output, and include averaging (multiplying by inv_num_trees), adding global_bias (always done), sigmoid and applying threshold. in case of complement_proba, fills in the complement probability */ __global__ void transform_k(float* preds, size_t n, output_t output, float inv_num_trees, float threshold, float global_bias, bool complement_proba) { size_t i = threadIdx.x + size_t(blockIdx.x) * blockDim.x; if (i >= n) return; if (complement_proba && i % 2 != 0) return; float result = preds[i]; if ((output & output_t::AVG) != 0) result *= inv_num_trees; result += global_bias; if ((output & output_t::SIGMOID) != 0) result = sigmoid(result); // will not be done on CATEGORICAL_LEAF because the whole kernel will not run if ((output & output_t::CLASS) != 0) { result = result > threshold ? 1.0f : 0.0f; } // sklearn outputs numpy array in 'C' order, with the number of classes being last dimension // that is also the default order, so we should use the same one if (complement_proba) { preds[i] = 1.0f - result; preds[i + 1] = result; } else preds[i] = result; } struct forest { void init_n_items(int device) { int max_shm_std = 48 * 1024; // 48 KiB /// the most shared memory a kernel can request on the GPU in question int max_shm = 0; CUDA_CHECK(hipDeviceGetAttribute( &max_shm, hipDeviceAttributeSharedMemPerBlockOptin, device)); // TODO(canonizer): use >48KiB shared memory if available max_shm = ::min(max_shm, max_shm_std); // searching for the most items per block while respecting the shared // memory limits creates a full linear programming problem. // solving it in a single equation looks less tractable than this for (bool predict_proba : {false, true}) { shmem_size_params& ssp_ = predict_proba ? proba_ssp_ : class_ssp_; ssp_.predict_proba = predict_proba; shmem_size_params ssp = ssp_; for (bool cols_in_shmem : {false, true}) { ssp.cols_in_shmem = cols_in_shmem; for (ssp.n_items = 1; ssp.n_items <= (algo_ == algo_t::BATCH_TREE_REORG ? 
4 : 1); ++ssp.n_items) { ssp.compute_smem_footprint(); if (ssp.shm_sz < max_shm) ssp_ = ssp; } } ASSERT(max_shm >= ssp_.shm_sz, "FIL out of shared memory. Perhaps the maximum number of \n" "supported classes is exceeded? 5'000 would still be safe."); } } void init_fixed_block_count(int device, int blocks_per_sm) { int max_threads_per_sm, sm_count; CUDA_CHECK(hipDeviceGetAttribute( &max_threads_per_sm, hipDeviceAttributeMaxThreadsPerMultiProcessor, device)); int max_blocks_per_sm = max_threads_per_sm / FIL_TPB; ASSERT(blocks_per_sm <= max_blocks_per_sm, "on this GPU, FIL blocks_per_sm cannot exceed %d", max_blocks_per_sm); CUDA_CHECK(hipDeviceGetAttribute(&sm_count, hipDeviceAttributeMultiprocessorCount, device)); fixed_block_count_ = blocks_per_sm * sm_count; } void init_common(const raft::handle_t& h, const forest_params_t* params) { depth_ = params->depth; num_trees_ = params->num_trees; algo_ = params->algo; output_ = params->output; threshold_ = params->threshold; global_bias_ = params->global_bias; proba_ssp_.leaf_algo = params->leaf_algo; proba_ssp_.num_cols = params->num_cols; proba_ssp_.num_classes = params->num_classes; class_ssp_ = proba_ssp_; int device = h.get_device(); init_n_items(device); // n_items takes priority over blocks_per_sm init_fixed_block_count(device, params->blocks_per_sm); } virtual void infer(predict_params params, hipStream_t stream) = 0; void predict(const raft::handle_t& h, float* preds, const float* data, size_t num_rows, bool predict_proba) { // Initialize prediction parameters. predict_params params(predict_proba ? proba_ssp_ : class_ssp_); params.algo = algo_; params.preds = preds; params.data = data; params.num_rows = num_rows; // ignored unless predict_proba is true and algo is GROVE_PER_CLASS params.transform = output_; // fixed_block_count_ == 0 means the number of thread blocks is // proportional to the number of rows params.num_blocks = fixed_block_count_; /** The binary classification / regression (FLOAT_UNARY_BINARY) predict_proba() works as follows (always 2 outputs): RAW: output the sum of tree predictions AVG is set: divide by the number of trees (averaging) SIGMOID is set: apply sigmoid CLASS is set: ignored SOFTMAX is set: error write the output of the previous stages and its complement The binary classification / regression (FLOAT_UNARY_BINARY) predict() works as follows (always 1 output): RAW (no values set): output the sum of tree predictions AVG is set: divide by the number of trees (averaging) SIGMOID is set: apply sigmoid CLASS is set: apply threshold (equivalent to choosing best class) SOFTMAX is set: error The multi-class classification / regression (CATEGORICAL_LEAF) predict_proba() works as follows (always num_classes outputs): RAW (no values set): output class votes AVG is set: divide by the number of trees (averaging, output class probability) SIGMOID is set: apply sigmoid CLASS is set: ignored SOFTMAX is set: error The multi-class classification / regression (CATEGORICAL_LEAF) predict() works as follows (always 1 output): RAW (no values set): output the label of the class with highest probability, else output label 0. 
SOFTMAX is set: error All other flags (AVG, SIGMOID, CLASS) are ignored The multi-class classification / regression (GROVE_PER_CLASS) predict_proba() works as follows (always num_classes outputs): RAW (no values set): output class votes AVG is set: divide by the number of trees (averaging, output class probability) SIGMOID is set: apply sigmoid; if SOFTMAX is also set: error CLASS is set: ignored SOFTMAX is set: softmax is applied after averaging and global_bias The multi-class classification / regression (GROVE_PER_CLASS) predict() works as follows (always 1 output): RAW (no values set): output the label of the class with highest margin, equal margins resolved in favor of smaller label integer All other flags (AVG, SIGMOID, CLASS, SOFTMAX) are ignored */ output_t ot = output_; // Treelite applies bias before softmax, but we do after. // Simulating treelite order, which cancels out bias. // If non-proba prediction used, it still will not matter // for the same reason softmax will not. float global_bias = (ot & output_t::SOFTMAX) != 0 ? 0.0f : global_bias_; bool complement_proba = false, do_transform; if (predict_proba) { // no threshold on probabilities ot = output_t(ot & ~output_t::CLASS); switch (params.leaf_algo) { case leaf_algo_t::FLOAT_UNARY_BINARY: params.num_outputs = 2; complement_proba = true; do_transform = true; break; case leaf_algo_t::GROVE_PER_CLASS: // for GROVE_PER_CLASS, averaging happens in infer_k ot = output_t(ot & ~output_t::AVG); params.num_outputs = params.num_classes; do_transform = ot != output_t::RAW && ot != output_t::SOFTMAX || global_bias != 0.0f; break; case leaf_algo_t::CATEGORICAL_LEAF: params.num_outputs = params.num_classes; do_transform = ot != output_t::RAW || global_bias_ != 0.0f; break; default: ASSERT(false, "internal error: invalid leaf_algo_"); } } else { if (params.leaf_algo == leaf_algo_t::FLOAT_UNARY_BINARY) { do_transform = ot != output_t::RAW || global_bias_ != 0.0f; } else { // GROVE_PER_CLASS, CATEGORICAL_LEAF: moot since choosing best class and // all transforms are monotonic. also, would break current code do_transform = false; } params.num_outputs = 1; } // Predict using the forest. hipStream_t stream = h.get_stream(); infer(params, stream); if (do_transform) { size_t num_values_to_transform = (size_t)num_rows * (size_t)params.num_outputs; hipLaunchKernelGGL(( transform_k), dim3(raft::ceildiv(num_values_to_transform, (size_t)FIL_TPB)), dim3(FIL_TPB), 0, stream, preds, num_values_to_transform, ot, num_trees_ > 0 ? (1.0f / num_trees_) : 1.0f, threshold_, global_bias, complement_proba); CUDA_CHECK(hipPeekAtLastError()); } } virtual void free(const raft::handle_t& h) = 0; virtual ~forest() {} int num_trees_ = 0; int depth_ = 0; algo_t algo_ = algo_t::NAIVE; output_t output_ = output_t::RAW; float threshold_ = 0.5; float global_bias_ = 0; shmem_size_params class_ssp_, proba_ssp_; int fixed_block_count_ = 0; }; struct dense_forest : forest { void transform_trees(const dense_node* nodes) { /* Populate node information: For each tree, the nodes are still stored in the breadth-first, left-to-right order. However, instead of storing the nodes of the same tree adjacently, it uses a different layout. In this layout, the roots of all trees (node 0) are stored first, followed by left children of the roots of all trees (node 1), followed by the right children of the roots of all trees (node 2), and so on. 
*/ int global_node = 0; for (int tree = 0; tree < num_trees_; ++tree) { int tree_node = 0; // the counters `level` and `branch` are not used for computing node // indices, they are only here to highlight the node ordering within // each tree for (int level = 0; level <= depth_; ++level) { for (int branch = 0; branch < 1 << level; ++branch) { h_nodes_[tree_node * num_trees_ + tree] = nodes[global_node]; ++tree_node; ++global_node; } } } } void init(const raft::handle_t& h, const dense_node* nodes, const forest_params_t* params) { init_common(h, params); if (algo_ == algo_t::NAIVE) algo_ = algo_t::BATCH_TREE_REORG; int num_nodes = forest_num_nodes(num_trees_, depth_); nodes_ = (dense_node*)h.get_device_allocator()->allocate( sizeof(dense_node) * num_nodes, h.get_stream()); h_nodes_.resize(num_nodes); if (algo_ == algo_t::NAIVE) { std::copy(nodes, nodes + num_nodes, h_nodes_.begin()); } else { transform_trees(nodes); } CUDA_CHECK(hipMemcpyAsync(nodes_, h_nodes_.data(), num_nodes * sizeof(dense_node), hipMemcpyHostToDevice, h.get_stream())); // copy must be finished before freeing the host data CUDA_CHECK(hipStreamSynchronize(h.get_stream())); h_nodes_.clear(); h_nodes_.shrink_to_fit(); } virtual void infer(predict_params params, hipStream_t stream) override { dense_storage forest(nodes_, num_trees_, algo_ == algo_t::NAIVE ? tree_num_nodes(depth_) : 1, algo_ == algo_t::NAIVE ? 1 : num_trees_); fil::infer(forest, params, stream); } virtual void free(const raft::handle_t& h) override { int num_nodes = forest_num_nodes(num_trees_, depth_); h.get_device_allocator()->deallocate(nodes_, sizeof(dense_node) * num_nodes, h.get_stream()); } dense_node* nodes_ = nullptr; thrust::host_vector<dense_node> h_nodes_; }; template <typename node_t> struct sparse_forest : forest { void init(const raft::handle_t& h, const int* trees, const node_t* nodes, const forest_params_t* params) { init_common(h, params); if (algo_ == algo_t::ALGO_AUTO) algo_ = algo_t::NAIVE; depth_ = 0; // a placeholder value num_nodes_ = params->num_nodes; // trees trees_ = (int*)h.get_device_allocator()->allocate(sizeof(int) * num_trees_, h.get_stream()); CUDA_CHECK(hipMemcpyAsync(trees_, trees, sizeof(int) * num_trees_, hipMemcpyHostToDevice, h.get_stream())); // nodes nodes_ = (node_t*)h.get_device_allocator()->allocate( sizeof(node_t) * num_nodes_, h.get_stream()); CUDA_CHECK(hipMemcpyAsync(nodes_, nodes, sizeof(node_t) * num_nodes_, hipMemcpyHostToDevice, h.get_stream())); } virtual void infer(predict_params params, hipStream_t stream) override { sparse_storage<node_t> forest(trees_, nodes_, num_trees_); fil::infer(forest, params, stream); } void free(const raft::handle_t& h) override { h.get_device_allocator()->deallocate(trees_, sizeof(int) * num_trees_, h.get_stream()); h.get_device_allocator()->deallocate(nodes_, sizeof(node_t) * num_nodes_, h.get_stream()); } int num_nodes_ = 0; int* trees_ = nullptr; node_t* nodes_ = nullptr; }; void check_params(const forest_params_t* params, bool dense) { if (dense) { ASSERT(params->depth >= 0, "depth must be non-negative for dense forests"); } else { ASSERT(params->num_nodes >= 0, "num_nodes must be non-negative for sparse forests"); ASSERT(params->algo == algo_t::NAIVE || params->algo == algo_t::ALGO_AUTO, "only ALGO_AUTO and NAIVE algorithms are supported " "for sparse forests"); } ASSERT(params->num_trees >= 0, "num_trees must be non-negative"); ASSERT(params->num_cols >= 0, "num_cols must be non-negative"); switch (params->algo) { case algo_t::ALGO_AUTO: case algo_t::NAIVE: case 
algo_t::TREE_REORG: case algo_t::BATCH_TREE_REORG: break; default: ASSERT(false, "algo should be ALGO_AUTO, NAIVE, TREE_REORG or BATCH_TREE_REORG"); } switch (params->leaf_algo) { case leaf_algo_t::FLOAT_UNARY_BINARY: if ((params->output & output_t::CLASS) != 0) { ASSERT(params->num_classes == 2, "only supporting binary" " classification using FLOAT_UNARY_BINARY"); } else { ASSERT(params->num_classes == 1, "num_classes must be 1 for " "regression"); } ASSERT((params->output & output_t::SOFTMAX) == 0, "softmax does not make sense for leaf_algo == FLOAT_UNARY_BINARY"); break; case leaf_algo_t::GROVE_PER_CLASS: ASSERT(params->num_classes > 2, "num_classes > 2 is required for leaf_algo == GROVE_PER_CLASS"); ASSERT(params->num_trees % params->num_classes == 0, "num_classes must divide num_trees evenly for GROVE_PER_CLASS"); break; case leaf_algo_t::CATEGORICAL_LEAF: ASSERT(params->num_classes >= 2, "num_classes >= 2 is required for " "leaf_algo == CATEGORICAL_LEAF"); ASSERT((params->output & output_t::SOFTMAX) == 0, "softmax not supported for leaf_algo == CATEGORICAL_LEAF"); break; default: ASSERT(false, "leaf_algo must be FLOAT_UNARY_BINARY, CATEGORICAL_LEAF" " or GROVE_PER_CLASS"); } if ((params->output & ~output_t::ALL_SET) != 0) { ASSERT( false, "output should be a combination of RAW, AVG, SIGMOID, CLASS and SOFTMAX"); } ASSERT(~params->output & (output_t::SIGMOID | output_t::SOFTMAX), "combining softmax and sigmoid is not supported"); ASSERT(params->blocks_per_sm >= 0, "blocks_per_sm must be nonnegative"); } template <typename T, typename L> int tree_root(const tl::Tree<T, L>& tree) { return 0; // Treelite format assumes that the root is 0 } template <typename T, typename L> inline int max_depth(const tl::Tree<T, L>& tree) { // trees of this depth aren't used, so it most likely means bad input data, // e.g. 
cycles in the forest const int DEPTH_LIMIT = 500; int root_index = tree_root(tree); typedef std::pair<int, int> pair_t; std::stack<pair_t> stack; stack.push(pair_t(root_index, 0)); int max_depth = 0; while (!stack.empty()) { const pair_t& pair = stack.top(); int node_id = pair.first; int depth = pair.second; stack.pop(); while (!tree.IsLeaf(node_id)) { stack.push(pair_t(tree.LeftChild(node_id), depth + 1)); node_id = tree.RightChild(node_id); depth++; ASSERT(depth < DEPTH_LIMIT, "depth limit reached, might be a cycle in the tree"); } // only need to update depth for leaves max_depth = ::max(max_depth, depth); } return max_depth; } template <typename T, typename L> int max_depth(const tl::ModelImpl<T, L>& model) { int depth = 0; const auto& trees = model.trees; #pragma omp parallel for reduction(max : depth) for (size_t i = 0; i < trees.size(); ++i) { const auto& tree = trees[i]; depth = ::max(depth, max_depth(tree)); } return depth; } inline void adjust_threshold(float* pthreshold, int* tl_left, int* tl_right, bool* default_left, tl::Operator comparison_op) { // in treelite (take left node if val [op] threshold), // the meaning of the condition is reversed compared to FIL; // thus, "<" in treelite corresonds to comparison ">=" used by FIL // https://github.com/dmlc/treelite/blob/master/include/treelite/tree.h#L243 switch (comparison_op) { case tl::Operator::kLT: break; case tl::Operator::kLE: // x <= y is equivalent to x < y', where y' is the next representable float *pthreshold = std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity()); break; case tl::Operator::kGT: // x > y is equivalent to x >= y', where y' is the next representable float // left and right still need to be swapped *pthreshold = std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity()); case tl::Operator::kGE: // swap left and right std::swap(*tl_left, *tl_right); *default_left = !*default_left; break; default: ASSERT(false, "only <, >, <= and >= comparisons are supported"); } } /** if the vector consists of zeros and a single one, return the position for the one (assumed class label). Else, asserts false. 
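For example, the leaf vector {0.0, 0.0, 1.0, 0.0} yields class label 2.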
If the vector contains a NAN, asserts false */ template <typename L> int find_class_label_from_one_hot(L* vector, int len) { bool found_label = false; int out; for (int i = 0; i < len; ++i) { if (vector[i] == static_cast<L>(1.0)) { ASSERT(!found_label, "label vector contains multiple 1.0f"); out = i; found_label = true; } else { ASSERT(vector[i] == static_cast<L>(0.0), "label vector contains values other than 0.0 and 1.0"); } } ASSERT(found_label, "did not find 1.0f in vector"); return out; } template <typename fil_node_t, typename T, typename L> void tl2fil_leaf_payload(fil_node_t* fil_node, const tl::Tree<T, L>& tl_tree, int tl_node_id, const forest_params_t& forest_params) { auto vec = tl_tree.LeafVector(tl_node_id); switch (forest_params.leaf_algo) { case leaf_algo_t::CATEGORICAL_LEAF: ASSERT(vec.size() == forest_params.num_classes, "inconsistent number of classes in treelite leaves"); fil_node->val.idx = find_class_label_from_one_hot(&vec[0], vec.size()); break; case leaf_algo_t::FLOAT_UNARY_BINARY: case leaf_algo_t::GROVE_PER_CLASS: fil_node->val.f = static_cast<float>(tl_tree.LeafValue(tl_node_id)); ASSERT(!tl_tree.HasLeafVector(tl_node_id), "some but not all treelite leaves have leaf_vector()"); break; default: ASSERT(false, "internal error: invalid leaf_algo"); }; } template <typename T, typename L> void node2fil_dense(std::vector<dense_node>* pnodes, int root, int cur, const tl::Tree<T, L>& tree, int node_id, const forest_params_t& forest_params) { if (tree.IsLeaf(node_id)) { (*pnodes)[root + cur] = dense_node(val_t{.f = NAN}, NAN, 0, false, true); tl2fil_leaf_payload(&(*pnodes)[root + cur], tree, node_id, forest_params); return; } // inner node ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical, "only numerical split nodes are supported"); int tl_left = tree.LeftChild(node_id), tl_right = tree.RightChild(node_id); bool default_left = tree.DefaultLeft(node_id); float threshold = static_cast<float>(tree.Threshold(node_id)); adjust_threshold(&threshold, &tl_left, &tl_right, &default_left, tree.ComparisonOp(node_id)); (*pnodes)[root + cur] = dense_node( val_t{.f = 0}, threshold, tree.SplitIndex(node_id), default_left, false); int left = 2 * cur + 1; node2fil_dense(pnodes, root, left, tree, tl_left, forest_params); node2fil_dense(pnodes, root, left + 1, tree, tl_right, forest_params); } template <typename T, typename L> void tree2fil_dense(std::vector<dense_node>* pnodes, int root, const tl::Tree<T, L>& tree, const forest_params_t& forest_params) { node2fil_dense(pnodes, root, 0, tree, tree_root(tree), forest_params); } template <typename fil_node_t, typename T, typename L> int tree2fil_sparse(std::vector<fil_node_t>& nodes, int root, const tl::Tree<T, L>& tree, const forest_params_t& forest_params) { typedef std::pair<int, int> pair_t; std::stack<pair_t> stack; int built_index = root + 1; stack.push(pair_t(tree_root(tree), 0)); while (!stack.empty()) { const pair_t& top = stack.top(); int node_id = top.first; int cur = top.second; stack.pop(); while (!tree.IsLeaf(node_id)) { // inner node ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical, "only numerical split nodes are supported"); // tl_left and tl_right are indices of the children in the treelite tree // (stored as an array of nodes) int tl_left = tree.LeftChild(node_id), tl_right = tree.RightChild(node_id); bool default_left = tree.DefaultLeft(node_id); float threshold = static_cast<float>(tree.Threshold(node_id)); adjust_threshold(&threshold, &tl_left, &tl_right, &default_left, 
tree.ComparisonOp(node_id)); // reserve space for child nodes // left is the offset of the left child node relative to the tree root // in the array of all nodes of the FIL sparse forest int left = built_index - root; built_index += 2; nodes[root + cur] = fil_node_t(val_t{.f = 0}, threshold, tree.SplitIndex(node_id), default_left, false, left); // push child nodes into the stack stack.push(pair_t(tl_right, left + 1)); //stack.push(pair_t(tl_left, left)); node_id = tl_left; cur = left; } // leaf node nodes[root + cur] = fil_node_t(val_t{.f = NAN}, NAN, 0, false, true, 0); tl2fil_leaf_payload(&nodes[root + cur], tree, node_id, forest_params); } return root; } struct level_entry { int n_branch_nodes, n_leaves; }; typedef std::pair<int, int> pair_t; // hist has branch and leaf count given depth template <typename T, typename L> inline void tree_depth_hist(const tl::Tree<T, L>& tree, std::vector<level_entry>& hist) { std::stack<pair_t> stack; // {tl_id, depth} stack.push({tree_root(tree), 0}); while (!stack.empty()) { const pair_t& top = stack.top(); int node_id = top.first; int depth = top.second; stack.pop(); while (!tree.IsLeaf(node_id)) { if (depth >= hist.size()) hist.resize(depth + 1, {0, 0}); hist[depth].n_branch_nodes++; stack.push({tree.LeftChild(node_id), depth + 1}); node_id = tree.RightChild(node_id); depth++; } if (depth >= hist.size()) hist.resize(depth + 1, {0, 0}); hist[depth].n_leaves++; } } template <typename T, typename L> std::stringstream depth_hist_and_max(const tl::ModelImpl<T, L>& model) { using namespace std; vector<level_entry> hist; for (const auto& tree : model.trees) tree_depth_hist(tree, hist); int min_leaf_depth = -1, leaves_times_depth = 0, total_branches = 0, total_leaves = 0; stringstream forest_shape; ios default_state(nullptr); default_state.copyfmt(forest_shape); forest_shape << "Depth histogram:" << endl << "depth branches leaves nodes" << endl; for (int level = 0; level < hist.size(); ++level) { level_entry e = hist[level]; forest_shape << setw(5) << level << setw(9) << e.n_branch_nodes << setw(7) << e.n_leaves << setw(8) << e.n_branch_nodes + e.n_leaves << endl; forest_shape.copyfmt(default_state); if (e.n_leaves && min_leaf_depth == -1) min_leaf_depth = level; leaves_times_depth += e.n_leaves * level; total_branches += e.n_branch_nodes; total_leaves += e.n_leaves; } int total_nodes = total_branches + total_leaves; forest_shape << "Total: branches: " << total_branches << " leaves: " << total_leaves << " nodes: " << total_nodes << endl; forest_shape << "Avg nodes per tree: " << setprecision(2) << total_nodes / (float)hist[0].n_branch_nodes << endl; forest_shape.copyfmt(default_state); forest_shape << "Leaf depth: min: " << min_leaf_depth << " avg: " << setprecision(2) << fixed << leaves_times_depth / (float)total_leaves << " max: " << hist.size() - 1 << endl; forest_shape.copyfmt(default_state); vector<char> hist_bytes(hist.size() * sizeof(hist[0])); memcpy(&hist_bytes[0], &hist[0], hist_bytes.size()); // std::hash does not promise to not be identity. 
Xoring plain numbers which // add up to one another erases information, hence, std::hash is unsuitable here forest_shape << "Depth histogram fingerprint: " << hex << fowler_noll_vo_fingerprint64_32(hist_bytes.begin(), hist_bytes.end()) << endl; forest_shape.copyfmt(default_state); return forest_shape; } template <typename T, typename L> size_t tl_leaf_vector_size(const tl::ModelImpl<T, L>& model) { const tl::Tree<T, L>& tree = model.trees[0]; int node_key; for (node_key = tree_root(tree); !tree.IsLeaf(node_key); node_key = tree.RightChild(node_key)) ; if (tree.HasLeafVector(node_key)) return tree.LeafVector(node_key).size(); return 0; } // tl2fil_common is the part of conversion from a treelite model // common for dense and sparse forests template <typename T, typename L> void tl2fil_common(forest_params_t* params, const tl::ModelImpl<T, L>& model, const treelite_params_t* tl_params) { // fill in forest-indendent params params->algo = tl_params->algo; params->threshold = tl_params->threshold; // fill in forest-dependent params params->depth = max_depth(model); // also checks for cycles const tl::ModelParam& param = model.param; // assuming either all leaves use the .leaf_vector() or all leaves use .leaf_value() size_t leaf_vec_size = tl_leaf_vector_size(model); std::string pred_transform(param.pred_transform); if (leaf_vec_size > 0) { ASSERT(leaf_vec_size == model.task_param.num_class, "treelite model inconsistent"); params->num_classes = leaf_vec_size; params->leaf_algo = leaf_algo_t::CATEGORICAL_LEAF; ASSERT(tl_params->output_class, "output_class==true is required for multi-class models"); ASSERT( pred_transform == "max_index" || pred_transform == "identity_multiclass", "only max_index and identity_multiclass values of pred_transform " "are supported for multi-class models"); } else { if (model.task_param.num_class > 1) { params->num_classes = static_cast<int>(model.task_param.num_class); ASSERT(tl_params->output_class, "output_class==true is required for multi-class models"); ASSERT(pred_transform == "identity_multiclass" || pred_transform == "max_index" || pred_transform == "softmax" || pred_transform == "multiclass_ova", "only identity_multiclass, max_index, multiclass_ova and softmax " "values of pred_transform are supported for xgboost-style " "multi-class classification models."); // this function should not know how many threads per block will be used params->leaf_algo = leaf_algo_t::GROVE_PER_CLASS; } else { params->num_classes = tl_params->output_class ? 2 : 1; ASSERT(pred_transform == "sigmoid" || pred_transform == "identity", "only sigmoid and identity values of pred_transform " "are supported for binary classification and regression models."); params->leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY; } } params->num_cols = model.num_feature; ASSERT(param.sigmoid_alpha == 1.0f, "sigmoid_alpha not supported"); params->global_bias = param.global_bias; params->output = output_t::RAW; /** output_t::CLASS denotes using a threshold in FIL, when predict_proba == false. For all multiclass models, the best class is selected using argmax instead. This happens when either leaf_algo == CATEGORICAL_LEAF or num_classes > 2. 
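For example, a binary classifier imported with output_class == true and leaf_algo == FLOAT_UNARY_BINARY gets the CLASS bit set below, whereas a 5-class GROVE_PER_CLASS model does not and instead picks the label with the highest margin.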
**/ if (tl_params->output_class && params->leaf_algo != CATEGORICAL_LEAF && params->num_classes <= 2) { params->output = output_t(params->output | output_t::CLASS); } // "random forest" in treelite means tree output averaging if (model.average_tree_output) { params->output = output_t(params->output | output_t::AVG); } if (pred_transform == "sigmoid" || pred_transform == "multiclass_ova") { params->output = output_t(params->output | output_t::SIGMOID); } if (pred_transform == "softmax") params->output = output_t(params->output | output_t::SOFTMAX); params->num_trees = model.trees.size(); params->blocks_per_sm = tl_params->blocks_per_sm; } // uses treelite model with additional tl_params to initialize FIL params // and dense nodes (stored in *pnodes) template <typename threshold_t, typename leaf_t> void tl2fil_dense(std::vector<dense_node>* pnodes, forest_params_t* params, const tl::ModelImpl<threshold_t, leaf_t>& model, const treelite_params_t* tl_params) { tl2fil_common(params, model, tl_params); // convert the nodes int num_nodes = forest_num_nodes(params->num_trees, params->depth); pnodes->resize(num_nodes, dense_node()); for (int i = 0; i < model.trees.size(); ++i) { tree2fil_dense(pnodes, i * tree_num_nodes(params->depth), model.trees[i], *params); } } template <typename fil_node_t> struct tl2fil_sparse_check_t { template <typename threshold_t, typename leaf_t> static void check(const tl::ModelImpl<threshold_t, leaf_t>& model) { ASSERT(false, "internal error: " "only a specialization of this template should be used"); } }; template <> struct tl2fil_sparse_check_t<sparse_node16> { // no extra check for 16-byte sparse nodes template <typename threshold_t, typename leaf_t> static void check(const tl::ModelImpl<threshold_t, leaf_t>& model) {} }; template <> struct tl2fil_sparse_check_t<sparse_node8> { static const int MAX_FEATURES = 1 << sparse_node8::FID_NUM_BITS; static const int MAX_TREE_NODES = (1 << sparse_node8::LEFT_NUM_BITS) - 1; template <typename threshold_t, typename leaf_t> static void check(const tl::ModelImpl<threshold_t, leaf_t>& model) { // check the number of features int num_features = model.num_feature; ASSERT(num_features <= MAX_FEATURES, "model has %d features, " "but only %d supported for 8-byte sparse nodes", num_features, MAX_FEATURES); // check the number of tree nodes const std::vector<tl::Tree<threshold_t, leaf_t>>& trees = model.trees; for (int i = 0; i < trees.size(); ++i) { int num_nodes = trees[i].num_nodes; ASSERT(num_nodes <= MAX_TREE_NODES, "tree %d has %d nodes, " "but only %d supported for 8-byte sparse nodes", i, num_nodes, MAX_TREE_NODES); } } }; // uses treelite model with additional tl_params to initialize FIL params, // trees (stored in *ptrees) and sparse nodes (stored in *pnodes) template <typename fil_node_t, typename threshold_t, typename leaf_t> void tl2fil_sparse(std::vector<int>* ptrees, std::vector<fil_node_t>* pnodes, forest_params_t* params, const tl::ModelImpl<threshold_t, leaf_t>& model, const treelite_params_t* tl_params) { tl2fil_common(params, model, tl_params); tl2fil_sparse_check_t<fil_node_t>::check(model); size_t num_trees = model.trees.size(); ptrees->reserve(num_trees); ptrees->push_back(0); for (size_t i = 0; i < num_trees - 1; ++i) { ptrees->push_back(model.trees[i].num_nodes + ptrees->back()); } size_t total_nodes = ptrees->back() + model.trees.back().num_nodes; pnodes->resize(total_nodes); // convert the nodes #pragma omp parallel for for (int i = 0; i < num_trees; ++i) { tree2fil_sparse(*pnodes, (*ptrees)[i], 
model.trees[i], *params); } params->num_nodes = pnodes->size(); } void init_dense(const raft::handle_t& h, forest_t* pf, const dense_node* nodes, const forest_params_t* params) { check_params(params, true); dense_forest* f = new dense_forest; f->init(h, nodes, params); *pf = f; } template <typename fil_node_t> void init_sparse(const raft::handle_t& h, forest_t* pf, const int* trees, const fil_node_t* nodes, const forest_params_t* params) { check_params(params, false); sparse_forest<fil_node_t>* f = new sparse_forest<fil_node_t>; f->init(h, trees, nodes, params); *pf = f; } // explicit instantiations for init_sparse() template void init_sparse<sparse_node16>(const raft::handle_t& h, forest_t* pf, const int* trees, const sparse_node16* nodes, const forest_params_t* params); template void init_sparse<sparse_node8>(const raft::handle_t& h, forest_t* pf, const int* trees, const sparse_node8* nodes, const forest_params_t* params); template <typename threshold_t, typename leaf_t> void from_treelite(const raft::handle_t& handle, forest_t* pforest, const tl::ModelImpl<threshold_t, leaf_t>& model, const treelite_params_t* tl_params) { // Invariants on threshold and leaf types static_assert(std::is_same<threshold_t, float>::value || std::is_same<threshold_t, double>::value, "Model must contain float32 or float64 thresholds for splits"); ASSERT( (std::is_same<leaf_t, float>::value || std::is_same<leaf_t, double>::value), "Models with integer leaf output are not yet supported"); // Display appropriate warnings when float64 values are being casted into // float32, as FIL only supports inferencing with float32 for the time being if (std::is_same<threshold_t, double>::value || std::is_same<leaf_t, double>::value) { CUML_LOG_WARN( "Casting all thresholds and leaf values to float32, as FIL currently " "doesn't support inferencing models with float64 values. " "This may lead to predictions with reduced accuracy."); } storage_type_t storage_type = tl_params->storage_type; // build dense trees by default if (storage_type == storage_type_t::AUTO) { if (tl_params->algo == algo_t::ALGO_AUTO || tl_params->algo == algo_t::NAIVE) { int depth = max_depth(model); // max 2**25 dense nodes, 256 MiB dense model size const int LOG2_MAX_DENSE_NODES = 25; int log2_num_dense_nodes = depth + 1 + int(ceil(std::log2(model.trees.size()))); storage_type = log2_num_dense_nodes > LOG2_MAX_DENSE_NODES ? 
storage_type_t::SPARSE : storage_type_t::DENSE; } else { // only dense storage is supported for other algorithms storage_type = storage_type_t::DENSE; } } forest_params_t params; switch (storage_type) { case storage_type_t::DENSE: { std::vector<dense_node> nodes; tl2fil_dense(&nodes, &params, model, tl_params); init_dense(handle, pforest, nodes.data(), &params); // sync is necessary as nodes is used in init_dense(), // but destructed at the end of this function CUDA_CHECK(hipStreamSynchronize(handle.get_stream())); if (tl_params->pforest_shape_str) { *tl_params->pforest_shape_str = sprintf_shape(model, storage_type, nodes, {}); } break; } case storage_type_t::SPARSE: { std::vector<int> trees; std::vector<sparse_node16> nodes; tl2fil_sparse(&trees, &nodes, &params, model, tl_params); init_sparse(handle, pforest, trees.data(), nodes.data(), &params); CUDA_CHECK(hipStreamSynchronize(handle.get_stream())); if (tl_params->pforest_shape_str) { *tl_params->pforest_shape_str = sprintf_shape(model, storage_type, nodes, trees); } break; } case storage_type_t::SPARSE8: { std::vector<int> trees; std::vector<sparse_node8> nodes; tl2fil_sparse(&trees, &nodes, &params, model, tl_params); init_sparse(handle, pforest, trees.data(), nodes.data(), &params); CUDA_CHECK(hipStreamSynchronize(handle.get_stream())); if (tl_params->pforest_shape_str) { *tl_params->pforest_shape_str = sprintf_shape(model, storage_type, nodes, trees); } break; } default: ASSERT(false, "tl_params->sparse must be one of AUTO, DENSE or SPARSE"); } } void from_treelite(const raft::handle_t& handle, forest_t* pforest, ModelHandle model, const treelite_params_t* tl_params) { const tl::Model& model_ref = *(tl::Model*)model; model_ref.Dispatch([&](const auto& model_inner) { // model_inner is of the concrete type tl::ModelImpl<threshold_t, leaf_t> from_treelite(handle, pforest, model_inner, tl_params); }); } // allocates caller-owned char* using malloc() template <typename threshold_t, typename leaf_t, typename node_t> char* sprintf_shape(const tl::ModelImpl<threshold_t, leaf_t>& model, storage_type_t storage, const std::vector<node_t>& nodes, const std::vector<int>& trees) { std::stringstream forest_shape = depth_hist_and_max(model); float size_mb = (trees.size() * sizeof(trees.front()) + nodes.size() * sizeof(nodes.front())) / 1e6; forest_shape << storage_type_repr[storage] << " model size " << std::setprecision(2) << size_mb << " MB" << std::endl; // stream may be discontiguous std::string forest_shape_str = forest_shape.str(); // now copy to a non-owning allocation char* shape_out = (char*)malloc(forest_shape_str.size() + 1); // incl. \0 memcpy((void*)shape_out, forest_shape_str.c_str(), forest_shape_str.size() + 1); return shape_out; } void free(const raft::handle_t& h, forest_t f) { f->free(h); delete f; } void predict(const raft::handle_t& h, forest_t f, float* preds, const float* data, size_t num_rows, bool predict_proba) { f->predict(h, preds, data, num_rows, predict_proba); } } // namespace fil } // namespace ML
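// ---------------------------------------------------------------------------
// Illustrative usage sketch (an assumption-laden example, not part of the
// library itself): a minimal host-side driver for the public entry points
// defined above -- from_treelite(), predict() and free(). The treelite
// ModelHandle is assumed to have been created elsewhere through the treelite
// C API; example_fil_inference(), d_data and d_preds are hypothetical names
// for the caller's own function and device buffers.
// ---------------------------------------------------------------------------
#include <cuml/fil/fil.h>
#include <raft/handle.hpp>
#include <treelite/c_api.h>
#include <hip/hip_runtime.h>

void example_fil_inference(ModelHandle model_handle,  // assumed: model already loaded
                           const float* d_data,       // assumed: num_rows x num_cols, device memory
                           float* d_preds,            // assumed: device output buffer
                           size_t num_rows) {
  raft::handle_t handle;

  // Import the treelite model into a FIL forest; storage and algorithm are
  // left to the AUTO heuristics implemented in from_treelite() above.
  ML::fil::treelite_params_t tl_params;
  tl_params.algo = ML::fil::algo_t::ALGO_AUTO;
  tl_params.output_class = true;                           // classification output
  tl_params.threshold = 0.5f;                              // only used for binary CLASS output
  tl_params.storage_type = ML::fil::storage_type_t::AUTO;  // dense vs sparse picked by depth
  tl_params.blocks_per_sm = 0;                             // 0: blocks proportional to num_rows
  tl_params.pforest_shape_str = nullptr;                   // no forest-shape printout

  ML::fil::forest_t forest = nullptr;
  ML::fil::from_treelite(handle, &forest, model_handle, &tl_params);

  // One prediction per row; pass predict_proba = true to get probabilities instead.
  ML::fil::predict(handle, forest, d_preds, d_data, num_rows, /*predict_proba=*/false);
  hipStreamSynchronize(handle.get_stream());

  ML::fil::free(handle, forest);
}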
c0e13f807c5d0a68311bc75401d38eaa539f0f91.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** @file fil.cu implements forest inference */ #include <omp.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <treelite/c_api.h> #include <treelite/tree.h> #include <algorithm> #include <cmath> #include <iomanip> #include <limits> #include <stack> #include <utility> #include <cuml/fil/fil.h> #include <cuml/fil/fnv_hash.h> #include <raft/cudart_utils.h> #include <cuml/common/logger.hpp> #include <raft/handle.hpp> #include <raft/mr/device/allocator.hpp> #include <raft/mr/host/allocator.hpp> #include "common.cuh" namespace ML { namespace fil { namespace tl = treelite; __host__ __device__ float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } /** performs additional transformations on the array of forest predictions (preds) of size n; the transformations are defined by output, and include averaging (multiplying by inv_num_trees), adding global_bias (always done), sigmoid and applying threshold. in case of complement_proba, fills in the complement probability */ __global__ void transform_k(float* preds, size_t n, output_t output, float inv_num_trees, float threshold, float global_bias, bool complement_proba) { size_t i = threadIdx.x + size_t(blockIdx.x) * blockDim.x; if (i >= n) return; if (complement_proba && i % 2 != 0) return; float result = preds[i]; if ((output & output_t::AVG) != 0) result *= inv_num_trees; result += global_bias; if ((output & output_t::SIGMOID) != 0) result = sigmoid(result); // will not be done on CATEGORICAL_LEAF because the whole kernel will not run if ((output & output_t::CLASS) != 0) { result = result > threshold ? 1.0f : 0.0f; } // sklearn outputs numpy array in 'C' order, with the number of classes being last dimension // that is also the default order, so we should use the same one if (complement_proba) { preds[i] = 1.0f - result; preds[i + 1] = result; } else preds[i] = result; } struct forest { void init_n_items(int device) { int max_shm_std = 48 * 1024; // 48 KiB /// the most shared memory a kernel can request on the GPU in question int max_shm = 0; CUDA_CHECK(cudaDeviceGetAttribute( &max_shm, cudaDevAttrMaxSharedMemoryPerBlockOptin, device)); // TODO(canonizer): use >48KiB shared memory if available max_shm = std::min(max_shm, max_shm_std); // searching for the most items per block while respecting the shared // memory limits creates a full linear programming problem. // solving it in a single equation looks less tractable than this for (bool predict_proba : {false, true}) { shmem_size_params& ssp_ = predict_proba ? proba_ssp_ : class_ssp_; ssp_.predict_proba = predict_proba; shmem_size_params ssp = ssp_; for (bool cols_in_shmem : {false, true}) { ssp.cols_in_shmem = cols_in_shmem; for (ssp.n_items = 1; ssp.n_items <= (algo_ == algo_t::BATCH_TREE_REORG ? 4 : 1); ++ssp.n_items) { ssp.compute_smem_footprint(); if (ssp.shm_sz < max_shm) ssp_ = ssp; } } ASSERT(max_shm >= ssp_.shm_sz, "FIL out of shared memory. 
Perhaps the maximum number of \n" "supported classes is exceeded? 5'000 would still be safe."); } } void init_fixed_block_count(int device, int blocks_per_sm) { int max_threads_per_sm, sm_count; CUDA_CHECK(cudaDeviceGetAttribute( &max_threads_per_sm, cudaDevAttrMaxThreadsPerMultiProcessor, device)); int max_blocks_per_sm = max_threads_per_sm / FIL_TPB; ASSERT(blocks_per_sm <= max_blocks_per_sm, "on this GPU, FIL blocks_per_sm cannot exceed %d", max_blocks_per_sm); CUDA_CHECK(cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, device)); fixed_block_count_ = blocks_per_sm * sm_count; } void init_common(const raft::handle_t& h, const forest_params_t* params) { depth_ = params->depth; num_trees_ = params->num_trees; algo_ = params->algo; output_ = params->output; threshold_ = params->threshold; global_bias_ = params->global_bias; proba_ssp_.leaf_algo = params->leaf_algo; proba_ssp_.num_cols = params->num_cols; proba_ssp_.num_classes = params->num_classes; class_ssp_ = proba_ssp_; int device = h.get_device(); init_n_items(device); // n_items takes priority over blocks_per_sm init_fixed_block_count(device, params->blocks_per_sm); } virtual void infer(predict_params params, cudaStream_t stream) = 0; void predict(const raft::handle_t& h, float* preds, const float* data, size_t num_rows, bool predict_proba) { // Initialize prediction parameters. predict_params params(predict_proba ? proba_ssp_ : class_ssp_); params.algo = algo_; params.preds = preds; params.data = data; params.num_rows = num_rows; // ignored unless predict_proba is true and algo is GROVE_PER_CLASS params.transform = output_; // fixed_block_count_ == 0 means the number of thread blocks is // proportional to the number of rows params.num_blocks = fixed_block_count_; /** The binary classification / regression (FLOAT_UNARY_BINARY) predict_proba() works as follows (always 2 outputs): RAW: output the sum of tree predictions AVG is set: divide by the number of trees (averaging) SIGMOID is set: apply sigmoid CLASS is set: ignored SOFTMAX is set: error write the output of the previous stages and its complement The binary classification / regression (FLOAT_UNARY_BINARY) predict() works as follows (always 1 output): RAW (no values set): output the sum of tree predictions AVG is set: divide by the number of trees (averaging) SIGMOID is set: apply sigmoid CLASS is set: apply threshold (equivalent to choosing best class) SOFTMAX is set: error The multi-class classification / regression (CATEGORICAL_LEAF) predict_proba() works as follows (always num_classes outputs): RAW (no values set): output class votes AVG is set: divide by the number of trees (averaging, output class probability) SIGMOID is set: apply sigmoid CLASS is set: ignored SOFTMAX is set: error The multi-class classification / regression (CATEGORICAL_LEAF) predict() works as follows (always 1 output): RAW (no values set): output the label of the class with highest probability, else output label 0. 
SOFTMAX is set: error All other flags (AVG, SIGMOID, CLASS) are ignored The multi-class classification / regression (GROVE_PER_CLASS) predict_proba() works as follows (always num_classes outputs): RAW (no values set): output class votes AVG is set: divide by the number of trees (averaging, output class probability) SIGMOID is set: apply sigmoid; if SOFTMAX is also set: error CLASS is set: ignored SOFTMAX is set: softmax is applied after averaging and global_bias The multi-class classification / regression (GROVE_PER_CLASS) predict() works as follows (always 1 output): RAW (no values set): output the label of the class with highest margin, equal margins resolved in favor of smaller label integer All other flags (AVG, SIGMOID, CLASS, SOFTMAX) are ignored */ output_t ot = output_; // Treelite applies bias before softmax, but we do after. // Simulating treelite order, which cancels out bias. // If non-proba prediction used, it still will not matter // for the same reason softmax will not. float global_bias = (ot & output_t::SOFTMAX) != 0 ? 0.0f : global_bias_; bool complement_proba = false, do_transform; if (predict_proba) { // no threshold on probabilities ot = output_t(ot & ~output_t::CLASS); switch (params.leaf_algo) { case leaf_algo_t::FLOAT_UNARY_BINARY: params.num_outputs = 2; complement_proba = true; do_transform = true; break; case leaf_algo_t::GROVE_PER_CLASS: // for GROVE_PER_CLASS, averaging happens in infer_k ot = output_t(ot & ~output_t::AVG); params.num_outputs = params.num_classes; do_transform = ot != output_t::RAW && ot != output_t::SOFTMAX || global_bias != 0.0f; break; case leaf_algo_t::CATEGORICAL_LEAF: params.num_outputs = params.num_classes; do_transform = ot != output_t::RAW || global_bias_ != 0.0f; break; default: ASSERT(false, "internal error: invalid leaf_algo_"); } } else { if (params.leaf_algo == leaf_algo_t::FLOAT_UNARY_BINARY) { do_transform = ot != output_t::RAW || global_bias_ != 0.0f; } else { // GROVE_PER_CLASS, CATEGORICAL_LEAF: moot since choosing best class and // all transforms are monotonic. also, would break current code do_transform = false; } params.num_outputs = 1; } // Predict using the forest. cudaStream_t stream = h.get_stream(); infer(params, stream); if (do_transform) { size_t num_values_to_transform = (size_t)num_rows * (size_t)params.num_outputs; transform_k<<<raft::ceildiv(num_values_to_transform, (size_t)FIL_TPB), FIL_TPB, 0, stream>>>( preds, num_values_to_transform, ot, num_trees_ > 0 ? (1.0f / num_trees_) : 1.0f, threshold_, global_bias, complement_proba); CUDA_CHECK(cudaPeekAtLastError()); } } virtual void free(const raft::handle_t& h) = 0; virtual ~forest() {} int num_trees_ = 0; int depth_ = 0; algo_t algo_ = algo_t::NAIVE; output_t output_ = output_t::RAW; float threshold_ = 0.5; float global_bias_ = 0; shmem_size_params class_ssp_, proba_ssp_; int fixed_block_count_ = 0; }; struct dense_forest : forest { void transform_trees(const dense_node* nodes) { /* Populate node information: For each tree, the nodes are still stored in the breadth-first, left-to-right order. However, instead of storing the nodes of the same tree adjacently, it uses a different layout. In this layout, the roots of all trees (node 0) are stored first, followed by left children of the roots of all trees (node 1), followed by the right children of the roots of all trees (node 2), and so on. 
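Concretely, node j (counting in each tree's own breadth-first order) of tree t is written to h_nodes_[j * num_trees_ + t], so the nodes occupying the same breadth-first slot in different trees end up adjacent in memory.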
*/ int global_node = 0; for (int tree = 0; tree < num_trees_; ++tree) { int tree_node = 0; // the counters `level` and `branch` are not used for computing node // indices, they are only here to highlight the node ordering within // each tree for (int level = 0; level <= depth_; ++level) { for (int branch = 0; branch < 1 << level; ++branch) { h_nodes_[tree_node * num_trees_ + tree] = nodes[global_node]; ++tree_node; ++global_node; } } } } void init(const raft::handle_t& h, const dense_node* nodes, const forest_params_t* params) { init_common(h, params); if (algo_ == algo_t::NAIVE) algo_ = algo_t::BATCH_TREE_REORG; int num_nodes = forest_num_nodes(num_trees_, depth_); nodes_ = (dense_node*)h.get_device_allocator()->allocate( sizeof(dense_node) * num_nodes, h.get_stream()); h_nodes_.resize(num_nodes); if (algo_ == algo_t::NAIVE) { std::copy(nodes, nodes + num_nodes, h_nodes_.begin()); } else { transform_trees(nodes); } CUDA_CHECK(cudaMemcpyAsync(nodes_, h_nodes_.data(), num_nodes * sizeof(dense_node), cudaMemcpyHostToDevice, h.get_stream())); // copy must be finished before freeing the host data CUDA_CHECK(cudaStreamSynchronize(h.get_stream())); h_nodes_.clear(); h_nodes_.shrink_to_fit(); } virtual void infer(predict_params params, cudaStream_t stream) override { dense_storage forest(nodes_, num_trees_, algo_ == algo_t::NAIVE ? tree_num_nodes(depth_) : 1, algo_ == algo_t::NAIVE ? 1 : num_trees_); fil::infer(forest, params, stream); } virtual void free(const raft::handle_t& h) override { int num_nodes = forest_num_nodes(num_trees_, depth_); h.get_device_allocator()->deallocate(nodes_, sizeof(dense_node) * num_nodes, h.get_stream()); } dense_node* nodes_ = nullptr; thrust::host_vector<dense_node> h_nodes_; }; template <typename node_t> struct sparse_forest : forest { void init(const raft::handle_t& h, const int* trees, const node_t* nodes, const forest_params_t* params) { init_common(h, params); if (algo_ == algo_t::ALGO_AUTO) algo_ = algo_t::NAIVE; depth_ = 0; // a placeholder value num_nodes_ = params->num_nodes; // trees trees_ = (int*)h.get_device_allocator()->allocate(sizeof(int) * num_trees_, h.get_stream()); CUDA_CHECK(cudaMemcpyAsync(trees_, trees, sizeof(int) * num_trees_, cudaMemcpyHostToDevice, h.get_stream())); // nodes nodes_ = (node_t*)h.get_device_allocator()->allocate( sizeof(node_t) * num_nodes_, h.get_stream()); CUDA_CHECK(cudaMemcpyAsync(nodes_, nodes, sizeof(node_t) * num_nodes_, cudaMemcpyHostToDevice, h.get_stream())); } virtual void infer(predict_params params, cudaStream_t stream) override { sparse_storage<node_t> forest(trees_, nodes_, num_trees_); fil::infer(forest, params, stream); } void free(const raft::handle_t& h) override { h.get_device_allocator()->deallocate(trees_, sizeof(int) * num_trees_, h.get_stream()); h.get_device_allocator()->deallocate(nodes_, sizeof(node_t) * num_nodes_, h.get_stream()); } int num_nodes_ = 0; int* trees_ = nullptr; node_t* nodes_ = nullptr; }; void check_params(const forest_params_t* params, bool dense) { if (dense) { ASSERT(params->depth >= 0, "depth must be non-negative for dense forests"); } else { ASSERT(params->num_nodes >= 0, "num_nodes must be non-negative for sparse forests"); ASSERT(params->algo == algo_t::NAIVE || params->algo == algo_t::ALGO_AUTO, "only ALGO_AUTO and NAIVE algorithms are supported " "for sparse forests"); } ASSERT(params->num_trees >= 0, "num_trees must be non-negative"); ASSERT(params->num_cols >= 0, "num_cols must be non-negative"); switch (params->algo) { case algo_t::ALGO_AUTO: case algo_t::NAIVE: case 
algo_t::TREE_REORG: case algo_t::BATCH_TREE_REORG: break; default: ASSERT(false, "algo should be ALGO_AUTO, NAIVE, TREE_REORG or BATCH_TREE_REORG"); } switch (params->leaf_algo) { case leaf_algo_t::FLOAT_UNARY_BINARY: if ((params->output & output_t::CLASS) != 0) { ASSERT(params->num_classes == 2, "only supporting binary" " classification using FLOAT_UNARY_BINARY"); } else { ASSERT(params->num_classes == 1, "num_classes must be 1 for " "regression"); } ASSERT((params->output & output_t::SOFTMAX) == 0, "softmax does not make sense for leaf_algo == FLOAT_UNARY_BINARY"); break; case leaf_algo_t::GROVE_PER_CLASS: ASSERT(params->num_classes > 2, "num_classes > 2 is required for leaf_algo == GROVE_PER_CLASS"); ASSERT(params->num_trees % params->num_classes == 0, "num_classes must divide num_trees evenly for GROVE_PER_CLASS"); break; case leaf_algo_t::CATEGORICAL_LEAF: ASSERT(params->num_classes >= 2, "num_classes >= 2 is required for " "leaf_algo == CATEGORICAL_LEAF"); ASSERT((params->output & output_t::SOFTMAX) == 0, "softmax not supported for leaf_algo == CATEGORICAL_LEAF"); break; default: ASSERT(false, "leaf_algo must be FLOAT_UNARY_BINARY, CATEGORICAL_LEAF" " or GROVE_PER_CLASS"); } if ((params->output & ~output_t::ALL_SET) != 0) { ASSERT( false, "output should be a combination of RAW, AVG, SIGMOID, CLASS and SOFTMAX"); } ASSERT(~params->output & (output_t::SIGMOID | output_t::SOFTMAX), "combining softmax and sigmoid is not supported"); ASSERT(params->blocks_per_sm >= 0, "blocks_per_sm must be nonnegative"); } template <typename T, typename L> int tree_root(const tl::Tree<T, L>& tree) { return 0; // Treelite format assumes that the root is 0 } template <typename T, typename L> inline int max_depth(const tl::Tree<T, L>& tree) { // trees of this depth aren't used, so it most likely means bad input data, // e.g. 
cycles in the forest const int DEPTH_LIMIT = 500; int root_index = tree_root(tree); typedef std::pair<int, int> pair_t; std::stack<pair_t> stack; stack.push(pair_t(root_index, 0)); int max_depth = 0; while (!stack.empty()) { const pair_t& pair = stack.top(); int node_id = pair.first; int depth = pair.second; stack.pop(); while (!tree.IsLeaf(node_id)) { stack.push(pair_t(tree.LeftChild(node_id), depth + 1)); node_id = tree.RightChild(node_id); depth++; ASSERT(depth < DEPTH_LIMIT, "depth limit reached, might be a cycle in the tree"); } // only need to update depth for leaves max_depth = std::max(max_depth, depth); } return max_depth; } template <typename T, typename L> int max_depth(const tl::ModelImpl<T, L>& model) { int depth = 0; const auto& trees = model.trees; #pragma omp parallel for reduction(max : depth) for (size_t i = 0; i < trees.size(); ++i) { const auto& tree = trees[i]; depth = std::max(depth, max_depth(tree)); } return depth; } inline void adjust_threshold(float* pthreshold, int* tl_left, int* tl_right, bool* default_left, tl::Operator comparison_op) { // in treelite (take left node if val [op] threshold), // the meaning of the condition is reversed compared to FIL; // thus, "<" in treelite corresonds to comparison ">=" used by FIL // https://github.com/dmlc/treelite/blob/master/include/treelite/tree.h#L243 switch (comparison_op) { case tl::Operator::kLT: break; case tl::Operator::kLE: // x <= y is equivalent to x < y', where y' is the next representable float *pthreshold = std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity()); break; case tl::Operator::kGT: // x > y is equivalent to x >= y', where y' is the next representable float // left and right still need to be swapped *pthreshold = std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity()); case tl::Operator::kGE: // swap left and right std::swap(*tl_left, *tl_right); *default_left = !*default_left; break; default: ASSERT(false, "only <, >, <= and >= comparisons are supported"); } } /** if the vector consists of zeros and a single one, return the position for the one (assumed class label). Else, asserts false. 
If the vector contains a NAN, asserts false */ template <typename L> int find_class_label_from_one_hot(L* vector, int len) { bool found_label = false; int out; for (int i = 0; i < len; ++i) { if (vector[i] == static_cast<L>(1.0)) { ASSERT(!found_label, "label vector contains multiple 1.0f"); out = i; found_label = true; } else { ASSERT(vector[i] == static_cast<L>(0.0), "label vector contains values other than 0.0 and 1.0"); } } ASSERT(found_label, "did not find 1.0f in vector"); return out; } template <typename fil_node_t, typename T, typename L> void tl2fil_leaf_payload(fil_node_t* fil_node, const tl::Tree<T, L>& tl_tree, int tl_node_id, const forest_params_t& forest_params) { auto vec = tl_tree.LeafVector(tl_node_id); switch (forest_params.leaf_algo) { case leaf_algo_t::CATEGORICAL_LEAF: ASSERT(vec.size() == forest_params.num_classes, "inconsistent number of classes in treelite leaves"); fil_node->val.idx = find_class_label_from_one_hot(&vec[0], vec.size()); break; case leaf_algo_t::FLOAT_UNARY_BINARY: case leaf_algo_t::GROVE_PER_CLASS: fil_node->val.f = static_cast<float>(tl_tree.LeafValue(tl_node_id)); ASSERT(!tl_tree.HasLeafVector(tl_node_id), "some but not all treelite leaves have leaf_vector()"); break; default: ASSERT(false, "internal error: invalid leaf_algo"); }; } template <typename T, typename L> void node2fil_dense(std::vector<dense_node>* pnodes, int root, int cur, const tl::Tree<T, L>& tree, int node_id, const forest_params_t& forest_params) { if (tree.IsLeaf(node_id)) { (*pnodes)[root + cur] = dense_node(val_t{.f = NAN}, NAN, 0, false, true); tl2fil_leaf_payload(&(*pnodes)[root + cur], tree, node_id, forest_params); return; } // inner node ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical, "only numerical split nodes are supported"); int tl_left = tree.LeftChild(node_id), tl_right = tree.RightChild(node_id); bool default_left = tree.DefaultLeft(node_id); float threshold = static_cast<float>(tree.Threshold(node_id)); adjust_threshold(&threshold, &tl_left, &tl_right, &default_left, tree.ComparisonOp(node_id)); (*pnodes)[root + cur] = dense_node( val_t{.f = 0}, threshold, tree.SplitIndex(node_id), default_left, false); int left = 2 * cur + 1; node2fil_dense(pnodes, root, left, tree, tl_left, forest_params); node2fil_dense(pnodes, root, left + 1, tree, tl_right, forest_params); } template <typename T, typename L> void tree2fil_dense(std::vector<dense_node>* pnodes, int root, const tl::Tree<T, L>& tree, const forest_params_t& forest_params) { node2fil_dense(pnodes, root, 0, tree, tree_root(tree), forest_params); } template <typename fil_node_t, typename T, typename L> int tree2fil_sparse(std::vector<fil_node_t>& nodes, int root, const tl::Tree<T, L>& tree, const forest_params_t& forest_params) { typedef std::pair<int, int> pair_t; std::stack<pair_t> stack; int built_index = root + 1; stack.push(pair_t(tree_root(tree), 0)); while (!stack.empty()) { const pair_t& top = stack.top(); int node_id = top.first; int cur = top.second; stack.pop(); while (!tree.IsLeaf(node_id)) { // inner node ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical, "only numerical split nodes are supported"); // tl_left and tl_right are indices of the children in the treelite tree // (stored as an array of nodes) int tl_left = tree.LeftChild(node_id), tl_right = tree.RightChild(node_id); bool default_left = tree.DefaultLeft(node_id); float threshold = static_cast<float>(tree.Threshold(node_id)); adjust_threshold(&threshold, &tl_left, &tl_right, &default_left, 
tree.ComparisonOp(node_id)); // reserve space for child nodes // left is the offset of the left child node relative to the tree root // in the array of all nodes of the FIL sparse forest int left = built_index - root; built_index += 2; nodes[root + cur] = fil_node_t(val_t{.f = 0}, threshold, tree.SplitIndex(node_id), default_left, false, left); // push child nodes into the stack stack.push(pair_t(tl_right, left + 1)); //stack.push(pair_t(tl_left, left)); node_id = tl_left; cur = left; } // leaf node nodes[root + cur] = fil_node_t(val_t{.f = NAN}, NAN, 0, false, true, 0); tl2fil_leaf_payload(&nodes[root + cur], tree, node_id, forest_params); } return root; } struct level_entry { int n_branch_nodes, n_leaves; }; typedef std::pair<int, int> pair_t; // hist has branch and leaf count given depth template <typename T, typename L> inline void tree_depth_hist(const tl::Tree<T, L>& tree, std::vector<level_entry>& hist) { std::stack<pair_t> stack; // {tl_id, depth} stack.push({tree_root(tree), 0}); while (!stack.empty()) { const pair_t& top = stack.top(); int node_id = top.first; int depth = top.second; stack.pop(); while (!tree.IsLeaf(node_id)) { if (depth >= hist.size()) hist.resize(depth + 1, {0, 0}); hist[depth].n_branch_nodes++; stack.push({tree.LeftChild(node_id), depth + 1}); node_id = tree.RightChild(node_id); depth++; } if (depth >= hist.size()) hist.resize(depth + 1, {0, 0}); hist[depth].n_leaves++; } } template <typename T, typename L> std::stringstream depth_hist_and_max(const tl::ModelImpl<T, L>& model) { using namespace std; vector<level_entry> hist; for (const auto& tree : model.trees) tree_depth_hist(tree, hist); int min_leaf_depth = -1, leaves_times_depth = 0, total_branches = 0, total_leaves = 0; stringstream forest_shape; ios default_state(nullptr); default_state.copyfmt(forest_shape); forest_shape << "Depth histogram:" << endl << "depth branches leaves nodes" << endl; for (int level = 0; level < hist.size(); ++level) { level_entry e = hist[level]; forest_shape << setw(5) << level << setw(9) << e.n_branch_nodes << setw(7) << e.n_leaves << setw(8) << e.n_branch_nodes + e.n_leaves << endl; forest_shape.copyfmt(default_state); if (e.n_leaves && min_leaf_depth == -1) min_leaf_depth = level; leaves_times_depth += e.n_leaves * level; total_branches += e.n_branch_nodes; total_leaves += e.n_leaves; } int total_nodes = total_branches + total_leaves; forest_shape << "Total: branches: " << total_branches << " leaves: " << total_leaves << " nodes: " << total_nodes << endl; forest_shape << "Avg nodes per tree: " << setprecision(2) << total_nodes / (float)hist[0].n_branch_nodes << endl; forest_shape.copyfmt(default_state); forest_shape << "Leaf depth: min: " << min_leaf_depth << " avg: " << setprecision(2) << fixed << leaves_times_depth / (float)total_leaves << " max: " << hist.size() - 1 << endl; forest_shape.copyfmt(default_state); vector<char> hist_bytes(hist.size() * sizeof(hist[0])); memcpy(&hist_bytes[0], &hist[0], hist_bytes.size()); // std::hash does not promise to not be identity. 
Xoring plain numbers which // add up to one another erases information, hence, std::hash is unsuitable here forest_shape << "Depth histogram fingerprint: " << hex << fowler_noll_vo_fingerprint64_32(hist_bytes.begin(), hist_bytes.end()) << endl; forest_shape.copyfmt(default_state); return forest_shape; } template <typename T, typename L> size_t tl_leaf_vector_size(const tl::ModelImpl<T, L>& model) { const tl::Tree<T, L>& tree = model.trees[0]; int node_key; for (node_key = tree_root(tree); !tree.IsLeaf(node_key); node_key = tree.RightChild(node_key)) ; if (tree.HasLeafVector(node_key)) return tree.LeafVector(node_key).size(); return 0; } // tl2fil_common is the part of conversion from a treelite model // common for dense and sparse forests template <typename T, typename L> void tl2fil_common(forest_params_t* params, const tl::ModelImpl<T, L>& model, const treelite_params_t* tl_params) { // fill in forest-indendent params params->algo = tl_params->algo; params->threshold = tl_params->threshold; // fill in forest-dependent params params->depth = max_depth(model); // also checks for cycles const tl::ModelParam& param = model.param; // assuming either all leaves use the .leaf_vector() or all leaves use .leaf_value() size_t leaf_vec_size = tl_leaf_vector_size(model); std::string pred_transform(param.pred_transform); if (leaf_vec_size > 0) { ASSERT(leaf_vec_size == model.task_param.num_class, "treelite model inconsistent"); params->num_classes = leaf_vec_size; params->leaf_algo = leaf_algo_t::CATEGORICAL_LEAF; ASSERT(tl_params->output_class, "output_class==true is required for multi-class models"); ASSERT( pred_transform == "max_index" || pred_transform == "identity_multiclass", "only max_index and identity_multiclass values of pred_transform " "are supported for multi-class models"); } else { if (model.task_param.num_class > 1) { params->num_classes = static_cast<int>(model.task_param.num_class); ASSERT(tl_params->output_class, "output_class==true is required for multi-class models"); ASSERT(pred_transform == "identity_multiclass" || pred_transform == "max_index" || pred_transform == "softmax" || pred_transform == "multiclass_ova", "only identity_multiclass, max_index, multiclass_ova and softmax " "values of pred_transform are supported for xgboost-style " "multi-class classification models."); // this function should not know how many threads per block will be used params->leaf_algo = leaf_algo_t::GROVE_PER_CLASS; } else { params->num_classes = tl_params->output_class ? 2 : 1; ASSERT(pred_transform == "sigmoid" || pred_transform == "identity", "only sigmoid and identity values of pred_transform " "are supported for binary classification and regression models."); params->leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY; } } params->num_cols = model.num_feature; ASSERT(param.sigmoid_alpha == 1.0f, "sigmoid_alpha not supported"); params->global_bias = param.global_bias; params->output = output_t::RAW; /** output_t::CLASS denotes using a threshold in FIL, when predict_proba == false. For all multiclass models, the best class is selected using argmax instead. This happens when either leaf_algo == CATEGORICAL_LEAF or num_classes > 2. 
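(The condition guarding the CLASS bit below therefore requires output_class, a leaf_algo other than CATEGORICAL_LEAF, and num_classes <= 2.)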
**/ if (tl_params->output_class && params->leaf_algo != CATEGORICAL_LEAF && params->num_classes <= 2) { params->output = output_t(params->output | output_t::CLASS); } // "random forest" in treelite means tree output averaging if (model.average_tree_output) { params->output = output_t(params->output | output_t::AVG); } if (pred_transform == "sigmoid" || pred_transform == "multiclass_ova") { params->output = output_t(params->output | output_t::SIGMOID); } if (pred_transform == "softmax") params->output = output_t(params->output | output_t::SOFTMAX); params->num_trees = model.trees.size(); params->blocks_per_sm = tl_params->blocks_per_sm; } // uses treelite model with additional tl_params to initialize FIL params // and dense nodes (stored in *pnodes) template <typename threshold_t, typename leaf_t> void tl2fil_dense(std::vector<dense_node>* pnodes, forest_params_t* params, const tl::ModelImpl<threshold_t, leaf_t>& model, const treelite_params_t* tl_params) { tl2fil_common(params, model, tl_params); // convert the nodes int num_nodes = forest_num_nodes(params->num_trees, params->depth); pnodes->resize(num_nodes, dense_node()); for (int i = 0; i < model.trees.size(); ++i) { tree2fil_dense(pnodes, i * tree_num_nodes(params->depth), model.trees[i], *params); } } template <typename fil_node_t> struct tl2fil_sparse_check_t { template <typename threshold_t, typename leaf_t> static void check(const tl::ModelImpl<threshold_t, leaf_t>& model) { ASSERT(false, "internal error: " "only a specialization of this template should be used"); } }; template <> struct tl2fil_sparse_check_t<sparse_node16> { // no extra check for 16-byte sparse nodes template <typename threshold_t, typename leaf_t> static void check(const tl::ModelImpl<threshold_t, leaf_t>& model) {} }; template <> struct tl2fil_sparse_check_t<sparse_node8> { static const int MAX_FEATURES = 1 << sparse_node8::FID_NUM_BITS; static const int MAX_TREE_NODES = (1 << sparse_node8::LEFT_NUM_BITS) - 1; template <typename threshold_t, typename leaf_t> static void check(const tl::ModelImpl<threshold_t, leaf_t>& model) { // check the number of features int num_features = model.num_feature; ASSERT(num_features <= MAX_FEATURES, "model has %d features, " "but only %d supported for 8-byte sparse nodes", num_features, MAX_FEATURES); // check the number of tree nodes const std::vector<tl::Tree<threshold_t, leaf_t>>& trees = model.trees; for (int i = 0; i < trees.size(); ++i) { int num_nodes = trees[i].num_nodes; ASSERT(num_nodes <= MAX_TREE_NODES, "tree %d has %d nodes, " "but only %d supported for 8-byte sparse nodes", i, num_nodes, MAX_TREE_NODES); } } }; // uses treelite model with additional tl_params to initialize FIL params, // trees (stored in *ptrees) and sparse nodes (stored in *pnodes) template <typename fil_node_t, typename threshold_t, typename leaf_t> void tl2fil_sparse(std::vector<int>* ptrees, std::vector<fil_node_t>* pnodes, forest_params_t* params, const tl::ModelImpl<threshold_t, leaf_t>& model, const treelite_params_t* tl_params) { tl2fil_common(params, model, tl_params); tl2fil_sparse_check_t<fil_node_t>::check(model); size_t num_trees = model.trees.size(); ptrees->reserve(num_trees); ptrees->push_back(0); for (size_t i = 0; i < num_trees - 1; ++i) { ptrees->push_back(model.trees[i].num_nodes + ptrees->back()); } size_t total_nodes = ptrees->back() + model.trees.back().num_nodes; pnodes->resize(total_nodes); // convert the nodes #pragma omp parallel for for (int i = 0; i < num_trees; ++i) { tree2fil_sparse(*pnodes, (*ptrees)[i], 
model.trees[i], *params); } params->num_nodes = pnodes->size(); } void init_dense(const raft::handle_t& h, forest_t* pf, const dense_node* nodes, const forest_params_t* params) { check_params(params, true); dense_forest* f = new dense_forest; f->init(h, nodes, params); *pf = f; } template <typename fil_node_t> void init_sparse(const raft::handle_t& h, forest_t* pf, const int* trees, const fil_node_t* nodes, const forest_params_t* params) { check_params(params, false); sparse_forest<fil_node_t>* f = new sparse_forest<fil_node_t>; f->init(h, trees, nodes, params); *pf = f; } // explicit instantiations for init_sparse() template void init_sparse<sparse_node16>(const raft::handle_t& h, forest_t* pf, const int* trees, const sparse_node16* nodes, const forest_params_t* params); template void init_sparse<sparse_node8>(const raft::handle_t& h, forest_t* pf, const int* trees, const sparse_node8* nodes, const forest_params_t* params); template <typename threshold_t, typename leaf_t> void from_treelite(const raft::handle_t& handle, forest_t* pforest, const tl::ModelImpl<threshold_t, leaf_t>& model, const treelite_params_t* tl_params) { // Invariants on threshold and leaf types static_assert(std::is_same<threshold_t, float>::value || std::is_same<threshold_t, double>::value, "Model must contain float32 or float64 thresholds for splits"); ASSERT( (std::is_same<leaf_t, float>::value || std::is_same<leaf_t, double>::value), "Models with integer leaf output are not yet supported"); // Display appropriate warnings when float64 values are being casted into // float32, as FIL only supports inferencing with float32 for the time being if (std::is_same<threshold_t, double>::value || std::is_same<leaf_t, double>::value) { CUML_LOG_WARN( "Casting all thresholds and leaf values to float32, as FIL currently " "doesn't support inferencing models with float64 values. " "This may lead to predictions with reduced accuracy."); } storage_type_t storage_type = tl_params->storage_type; // build dense trees by default if (storage_type == storage_type_t::AUTO) { if (tl_params->algo == algo_t::ALGO_AUTO || tl_params->algo == algo_t::NAIVE) { int depth = max_depth(model); // max 2**25 dense nodes, 256 MiB dense model size const int LOG2_MAX_DENSE_NODES = 25; int log2_num_dense_nodes = depth + 1 + int(ceil(std::log2(model.trees.size()))); storage_type = log2_num_dense_nodes > LOG2_MAX_DENSE_NODES ? 
storage_type_t::SPARSE : storage_type_t::DENSE; } else { // only dense storage is supported for other algorithms storage_type = storage_type_t::DENSE; } } forest_params_t params; switch (storage_type) { case storage_type_t::DENSE: { std::vector<dense_node> nodes; tl2fil_dense(&nodes, &params, model, tl_params); init_dense(handle, pforest, nodes.data(), &params); // sync is necessary as nodes is used in init_dense(), // but destructed at the end of this function CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); if (tl_params->pforest_shape_str) { *tl_params->pforest_shape_str = sprintf_shape(model, storage_type, nodes, {}); } break; } case storage_type_t::SPARSE: { std::vector<int> trees; std::vector<sparse_node16> nodes; tl2fil_sparse(&trees, &nodes, &params, model, tl_params); init_sparse(handle, pforest, trees.data(), nodes.data(), &params); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); if (tl_params->pforest_shape_str) { *tl_params->pforest_shape_str = sprintf_shape(model, storage_type, nodes, trees); } break; } case storage_type_t::SPARSE8: { std::vector<int> trees; std::vector<sparse_node8> nodes; tl2fil_sparse(&trees, &nodes, &params, model, tl_params); init_sparse(handle, pforest, trees.data(), nodes.data(), &params); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); if (tl_params->pforest_shape_str) { *tl_params->pforest_shape_str = sprintf_shape(model, storage_type, nodes, trees); } break; } default: ASSERT(false, "tl_params->sparse must be one of AUTO, DENSE or SPARSE"); } } void from_treelite(const raft::handle_t& handle, forest_t* pforest, ModelHandle model, const treelite_params_t* tl_params) { const tl::Model& model_ref = *(tl::Model*)model; model_ref.Dispatch([&](const auto& model_inner) { // model_inner is of the concrete type tl::ModelImpl<threshold_t, leaf_t> from_treelite(handle, pforest, model_inner, tl_params); }); } // allocates caller-owned char* using malloc() template <typename threshold_t, typename leaf_t, typename node_t> char* sprintf_shape(const tl::ModelImpl<threshold_t, leaf_t>& model, storage_type_t storage, const std::vector<node_t>& nodes, const std::vector<int>& trees) { std::stringstream forest_shape = depth_hist_and_max(model); float size_mb = (trees.size() * sizeof(trees.front()) + nodes.size() * sizeof(nodes.front())) / 1e6; forest_shape << storage_type_repr[storage] << " model size " << std::setprecision(2) << size_mb << " MB" << std::endl; // stream may be discontiguous std::string forest_shape_str = forest_shape.str(); // now copy to a non-owning allocation char* shape_out = (char*)malloc(forest_shape_str.size() + 1); // incl. \0 memcpy((void*)shape_out, forest_shape_str.c_str(), forest_shape_str.size() + 1); return shape_out; } void free(const raft::handle_t& h, forest_t f) { f->free(h); delete f; } void predict(const raft::handle_t& h, forest_t f, float* preds, const float* data, size_t num_rows, bool predict_proba) { f->predict(h, preds, data, num_rows, predict_proba); } } // namespace fil } // namespace ML
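The depth-histogram fingerprint above uses fowler_noll_vo_fingerprint64_32 precisely because an XOR-combining hash such as std::hash would cancel counts that add up to one another. The real implementation appears earlier in this file and is not shown here; the sketch below is only a plausible reading of the name, a standard 64-bit FNV-1a over the histogram bytes xor-folded to 32 bits, using the usual FNV offset basis and prime.

#include <cstdint>

// Sketch only: 64-bit FNV-1a over a byte range, xor-folded to 32 bits. The actual
// fowler_noll_vo_fingerprint64_32() defined earlier in this file may differ in detail.
template <typename It>
uint32_t fnv1a_64_fold_32_sketch(It begin, It end)
{
  uint64_t h = 0xcbf29ce484222325ULL;  // FNV-1a 64-bit offset basis
  for (It it = begin; it != end; ++it) {
    h ^= static_cast<uint8_t>(*it);    // mix in one byte...
    h *= 0x100000001b3ULL;             // ...then multiply by the 64-bit FNV prime
  }
  return static_cast<uint32_t>(h ^ (h >> 32));  // fold the upper half into the lower half
}

On the storage_type_t::AUTO branch near the end: the dense/sparse decision is a pure size bound. For example, 1000 trees of depth 12 give log2_num_dense_nodes = 12 + 1 + ceil(log2(1000)) = 23 <= 25, so dense storage is chosen, while the same forest at depth 20 gives 31 > 25 and falls back to sparse.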
52a64b8cc55f669746349849d239ab7028abd55e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <common/cuda_tools.hpp> namespace AlphaPose{ const int NUM_BOX_ELEMENT = 6; // left, top, right, bottom, confidence, class static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){ *ox = matrix[0] * x + matrix[1] * y + matrix[2]; *oy = matrix[3] * x + matrix[4] * y + matrix[5]; } static __global__ void decode_kernel(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= num_bboxes) return; float* pitem = predict + (5 + num_classes) * position; float objectness = pitem[4]; if(objectness < confidence_threshold) return; float* class_confidence = pitem + 5; float confidence = *class_confidence++; int label = 0; for(int i = 1; i < num_classes; ++i, ++class_confidence){ if(*class_confidence > confidence){ confidence = *class_confidence; label = i; } } confidence *= objectness; if(confidence < confidence_threshold) return; int index = atomicAdd(parray, 1); if(index >= max_objects) return; float cx = *pitem++; float cy = *pitem++; float width = *pitem++; float height = *pitem++; float left = cx - width * 0.5f; float top = cy - height * 0.5f; float right = cx + width * 0.5f; float bottom = cy + height * 0.5f; affine_project(invert_affine_matrix, left, top, &left, &top); affine_project(invert_affine_matrix, right, bottom, &right, &bottom); float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT; *pout_item++ = left; *pout_item++ = top; *pout_item++ = right; *pout_item++ = bottom; *pout_item++ = confidence; *pout_item++ = label; } static __device__ float box_iou( float aleft, float atop, float aright, float abottom, float bleft, float btop, float bright, float bbottom ){ float cleft = max(aleft, bleft); float ctop = max(atop, btop); float cright = min(aright, bright); float cbottom = min(abottom, bbottom); float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f); if(c_area == 0.0f) return 0.0f; float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop); float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop); return c_area / (a_area + b_area - c_area); } static __global__ void nms_kernel(float* bboxes, int max_objects, float threshold){ int position = (blockDim.x * blockIdx.x + threadIdx.x); int count = min((int)*bboxes, max_objects); if (position >= count) return; float* pcurrent = bboxes + 1 + position * NUM_BOX_ELEMENT; for(int i = 0; i < count; ++i){ float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT; if(i == position || pcurrent[5] != pitem[5]) continue; float iou = box_iou( pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3], pitem[0], pitem[1], pitem[2], pitem[3] ); if(iou > threshold){ if(pitem[4] > pcurrent[4]){ // ioub > abia // pcurrent[5] = -1; return; } } } } void decode_kernel_invoker(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects, hipStream_t stream){ auto grid = CUDATools::grid_dims(num_bboxes); auto block = CUDATools::block_dims(num_bboxes); hipLaunchKernelGGL(( checkCudaKernel(decode_kernel), dim3(grid), dim3(block), 0, stream, predict, num_bboxes, num_classes, confidence_threshold, nms_threshold, invert_affine_matrix, parray, max_objects)); grid = CUDATools::grid_dims(max_objects); block = CUDATools::block_dims(max_objects); 
hipLaunchKernelGGL(( checkCudaKernel(nms_kernel), dim3(grid), dim3(block), 0, stream, parray, max_objects, nms_threshold)); } };
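A caveat on the two hipified launches in decode_kernel_invoker above: hipify has wrapped the checkCudaKernel macro inside hipLaunchKernelGGL, so the macro no longer expands to a kernel launch followed by an error check. The repair sketch below assumes checkCudaKernel (from common/cuda_tools.hpp, definition not shown here) is a variadic macro that executes the launch statement and then checks the last HIP error, and it relies on hipcc accepting the triple-chevron syntax used in the original .cu file; treat it as a sketch under those assumptions, not as the verified fix.

// Repair sketch for the two launches above, under the assumptions stated in the note.
checkCudaKernel(decode_kernel<<<grid, block, 0, stream>>>(
    predict, num_bboxes, num_classes, confidence_threshold,
    nms_threshold, invert_affine_matrix, parray, max_objects));

grid  = CUDATools::grid_dims(max_objects);
block = CUDATools::block_dims(max_objects);
checkCudaKernel(nms_kernel<<<grid, block, 0, stream>>>(parray, max_objects, nms_threshold));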
52a64b8cc55f669746349849d239ab7028abd55e.cu
#include <common/cuda_tools.hpp> namespace AlphaPose{ const int NUM_BOX_ELEMENT = 6; // left, top, right, bottom, confidence, class static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){ *ox = matrix[0] * x + matrix[1] * y + matrix[2]; *oy = matrix[3] * x + matrix[4] * y + matrix[5]; } static __global__ void decode_kernel(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= num_bboxes) return; float* pitem = predict + (5 + num_classes) * position; float objectness = pitem[4]; if(objectness < confidence_threshold) return; float* class_confidence = pitem + 5; float confidence = *class_confidence++; int label = 0; for(int i = 1; i < num_classes; ++i, ++class_confidence){ if(*class_confidence > confidence){ confidence = *class_confidence; label = i; } } confidence *= objectness; if(confidence < confidence_threshold) return; int index = atomicAdd(parray, 1); if(index >= max_objects) return; float cx = *pitem++; float cy = *pitem++; float width = *pitem++; float height = *pitem++; float left = cx - width * 0.5f; float top = cy - height * 0.5f; float right = cx + width * 0.5f; float bottom = cy + height * 0.5f; affine_project(invert_affine_matrix, left, top, &left, &top); affine_project(invert_affine_matrix, right, bottom, &right, &bottom); float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT; *pout_item++ = left; *pout_item++ = top; *pout_item++ = right; *pout_item++ = bottom; *pout_item++ = confidence; *pout_item++ = label; } static __device__ float box_iou( float aleft, float atop, float aright, float abottom, float bleft, float btop, float bright, float bbottom ){ float cleft = max(aleft, bleft); float ctop = max(atop, btop); float cright = min(aright, bright); float cbottom = min(abottom, bbottom); float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f); if(c_area == 0.0f) return 0.0f; float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop); float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop); return c_area / (a_area + b_area - c_area); } static __global__ void nms_kernel(float* bboxes, int max_objects, float threshold){ int position = (blockDim.x * blockIdx.x + threadIdx.x); int count = min((int)*bboxes, max_objects); if (position >= count) return; float* pcurrent = bboxes + 1 + position * NUM_BOX_ELEMENT; for(int i = 0; i < count; ++i){ float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT; if(i == position || pcurrent[5] != pitem[5]) continue; float iou = box_iou( pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3], pitem[0], pitem[1], pitem[2], pitem[3] ); if(iou > threshold){ if(pitem[4] > pcurrent[4]){ // 如果发现iou大,并且b > a,置信度。b是第i个框,a是当前框 // 表示当前框要过滤掉,不需要保留了 pcurrent[5] = -1; return; } } } } void decode_kernel_invoker(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects, cudaStream_t stream){ auto grid = CUDATools::grid_dims(num_bboxes); auto block = CUDATools::block_dims(num_bboxes); checkCudaKernel(decode_kernel<<<grid, block, 0, stream>>>(predict, num_bboxes, num_classes, confidence_threshold, nms_threshold, invert_affine_matrix, parray, max_objects)); grid = CUDATools::grid_dims(max_objects); block = CUDATools::block_dims(max_objects); checkCudaKernel(nms_kernel<<<grid, block, 0, stream>>>(parray, max_objects, nms_threshold)); } 
};
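For reference, the Chinese comments in the suppression branch of nms_kernel read roughly: if the IoU is large and b's confidence is greater than a's (b being the i-th box, a the current box), the current box should be filtered out and not kept; that is what pcurrent[5] = -1 records. The output buffer parray therefore has the layout [count | max_objects x NUM_BOX_ELEMENT], each element holding left, top, right, bottom, confidence, class, with suppressed boxes keeping their slot but carrying class = -1. The host-side sketch below shows one way to consume that buffer once it has been copied back; the Box struct and function name are illustrative and not part of the AlphaPose code.

#include <algorithm>
#include <vector>

// Illustrative host-side consumer of the decode/NMS output buffer (already copied to host memory).
struct Box { float left, top, right, bottom, confidence; int label; };

static std::vector<Box> collect_boxes(const float* host_array, int max_objects){
    const int NUM_BOX_ELEMENT = 6;                   // left, top, right, bottom, confidence, class
    int count = std::min((int)host_array[0], max_objects);
    std::vector<Box> boxes;
    for(int i = 0; i < count; ++i){
        const float* p = host_array + 1 + i * NUM_BOX_ELEMENT;
        if(p[5] == -1) continue;                     // box was suppressed by nms_kernel
        boxes.push_back({p[0], p[1], p[2], p[3], p[4], (int)p[5]});
    }
    return boxes;
}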
da6ac48767d2ff5106bc6128e76f3aba075ef82c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CUAPI.h" #include "CUFLU.h" #ifdef GRAVITY #include "CUPOT.h" #endif #ifdef GPU // fluid solver prototypes in different models #if ( MODEL == HYDRO ) #if ( FLU_SCHEME == RTVD ) __global__ void CUFLU_FluidSolver_RTVD( real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ], real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ], const double g_Corner[][3], const real g_Pot_USG[][ CUBE(USG_NXT_F) ], const real dt, const real _dh, const bool StoreFlux, const bool XYZ, const real MinDens, const real MinPres, const real MinEint, const EoS_t EoS ); #elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP ) __global__ void CUFLU_FluidSolver_MHM( const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ], const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ], char g_DE_Array_Out [][ CUBE(PS2) ], real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ], real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ], real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ], real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ], const real dt, const real dh, const bool StoreFlux, const bool StoreElectric, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const real MinDens, const real MinPres, const real MinEint, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool FracPassive, const int NFrac, const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t EoS ); #elif ( FLU_SCHEME == CTU ) __global__ void CUFLU_FluidSolver_CTU( const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ], const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ], char g_DE_Array_Out [][ CUBE(PS2) ], real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ], real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ], real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ], real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ], const real dt, const real dh, const bool StoreFlux, const bool StoreElectric, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const real MinDens, const real MinPres, const real MinEint, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool FracPassive, const int NFrac, const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t EoS ); #endif // FLU_SCHEME __global__ void CUFLU_dtSolver_HydroCFL( real g_dt_Array[], const real g_Flu_Array[][FLU_NIN_T][ CUBE(PS1) ], const real g_Mag_Array[][NCOMP_MAG][ PS1P1*SQR(PS1) ], const real dh, const 
real Safety, const real MinPres, const EoS_t EoS ); #ifdef GRAVITY __global__ void CUPOT_dtSolver_HydroGravity( real g_dt_Array[], const real g_Pot_Array[][ CUBE(GRA_NXT) ], const double g_Corner_Array[][3], const real dh, const real Safety, const bool P5_Gradient, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double ExtAcc_Time ); #endif #elif ( MODEL == ELBDM ) __global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out[][FLU_NOUT][ PS2*PS2*PS2 ], real g_Flux [][9][NFLUX_TOTAL][ PS2*PS2 ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const bool XYZ, const real MinDens ); #else #error : ERROR : unsupported MODEL !! #endif // MODEL #ifdef GRAVITY // Poisson solver prototypes #if ( POT_SCHEME == SOR ) #ifdef USE_PSOLVER_10TO14 __global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #else __global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #endif // #ifdef USE_PSOLVER_10TO14 ... else ... #elif ( POT_SCHEME == MG ) __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ); #endif // POT_SCHEME // Gravity solver prototypes in different models #if ( MODEL == HYDRO ) __global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ], const real g_Pot_Array_New[][ CUBE(GRA_NXT) ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ], const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ], char g_DE_Array [][ CUBE(PS1) ], const real g_Emag_Array [][ CUBE(PS1) ], const real dt, const real dh, const bool P5_Gradient, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double TimeNew, const double TimeOld, const real MinEint ); #elif ( MODEL == ELBDM ) __global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real EtaDt, const real dh, const real Lambda ); #else #error : ERROR : unsupported MODEL !! 
#endif // MODEL #endif // GRAVITY // source-term solver prototype __global__ void CUSRC_SrcSolver_IterateAllCells( const real g_Flu_Array_In [][FLU_NIN_S ][ CUBE(SRC_NXT) ], real g_Flu_Array_Out[][FLU_NOUT_S][ CUBE(PS1) ], const real g_Mag_Array_In [][NCOMP_MAG ][ SRC_NXT_P1*SQR(SRC_NXT) ], const double g_Corner_Array[][3], const SrcTerms_t SrcTerms, const int NPatchGroup, const real dt, const real dh, const double TimeNew, const double TimeOld, const real MinDens, const real MinPres, const real MinEint, const EoS_t EoS ); //------------------------------------------------------------------------------------------------------- // Function : CUAPI_Set_Default_GPU_Parameter // Description : Set several GPU parameters to the default values if they are not set in the input file // // Parameter : GPU_NStream : Number of streams for the asynchronous memory copy in GPU // Flu_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the fluid solver // Pot_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Poisson solver // Che_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Grackle solver // Src_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the source-term solver //------------------------------------------------------------------------------------------------------- void CUAPI_Set_Default_GPU_Parameter( int &GPU_NStream, int &Flu_GPU_NPGroup, int &Pot_GPU_NPGroup, int &Che_GPU_NPGroup, int &Src_GPU_NPGroup ) { if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ ); // get the device ID int GetDeviceID = 999; CUDA_CHECK_ERROR( hipGetDevice( &GetDeviceID ) ); // load the device properties hipDeviceProp_t DeviceProp; CUDA_CHECK_ERROR( hipGetDeviceProperties( &DeviceProp, GetDeviceID ) ); // set the default GPU parameters // (1) GPU_NSTREAM if ( GPU_NStream <= 0 ) { if ( DeviceProp.deviceOverlap ) { # if ( MODEL == HYDRO ) # if ( GPU_ARCH == FERMI ) GPU_NStream = 8; # elif ( GPU_ARCH == KEPLER ) GPU_NStream = 16; # elif ( GPU_ARCH == MAXWELL ) GPU_NStream = 16; # elif ( GPU_ARCH == PASCAL ) GPU_NStream = 16; # elif ( GPU_ARCH == VOLTA ) GPU_NStream = 16; # elif ( GPU_ARCH == TURING ) GPU_NStream = 16; # elif ( GPU_ARCH == AMPERE ) GPU_NStream = 16; # else # error : UNKNOWN GPU_ARCH !! # endif # elif ( MODEL == ELBDM ) # if ( GPU_ARCH == FERMI ) GPU_NStream = 8; # elif ( GPU_ARCH == KEPLER ) GPU_NStream = 16; # elif ( GPU_ARCH == MAXWELL ) GPU_NStream = 16; # elif ( GPU_ARCH == PASCAL ) GPU_NStream = 16; # elif ( GPU_ARCH == VOLTA ) GPU_NStream = 16; # elif ( GPU_ARCH == TURING ) GPU_NStream = 16; # elif ( GPU_ARCH == AMPERE ) GPU_NStream = 16; # else # error : ERROR : UNKNOWN GPU_ARCH !! # endif # else # error : ERROR : UNKNOWN MODEL !! 
# endif // MODEL } // if ( DeviceProp.deviceOverlap ) else GPU_NStream = 1; if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "GPU_NSTREAM", GPU_NSTREAM ); } // if ( GPU_NStream <= 0 ) // (2) XXX_GPU_NPGROUP // (2-1) FLU_GPU_NPGROUP if ( Flu_GPU_NPGroup <= 0 ) { # if ( MODEL == HYDRO ) # if ( GPU_ARCH == FERMI ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == TURING ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == AMPERE ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif # elif ( MODEL == ELBDM ) # if ( GPU_ARCH == FERMI ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == TURING ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == AMPERE ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif # else # error : ERROR : UNKNOWN MODEL !! # endif // MODEL if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "FLU_GPU_NPGROUP", Flu_GPU_NPGroup ); } // if ( Flu_GPU_NPGroup <= 0 ) // (2-2) POT_GPU_NPGROUP # ifdef GRAVITY if ( Pot_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == TURING ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == AMPERE ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! 
# endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "POT_GPU_NPGROUP", Pot_GPU_NPGroup ); } // if ( Pot_GPU_NPGroup <= 0 ) # endif // (2-3) CHE_GPU_NPGROUP # ifdef SUPPORT_GRACKLE if ( Che_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == TURING ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == AMPERE ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "CHE_GPU_NPGROUP", Che_GPU_NPGroup ); } // if ( Che_GPU_NPGroup <= 0 ) # endif // (2-4) SRC_GPU_NPGROUP if ( Src_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == TURING ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == AMPERE ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "SRC_GPU_NPGROUP", Src_GPU_NPGroup ); } // if ( Src_GPU_NPGroup <= 0 ) // (3) cache preference // (3-1) fluid solver # if ( MODEL == HYDRO ) # if ( FLU_SCHEME == RTVD ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_RTVD, hipFuncCachePreferShared ) ); # elif ( FLU_SCHEME == MHM ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_MHM, hipFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == MHM_RP ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_MHM, hipFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == CTU ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_CTU, hipFuncCachePreferL1 ) ); # endif CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_dtSolver_HydroCFL, hipFuncCachePreferShared ) ); # ifdef GRAVITY CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_dtSolver_HydroGravity, hipFuncCachePreferShared ) ); # endif # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_ELBDMSolver, hipFuncCachePreferShared ) ); # else # error : ERROR : unsupported MODEL !! 
# endif // MODEL # ifdef GRAVITY // (3-2) Poisson solver # if ( POT_SCHEME == SOR ) # ifdef USE_PSOLVER_10TO14 CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_10to14cube, hipFuncCachePreferShared ) ); # else CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_16to18cube, hipFuncCachePreferShared ) ); # endif # elif ( POT_SCHEME == MG ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_PoissonSolver_MG, hipFuncCachePreferShared ) ); # endif // POT_SCHEME // (3-3) gravity solver # if ( MODEL == HYDRO ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_HydroGravitySolver, hipFuncCachePreferShared ) ); # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_ELBDMGravitySolver, hipFuncCachePreferL1 ) ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL # endif // GRAVITY // (3-4) source-term solver CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUSRC_SrcSolver_IterateAllCells, hipFuncCachePreferL1 ) ); if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ... done\n", __FUNCTION__ ); } // FUNCTION : CUAPI_Set_Default_GPU_Parameter #endif // #ifdef GPU
da6ac48767d2ff5106bc6128e76f3aba075ef82c.cu
#include "CUAPI.h" #include "CUFLU.h" #ifdef GRAVITY #include "CUPOT.h" #endif #ifdef GPU // fluid solver prototypes in different models #if ( MODEL == HYDRO ) #if ( FLU_SCHEME == RTVD ) __global__ void CUFLU_FluidSolver_RTVD( real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ], real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ], const double g_Corner[][3], const real g_Pot_USG[][ CUBE(USG_NXT_F) ], const real dt, const real _dh, const bool StoreFlux, const bool XYZ, const real MinDens, const real MinPres, const real MinEint, const EoS_t EoS ); #elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP ) __global__ void CUFLU_FluidSolver_MHM( const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ], const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ], char g_DE_Array_Out [][ CUBE(PS2) ], real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ], real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ], real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ], real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ], const real dt, const real dh, const bool StoreFlux, const bool StoreElectric, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const real MinDens, const real MinPres, const real MinEint, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool FracPassive, const int NFrac, const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t EoS ); #elif ( FLU_SCHEME == CTU ) __global__ void CUFLU_FluidSolver_CTU( const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ], real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ], const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ], char g_DE_Array_Out [][ CUBE(PS2) ], real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ], real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ], real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ], real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ], real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ], real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ], const real dt, const real dh, const bool StoreFlux, const bool StoreElectric, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const real MinDens, const real MinPres, const real MinEint, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool FracPassive, const int NFrac, const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_t EoS ); #endif // FLU_SCHEME __global__ void CUFLU_dtSolver_HydroCFL( real g_dt_Array[], const real g_Flu_Array[][FLU_NIN_T][ CUBE(PS1) ], const real g_Mag_Array[][NCOMP_MAG][ PS1P1*SQR(PS1) ], const real dh, const real Safety, const real MinPres, const EoS_t EoS ); #ifdef GRAVITY __global__ void 
CUPOT_dtSolver_HydroGravity( real g_dt_Array[], const real g_Pot_Array[][ CUBE(GRA_NXT) ], const double g_Corner_Array[][3], const real dh, const real Safety, const bool P5_Gradient, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double ExtAcc_Time ); #endif #elif ( MODEL == ELBDM ) __global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out[][FLU_NOUT][ PS2*PS2*PS2 ], real g_Flux [][9][NFLUX_TOTAL][ PS2*PS2 ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const bool XYZ, const real MinDens ); #else #error : ERROR : unsupported MODEL !! #endif // MODEL #ifdef GRAVITY // Poisson solver prototypes #if ( POT_SCHEME == SOR ) #ifdef USE_PSOLVER_10TO14 __global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #else __global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #endif // #ifdef USE_PSOLVER_10TO14 ... else ... #elif ( POT_SCHEME == MG ) __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ); #endif // POT_SCHEME // Gravity solver prototypes in different models #if ( MODEL == HYDRO ) __global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ], const real g_Pot_Array_New[][ CUBE(GRA_NXT) ], const double g_Corner_Array [][3], const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ], const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ], char g_DE_Array [][ CUBE(PS1) ], const real g_Emag_Array [][ CUBE(PS1) ], const real dt, const real dh, const bool P5_Gradient, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double TimeNew, const double TimeOld, const real MinEint ); #elif ( MODEL == ELBDM ) __global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real EtaDt, const real dh, const real Lambda ); #else #error : ERROR : unsupported MODEL !! 
#endif // MODEL #endif // GRAVITY // source-term solver prototype __global__ void CUSRC_SrcSolver_IterateAllCells( const real g_Flu_Array_In [][FLU_NIN_S ][ CUBE(SRC_NXT) ], real g_Flu_Array_Out[][FLU_NOUT_S][ CUBE(PS1) ], const real g_Mag_Array_In [][NCOMP_MAG ][ SRC_NXT_P1*SQR(SRC_NXT) ], const double g_Corner_Array[][3], const SrcTerms_t SrcTerms, const int NPatchGroup, const real dt, const real dh, const double TimeNew, const double TimeOld, const real MinDens, const real MinPres, const real MinEint, const EoS_t EoS ); //------------------------------------------------------------------------------------------------------- // Function : CUAPI_Set_Default_GPU_Parameter // Description : Set several GPU parameters to the default values if they are not set in the input file // // Parameter : GPU_NStream : Number of streams for the asynchronous memory copy in GPU // Flu_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the fluid solver // Pot_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Poisson solver // Che_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Grackle solver // Src_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the source-term solver //------------------------------------------------------------------------------------------------------- void CUAPI_Set_Default_GPU_Parameter( int &GPU_NStream, int &Flu_GPU_NPGroup, int &Pot_GPU_NPGroup, int &Che_GPU_NPGroup, int &Src_GPU_NPGroup ) { if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ ); // get the device ID int GetDeviceID = 999; CUDA_CHECK_ERROR( cudaGetDevice( &GetDeviceID ) ); // load the device properties cudaDeviceProp DeviceProp; CUDA_CHECK_ERROR( cudaGetDeviceProperties( &DeviceProp, GetDeviceID ) ); // set the default GPU parameters // (1) GPU_NSTREAM if ( GPU_NStream <= 0 ) { if ( DeviceProp.deviceOverlap ) { # if ( MODEL == HYDRO ) # if ( GPU_ARCH == FERMI ) GPU_NStream = 8; # elif ( GPU_ARCH == KEPLER ) GPU_NStream = 16; # elif ( GPU_ARCH == MAXWELL ) GPU_NStream = 16; # elif ( GPU_ARCH == PASCAL ) GPU_NStream = 16; # elif ( GPU_ARCH == VOLTA ) GPU_NStream = 16; # elif ( GPU_ARCH == TURING ) GPU_NStream = 16; # elif ( GPU_ARCH == AMPERE ) GPU_NStream = 16; # else # error : UNKNOWN GPU_ARCH !! # endif # elif ( MODEL == ELBDM ) # if ( GPU_ARCH == FERMI ) GPU_NStream = 8; # elif ( GPU_ARCH == KEPLER ) GPU_NStream = 16; # elif ( GPU_ARCH == MAXWELL ) GPU_NStream = 16; # elif ( GPU_ARCH == PASCAL ) GPU_NStream = 16; # elif ( GPU_ARCH == VOLTA ) GPU_NStream = 16; # elif ( GPU_ARCH == TURING ) GPU_NStream = 16; # elif ( GPU_ARCH == AMPERE ) GPU_NStream = 16; # else # error : ERROR : UNKNOWN GPU_ARCH !! # endif # else # error : ERROR : UNKNOWN MODEL !! 
# endif // MODEL } // if ( DeviceProp.deviceOverlap ) else GPU_NStream = 1; if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "GPU_NSTREAM", GPU_NSTREAM ); } // if ( GPU_NStream <= 0 ) // (2) XXX_GPU_NPGROUP // (2-1) FLU_GPU_NPGROUP if ( Flu_GPU_NPGroup <= 0 ) { # if ( MODEL == HYDRO ) # if ( GPU_ARCH == FERMI ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == TURING ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == AMPERE ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif # elif ( MODEL == ELBDM ) # if ( GPU_ARCH == FERMI ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == TURING ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == AMPERE ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif # else # error : ERROR : UNKNOWN MODEL !! # endif // MODEL if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "FLU_GPU_NPGROUP", Flu_GPU_NPGroup ); } // if ( Flu_GPU_NPGroup <= 0 ) // (2-2) POT_GPU_NPGROUP # ifdef GRAVITY if ( Pot_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == TURING ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == AMPERE ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! 
# endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "POT_GPU_NPGROUP", Pot_GPU_NPGroup ); } // if ( Pot_GPU_NPGroup <= 0 ) # endif // (2-3) CHE_GPU_NPGROUP # ifdef SUPPORT_GRACKLE if ( Che_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == TURING ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == AMPERE ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "CHE_GPU_NPGROUP", Che_GPU_NPGroup ); } // if ( Che_GPU_NPGroup <= 0 ) # endif // (2-4) SRC_GPU_NPGROUP if ( Src_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == TURING ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == AMPERE ) Src_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "SRC_GPU_NPGROUP", Src_GPU_NPGroup ); } // if ( Src_GPU_NPGroup <= 0 ) // (3) cache preference // (3-1) fluid solver # if ( MODEL == HYDRO ) # if ( FLU_SCHEME == RTVD ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_RTVD, cudaFuncCachePreferShared ) ); # elif ( FLU_SCHEME == MHM ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_MHM, cudaFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == MHM_RP ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_MHM, cudaFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == CTU ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_CTU, cudaFuncCachePreferL1 ) ); # endif CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_dtSolver_HydroCFL, cudaFuncCachePreferShared ) ); # ifdef GRAVITY CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_dtSolver_HydroGravity, cudaFuncCachePreferShared ) ); # endif # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_ELBDMSolver, cudaFuncCachePreferShared ) ); # else # error : ERROR : unsupported MODEL !! 
# endif // MODEL # ifdef GRAVITY // (3-2) Poisson solver # if ( POT_SCHEME == SOR ) # ifdef USE_PSOLVER_10TO14 CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_10to14cube, cudaFuncCachePreferShared ) ); # else CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_16to18cube, cudaFuncCachePreferShared ) ); # endif # elif ( POT_SCHEME == MG ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_PoissonSolver_MG, cudaFuncCachePreferShared ) ); # endif // POT_SCHEME // (3-3) gravity solver # if ( MODEL == HYDRO ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_HydroGravitySolver, cudaFuncCachePreferShared ) ); # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_ELBDMGravitySolver, cudaFuncCachePreferL1 ) ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL # endif // GRAVITY // (3-4) source-term solver CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUSRC_SrcSolver_IterateAllCells, cudaFuncCachePreferL1 ) ); if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ... done\n", __FUNCTION__ ); } // FUNCTION : CUAPI_Set_Default_GPU_Parameter #endif // #ifdef GPU
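Despite the long #elif chains, every architecture branch above computes the same default, XXX_GPU_NPGROUP = 1 * GPU_NSTREAM * multiProcessorCount, and GPU_NSTREAM itself defaults to 16 everywhere except Fermi (8), provided the device reports copy/compute overlap. The standalone sketch below condenses that rule; the function name is illustrative and not part of GAMER. As a side note, cudaDeviceProp::deviceOverlap, which gates the GPU_NSTREAM default, is documented as deprecated in recent CUDA releases in favour of asyncEngineCount.

// Illustrative sketch (not a GAMER function): the default shared by all GPU_ARCH branches above,
// one patch group per stream per multiprocessor.
#include <cuda_runtime.h>

int Default_NPGroup_Sketch( const int GPU_NStream )
{
   int DeviceID = 0;
   cudaDeviceProp DeviceProp;
   cudaGetDevice( &DeviceID );
   cudaGetDeviceProperties( &DeviceProp, DeviceID );

// e.g. 16 streams on an 80-multiprocessor GPU give 1280 patch groups per solver invocation
   return 1*GPU_NStream*DeviceProp.multiProcessorCount;
}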
efec5f5e79a41d583b19770d223e65caad14e3ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #define PIx2 6.2831853071795864769252867665590058f #include <stdio.h> #include <stdlib.h> __global__ void ComputePhiMagGPU(int numK, const float* phiR, const float* phiI, float* phiMag){ /******************************************************************** * * Compute the magnitude of Fourier Transform at each sample point * ********************************************************************/ int tid = threadIdx.x + blockIdx.x * blockDim.x; // INSERT KERNEL CODE HERE if(tid<numK){ float real = phiR[tid]; float imag = phiI[tid]; phiMag[tid] = real*real + imag*imag; } } void basicComputePhiMagGPU(int numK, float* phiR, float* phiI, float* phiMag){ // Initialize thread block and kernel grid dimensions const unsigned int BLOCK_SIZE = 1024; dim3 DimGrid((numK-1)/BLOCK_SIZE + 1,1,1); dim3 DimBlock(BLOCK_SIZE,1,1); // Call the kernel for calculating magnitude of Phi hipLaunchKernelGGL(( ComputePhiMagGPU), dim3(DimGrid),dim3(DimBlock), 0, 0, numK, phiR, phiI, phiMag); } __global__ void ComputeQGPU(int numK, int numX, const struct kValues* kVals, const float* x, const float* y, const float* z,float* Qr, float* Qi){ /******************************************************************** * * Calculate Q at each voxel point * ********************************************************************/ int tid = threadIdx.x + blockIdx.x * blockDim.x; // INSERT KERNEL CODE HERE if(tid<numX){ Qr[tid] = 0; Qi[tid] = 0; } // register allocate voxel inputs and outputs float reg_x = x[tid]; float reg_y = y[tid]; float reg_z = z[tid]; float reg_Qr = Qr[tid]; float reg_Qi = Qi[tid]; //loop over all the sample points for(int m = 0; m < numK; m++){ // float exp = 2 * PI * (kVals[m].Kx * reg_x + kVals[m].Ky * reg_y + kVals[m].Kz * reg_z); reg_Qr += kVals[m].PhiMag * cos(exp); reg_Qi += kVals[m].PhiMag * sin(exp); } Qr[tid] = reg_Qr; Qi[tid] = reg_Qi; } void basicComputeQGPU(int numK, int numX, struct kValues* kVals, float* x, float* y, float* z,float* Qr, float* Qi){ // Initialize thread block and kernel grid dimensions const unsigned int BLOCK_SIZE = 1024; dim3 DimGrid((numX-1)/BLOCK_SIZE + 1,1,1); dim3 DimBlock(BLOCK_SIZE,1,1); // Call the kernel for calculating Q matrix hipLaunchKernelGGL(( ComputeQGPU), dim3(DimGrid),dim3(DimBlock), 0, 0, numK, numX, kVals, x, y, z, Qr, Qi); }
efec5f5e79a41d583b19770d223e65caad14e3ae.cu
/****************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #define PIx2 6.2831853071795864769252867665590058f #include <stdio.h> #include <stdlib.h> __global__ void ComputePhiMagGPU(int numK, const float* phiR, const float* phiI, float* phiMag){ /******************************************************************** * * Compute the magnitude of Fourier Transform at each sample point * ********************************************************************/ int tid = threadIdx.x + blockIdx.x * blockDim.x; // INSERT KERNEL CODE HERE if(tid<numK){ float real = phiR[tid]; float imag = phiI[tid]; phiMag[tid] = real*real + imag*imag; } } void basicComputePhiMagGPU(int numK, float* phiR, float* phiI, float* phiMag){ // Initialize thread block and kernel grid dimensions const unsigned int BLOCK_SIZE = 1024; dim3 DimGrid((numK-1)/BLOCK_SIZE + 1,1,1); dim3 DimBlock(BLOCK_SIZE,1,1); // Call the kernel for calculating magnitude of Phi ComputePhiMagGPU<<<DimGrid,DimBlock>>>(numK, phiR, phiI, phiMag); } __global__ void ComputeQGPU(int numK, int numX, const struct kValues* kVals, const float* x, const float* y, const float* z,float* Qr, float* Qi){ /******************************************************************** * * Calculate Q at each voxel point * ********************************************************************/ int tid = threadIdx.x + blockIdx.x * blockDim.x; // INSERT KERNEL CODE HERE if(tid<numX){ Qr[tid] = 0; Qi[tid] = 0; } // register allocate voxel inputs and outputs float reg_x = x[tid]; float reg_y = y[tid]; float reg_z = z[tid]; float reg_Qr = Qr[tid]; float reg_Qi = Qi[tid]; //loop over all the sample points for(int m = 0; m < numK; m++){ // float exp = 2 * PI * (kVals[m].Kx * reg_x + kVals[m].Ky * reg_y + kVals[m].Kz * reg_z); reg_Qr += kVals[m].PhiMag * cos(exp); reg_Qi += kVals[m].PhiMag * sin(exp); } Qr[tid] = reg_Qr; Qi[tid] = reg_Qi; } void basicComputeQGPU(int numK, int numX, struct kValues* kVals, float* x, float* y, float* z,float* Qr, float* Qi){ // Initialize thread block and kernel grid dimensions const unsigned int BLOCK_SIZE = 1024; dim3 DimGrid((numX-1)/BLOCK_SIZE + 1,1,1); dim3 DimBlock(BLOCK_SIZE,1,1); // Call the kernel for calculating Q matrix ComputeQGPU<<<DimGrid,DimBlock>>>(numK, numX, kVals, x, y, z, Qr, Qi); }
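Both versions of ComputeQGPU above accumulate Qr += PhiMag_m * cos(phase) and Qi += PhiMag_m * sin(phase) with phase = 2*pi*(Kx*x + Ky*y + Kz*z) over all k-space samples, but the body has two visible problems: the phase is written as 2 * PI even though only PIx2 is defined in this file (and the temporary is named exp, shadowing the math function), and the register loads of x, y, z, Qr, Qi sit outside the tid < numX check, so threads in the last partial block read past the arrays. The sketch below is a hedged, bounds-checked variant; struct kValues is assumed to be { float Kx, Ky, Kz, PhiMag; } to match the member accesses above, since its real definition lives in a header that is not part of this file.

// Hedged sketch: bounds-checked variant of ComputeQGPU using the PIx2 constant defined above.
// The kValues layout is an assumption matching the member accesses in the original kernel.
struct kValues { float Kx, Ky, Kz, PhiMag; };

__global__ void ComputeQGPU_sketch(int numK, int numX, const kValues* kVals,
                                   const float* x, const float* y, const float* z,
                                   float* Qr, float* Qi){
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= numX) return;                        // guard every access, not only the zero-init
  float reg_x = x[tid], reg_y = y[tid], reg_z = z[tid];
  float reg_Qr = 0.0f, reg_Qi = 0.0f;
  for (int m = 0; m < numK; m++) {
    float arg = PIx2 * (kVals[m].Kx * reg_x + kVals[m].Ky * reg_y + kVals[m].Kz * reg_z);
    reg_Qr += kVals[m].PhiMag * cosf(arg);        // single-precision trig
    reg_Qi += kVals[m].PhiMag * sinf(arg);
  }
  Qr[tid] = reg_Qr;
  Qi[tid] = reg_Qi;
}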
e9024e58c57a8e46d158e725da89065b814fe302.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <cstddef> #include <sys/time.h> #include <iostream> #include <vector> void checkError(hipError_t err) { if (err != hipSuccess) { std::cout << hipGetErrorString(err) << std::endl; exit(-1); } } __global__ void reduce_neighbored_global(int* A, int* B, const int N) { // reduction with global memory for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x; const int gridStride = blockDim.x * gridDim.x; int temp = idx + gridStride; while (temp < N) { A[idx] += A[temp]; temp += gridStride; } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (tid % (2*s) == 0) { A[idx] += A[idx + s]; } __syncthreads(); } if (tid == 0) B[blockIdx.x] = A[idx]; } } __global__ void reduce_neighbored_conflict_divergent(int* A, int* B, const int N) { //reduction with shared memory for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x; const int gridStride = blockDim.x * gridDim.x; __shared__ int buf[1024]; buf[tid] = 0; while (idx < N) { buf[tid] += A[idx]; idx += gridStride; } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (tid % (2*s) == 0) { buf[tid] += buf[tid + s]; } __syncthreads(); } if (tid == 0) B[blockIdx.x] = buf[0]; } } __global__ void reduce_neighbored_conflict_nondivergent(int* A, int* B, const int N) { //remove warp divergence for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x; const int gridStride = blockDim.x * gridDim.x; __shared__ int buf[1024]; buf[tid] = 0; while (idx < N) { buf[tid] += A[idx]; idx += gridStride; } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { const int index = 2 * s * tid; if (index < blockDim.x) { buf[index] += buf[index + s]; } __syncthreads(); } if (tid == 0) B[blockIdx.x] = buf[0]; } } __global__ void reduce_interleaved_noconflict_nondivergent(int* A, int* B, const int N) { // remove bank conflicts for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x; const int gridStride = blockDim.x * gridDim.x; __shared__ int buf[1024]; buf[tid] = 0; while (idx < N) { buf[tid] += A[idx]; idx += gridStride; } __syncthreads(); for (int s = blockDim.x/2; s > 0; s >>= 1) { if (tid < s) { buf[tid] += buf[tid + s]; } __syncthreads(); } if (tid == 0) B[blockIdx.x] = buf[0]; } } __device__ void unroll(volatile int* buf, int tid) { if (tid < 32) { buf[tid] += buf[tid + 32]; buf[tid] += buf[tid + 16]; buf[tid] += buf[tid + 8]; buf[tid] += buf[tid + 4]; buf[tid] += buf[tid + 2]; buf[tid] += buf[tid + 1]; } } __global__ void reduce_interleaved_noconflict_nondivergent_unrolled(int* A, int* B, const int N) { // unroll last loops for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x; const int gridStride = blockDim.x * gridDim.x; __shared__ int buf[1024]; buf[tid] = 0; while (idx < N) { buf[tid] += A[idx]; idx += gridStride; } __syncthreads(); for (int s = blockDim.x/2; s > 32; s >>= 1) { if (tid < s) { buf[tid] += buf[tid + s]; } __syncthreads(); } unroll(buf, tid); if (tid == 0) B[blockIdx.x] = buf[0]; } } template <unsigned int blockSize> __global__ void reduce_interleaved_noconflict_nondivergent_completelyunrolled(int* A, int* B, const int N) { for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + 
threadIdx.x; const int gridStride = blockDim.x * gridDim.x; __shared__ int buf[1024]; buf[tid] = 0; while (idx < N) { buf[tid] += A[idx]; idx += gridStride; } __syncthreads(); if (blockSize>=1024 && tid < 512) buf[tid] += buf[tid + 512]; __syncthreads(); if (blockSize>=512 && tid < 256) buf[tid] += buf[tid + 256]; __syncthreads(); if (blockSize>=256 && tid < 128) buf[tid] += buf[tid + 128]; __syncthreads(); if (blockSize>=128 && tid < 64) buf[tid] += buf[tid + 64]; __syncthreads(); unroll(buf, tid); if (tid == 0) B[blockIdx.x] = buf[0]; } } int main() { const int nElem = 1024*2048; std::vector<int> A(nElem, 1); std::vector<int> B(1024, 1); const int nBytes = nElem * sizeof(int); std::cout << nBytes * 1e-6 << std::endl; int* d_A; int* d_B; checkError(hipMalloc(&d_A, nBytes)); checkError(hipMalloc(&d_B, 1024 * sizeof(int))); //warmup checkError( hipMemcpy(d_A, &A[0], nBytes, hipMemcpyHostToDevice) ); checkError( hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_neighbored_conflict_divergent) , dim3(1024), dim3(512) , 0, 0, d_A, d_B, nElem); checkError( hipPeekAtLastError() ); checkError( hipDeviceSynchronize() ); checkError( hipMemcpy(d_A, &A[0], nBytes, hipMemcpyHostToDevice) ); checkError( hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_neighbored_global) , dim3(1024), dim3(512) , 0, 0, d_A, d_B, nElem); checkError( hipPeekAtLastError() ); checkError( hipDeviceSynchronize() ); checkError( hipMemcpy(d_A, &A[0], nBytes, hipMemcpyHostToDevice) ); checkError( hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_neighbored_conflict_divergent) , dim3(1024), dim3(512) , 0, 0, d_A, d_B, nElem); checkError( hipPeekAtLastError() ); checkError( hipDeviceSynchronize() ); checkError( hipMemcpy(d_A, &A[0], nBytes, hipMemcpyHostToDevice) ); checkError( hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_neighbored_conflict_nondivergent) , dim3(1024), dim3(512) , 0, 0, d_A, d_B, nElem); checkError( hipPeekAtLastError() ); checkError( hipDeviceSynchronize() ); checkError( hipMemcpy(d_A, &A[0], nBytes, hipMemcpyHostToDevice) ); checkError( hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_interleaved_noconflict_nondivergent) , dim3(1024), dim3(512) , 0, 0, d_A, d_B, nElem); checkError( hipPeekAtLastError() ); checkError( hipDeviceSynchronize() ); checkError( hipMemcpy(d_A, &A[0], nBytes, hipMemcpyHostToDevice) ); checkError( hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_interleaved_noconflict_nondivergent_unrolled) , dim3(1024), dim3(512) , 0, 0, d_A, d_B, nElem); checkError( hipPeekAtLastError() ); checkError( hipDeviceSynchronize() ); checkError( hipMemcpy(d_A, &A[0], nBytes, hipMemcpyHostToDevice) ); checkError( hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_interleaved_noconflict_nondivergent_completelyunrolled<512>) , dim3(1024), dim3(512) , 0, 0, d_A, d_B, nElem); checkError( hipPeekAtLastError() ); checkError( hipDeviceSynchronize() ); checkError(hipMemcpy(&B[0], d_B, 1024 * sizeof(int), hipMemcpyDeviceToHost)); for (long long i = 0; i < 1024; ++i) { if (B.at(i) != 2048) { std::cout << "error: " << i << " " << B.at(i) << std::endl; exit(-1); } } checkError(hipFree(d_A)); checkError(hipFree(d_B)); }
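The kernels above walk the classic optimization ladder for a parallel reduction: plain global-memory accumulation, shared memory with divergent modulo indexing, non-divergent indexing that still has bank conflicts, sequential interleaved addressing, unrolling of the last warp through the volatile unroll() helper, and finally complete unrolling via the blockSize template parameter. For reference, the host check expects 2048 per block because the 1024*2048 ones are grid-strided over 1024 blocks. A step often added after these is finishing each warp with shuffle intrinsics instead of the volatile shared-memory trick, which depends on implicit warp-synchronous execution that is no longer guaranteed on recent NVIDIA architectures. The sketch below is written against HIP's __shfl_down and uses warpSize (32 on NVIDIA, 64 on most AMD GPUs) rather than a hard-coded 32; it is an optional extension, not part of the file above.

// Hedged sketch of a warp-shuffle finish for the per-block reduction.
__inline__ __device__ int warp_reduce_sum(int val) {
    for (int offset = warpSize / 2; offset > 0; offset >>= 1)
        val += __shfl_down(val, offset);  // add the value held by the lane offset positions higher
    return val;
}

A block-level variant would reduce each warp with warp_reduce_sum, write one partial per warp to shared memory, and let the first warp reduce those partials before thread 0 stores B[blockIdx.x].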
e9024e58c57a8e46d158e725da89065b814fe302.cu
#include <cuda_runtime.h> #include <cstddef> #include <sys/time.h> #include <iostream> #include <vector> void checkError(cudaError_t err) { if (err != cudaSuccess) { std::cout << cudaGetErrorString(err) << std::endl; exit(-1); } } __global__ void reduce_neighbored_global(int* A, int* B, const int N) { // reduction with global memory for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x; const int gridStride = blockDim.x * gridDim.x; int temp = idx + gridStride; while (temp < N) { A[idx] += A[temp]; temp += gridStride; } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (tid % (2*s) == 0) { A[idx] += A[idx + s]; } __syncthreads(); } if (tid == 0) B[blockIdx.x] = A[idx]; } } __global__ void reduce_neighbored_conflict_divergent(int* A, int* B, const int N) { //reduction with shared memory for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x; const int gridStride = blockDim.x * gridDim.x; __shared__ int buf[1024]; buf[tid] = 0; while (idx < N) { buf[tid] += A[idx]; idx += gridStride; } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (tid % (2*s) == 0) { buf[tid] += buf[tid + s]; } __syncthreads(); } if (tid == 0) B[blockIdx.x] = buf[0]; } } __global__ void reduce_neighbored_conflict_nondivergent(int* A, int* B, const int N) { //remove warp divergence for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x; const int gridStride = blockDim.x * gridDim.x; __shared__ int buf[1024]; buf[tid] = 0; while (idx < N) { buf[tid] += A[idx]; idx += gridStride; } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { const int index = 2 * s * tid; if (index < blockDim.x) { buf[index] += buf[index + s]; } __syncthreads(); } if (tid == 0) B[blockIdx.x] = buf[0]; } } __global__ void reduce_interleaved_noconflict_nondivergent(int* A, int* B, const int N) { // remove bank conflicts for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x; const int gridStride = blockDim.x * gridDim.x; __shared__ int buf[1024]; buf[tid] = 0; while (idx < N) { buf[tid] += A[idx]; idx += gridStride; } __syncthreads(); for (int s = blockDim.x/2; s > 0; s >>= 1) { if (tid < s) { buf[tid] += buf[tid + s]; } __syncthreads(); } if (tid == 0) B[blockIdx.x] = buf[0]; } } __device__ void unroll(volatile int* buf, int tid) { if (tid < 32) { buf[tid] += buf[tid + 32]; buf[tid] += buf[tid + 16]; buf[tid] += buf[tid + 8]; buf[tid] += buf[tid + 4]; buf[tid] += buf[tid + 2]; buf[tid] += buf[tid + 1]; } } __global__ void reduce_interleaved_noconflict_nondivergent_unrolled(int* A, int* B, const int N) { // unroll last loops for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x; const int gridStride = blockDim.x * gridDim.x; __shared__ int buf[1024]; buf[tid] = 0; while (idx < N) { buf[tid] += A[idx]; idx += gridStride; } __syncthreads(); for (int s = blockDim.x/2; s > 32; s >>= 1) { if (tid < s) { buf[tid] += buf[tid + s]; } __syncthreads(); } unroll(buf, tid); if (tid == 0) B[blockIdx.x] = buf[0]; } } template <unsigned int blockSize> __global__ void reduce_interleaved_noconflict_nondivergent_completelyunrolled(int* A, int* B, const int N) { for (int loop=0; loop < 1000; ++loop) { const int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x; const int gridStride = blockDim.x * gridDim.x; 
__shared__ int buf[1024]; buf[tid] = 0; while (idx < N) { buf[tid] += A[idx]; idx += gridStride; } __syncthreads(); if (blockSize>=1024 && tid < 512) buf[tid] += buf[tid + 512]; __syncthreads(); if (blockSize>=512 && tid < 256) buf[tid] += buf[tid + 256]; __syncthreads(); if (blockSize>=256 && tid < 128) buf[tid] += buf[tid + 128]; __syncthreads(); if (blockSize>=128 && tid < 64) buf[tid] += buf[tid + 64]; __syncthreads(); unroll(buf, tid); if (tid == 0) B[blockIdx.x] = buf[0]; } } int main() { const int nElem = 1024*2048; std::vector<int> A(nElem, 1); std::vector<int> B(1024, 1); const int nBytes = nElem * sizeof(int); std::cout << nBytes * 1e-6 << std::endl; int* d_A; int* d_B; checkError(cudaMalloc(&d_A, nBytes)); checkError(cudaMalloc(&d_B, 1024 * sizeof(int))); //warmup checkError( cudaMemcpy(d_A, &A[0], nBytes, cudaMemcpyHostToDevice) ); checkError( cudaDeviceSynchronize() ); reduce_neighbored_conflict_divergent <<< 1024, 512 >>> (d_A, d_B, nElem); checkError( cudaPeekAtLastError() ); checkError( cudaDeviceSynchronize() ); checkError( cudaMemcpy(d_A, &A[0], nBytes, cudaMemcpyHostToDevice) ); checkError( cudaDeviceSynchronize() ); reduce_neighbored_global <<< 1024, 512 >>> (d_A, d_B, nElem); checkError( cudaPeekAtLastError() ); checkError( cudaDeviceSynchronize() ); checkError( cudaMemcpy(d_A, &A[0], nBytes, cudaMemcpyHostToDevice) ); checkError( cudaDeviceSynchronize() ); reduce_neighbored_conflict_divergent <<< 1024, 512 >>> (d_A, d_B, nElem); checkError( cudaPeekAtLastError() ); checkError( cudaDeviceSynchronize() ); checkError( cudaMemcpy(d_A, &A[0], nBytes, cudaMemcpyHostToDevice) ); checkError( cudaDeviceSynchronize() ); reduce_neighbored_conflict_nondivergent <<< 1024, 512 >>> (d_A, d_B, nElem); checkError( cudaPeekAtLastError() ); checkError( cudaDeviceSynchronize() ); checkError( cudaMemcpy(d_A, &A[0], nBytes, cudaMemcpyHostToDevice) ); checkError( cudaDeviceSynchronize() ); reduce_interleaved_noconflict_nondivergent <<< 1024, 512 >>> (d_A, d_B, nElem); checkError( cudaPeekAtLastError() ); checkError( cudaDeviceSynchronize() ); checkError( cudaMemcpy(d_A, &A[0], nBytes, cudaMemcpyHostToDevice) ); checkError( cudaDeviceSynchronize() ); reduce_interleaved_noconflict_nondivergent_unrolled <<< 1024, 512 >>> (d_A, d_B, nElem); checkError( cudaPeekAtLastError() ); checkError( cudaDeviceSynchronize() ); checkError( cudaMemcpy(d_A, &A[0], nBytes, cudaMemcpyHostToDevice) ); checkError( cudaDeviceSynchronize() ); reduce_interleaved_noconflict_nondivergent_completelyunrolled<512> <<< 1024, 512 >>> (d_A, d_B, nElem); checkError( cudaPeekAtLastError() ); checkError( cudaDeviceSynchronize() ); checkError(cudaMemcpy(&B[0], d_B, 1024 * sizeof(int), cudaMemcpyDeviceToHost)); for (long long i = 0; i < 1024; ++i) { if (B.at(i) != 2048) { std::cout << "error: " << i << " " << B.at(i) << std::endl; exit(-1); } } checkError(cudaFree(d_A)); checkError(cudaFree(d_B)); }
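The fully unrolled variant above still finishes the last 32 elements through a volatile shared-memory buffer, which relies on implicit warp synchrony. As one further step beyond what the file implements, the same final stage can be expressed with warp shuffle intrinsics; the kernel below is a minimal sketch, not part of the original benchmark, and assumes a 32-thread warp and a block size that is a multiple of 32 (both hold for the <<<1024, 512>>> launches used in main()).

__inline__ __device__ int warpReduceSum(int val) {
    // Each step folds the upper half of the warp onto the lower half.
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}

__global__ void reduce_warp_shuffle(const int* A, int* B, const int N) {
    __shared__ int warpSums[32];     // one partial sum per warp (enough for up to 1024 threads)
    const int tid  = threadIdx.x;
    const int lane = tid % 32;
    const int warp = tid / 32;

    // Grid-stride accumulation into a register, as in the kernels above.
    int sum = 0;
    for (int idx = blockIdx.x * blockDim.x + tid; idx < N; idx += blockDim.x * gridDim.x)
        sum += A[idx];

    // Reduce within each warp, then combine the per-warp results in warp 0.
    sum = warpReduceSum(sum);
    if (lane == 0) warpSums[warp] = sum;
    __syncthreads();

    if (warp == 0) {
        sum = (lane < blockDim.x / 32) ? warpSums[lane] : 0;
        sum = warpReduceSum(sum);
        if (tid == 0) B[blockIdx.x] = sum;
    }
}

With the configuration used in main() (1024 blocks of 512 threads over 1024*2048 ones), each B[blockIdx.x] again comes out to 2048; the 1000-iteration loop the other kernels use for benchmarking is omitted here for clarity.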
3d7d2a6adc6fee48ce26a593130fdf8938a9ba07.hip
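The hipified file below contains convolutional weight-gradient kernels (conv_weight_acts_*); the comment blocks inside it document the images/hidActs/targets layouts and the sumWidth module chunking that each block accumulates over. As a reading aid, here is a hedged, naive host-side reference for the single-group, small-numColors layout used by conv_weight_acts_c_kepler_sw; the function name and divup helper are hypothetical and do not appear in the original source, and the caller is assumed to size (and, when scaleTargets is 0, zero-fill) the targets buffer.

#include <vector>
#include <algorithm>

// Hypothetical reference only: accumulates weight gradients the way the
// documented layouts imply, with none of the kernels' tiling or preloading.
static int divup(int a, int b) { return (a + b - 1) / b; }

void conv_weight_acts_reference(
        const std::vector<float>& images,   // (numColors, imgSizeY, imgSizeX, numImages), pixel stride imgStride
        const std::vector<float>& hidActs,  // (numFilters, numModulesY, numModulesX, numImages)
        std::vector<float>&       targets,  // (numChunks, numColors, filterPixels, numFilters)
        int numImages, int numFilters, int numModulesY, int numModulesX,
        int imgSizeY, int imgSizeX, int filterSize, int paddingStart,
        int moduleStride, int imgStride, int numColors, int sumWidth,
        float scaleTargets, float scaleOutputs) {
    const int filterPixels = filterSize * filterSize;
    const int imgPixels    = imgSizeY * imgSizeX;
    const int numModules   = numModulesY * numModulesX;
    const int chunksX      = divup(numModulesX, sumWidth);
    const int chunksY      = divup(numModulesY, sumWidth);
    for (int cy = 0; cy < chunksY; ++cy)
    for (int cx = 0; cx < chunksX; ++cx) {
        const int chunk = cy * chunksX + cx;
        for (int c = 0; c < numColors; ++c)
        for (int fy = 0; fy < filterSize; ++fy)
        for (int fx = 0; fx < filterSize; ++fx)
        for (int f = 0; f < numFilters; ++f) {
            float sum = 0;
            // Sum over the sumWidth x sumWidth patch of modules this chunk owns.
            for (int my = cy * sumWidth; my < std::min(numModulesY, (cy + 1) * sumWidth); ++my)
            for (int mx = cx * sumWidth; mx < std::min(numModulesX, (cx + 1) * sumWidth); ++mx) {
                const int pxY = paddingStart + my * moduleStride + fy;
                const int pxX = paddingStart + mx * moduleStride + fx;
                if (pxY < 0 || pxY >= imgSizeY || pxX < 0 || pxX >= imgSizeX)
                    continue;                // this filter pixel falls in the padding for this module
                for (int i = 0; i < numImages; ++i)
                    sum += images[(c * imgPixels + pxY * imgSizeX + pxX) * imgStride + i]
                         * hidActs[(f * numModules + my * numModulesX + mx) * numImages + i];
            }
            float& t = targets[((chunk * numColors + c) * filterPixels + fy * filterSize + fx) * numFilters + f];
            t = scaleTargets * t + scaleOutputs * sum;
        }
    }
}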
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mat_gpu.h" #define LO16(x) ((x) & 0x0000FFFF) #define HI16(x) ((x) >> 16) #define WA_LOOP(r) _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \ } \ } #define WA_LOOP2(r) _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \ } \ } #define WA_IMLOAD(r) imPreload[r] = im[(r) * B_X * B_Y / preloadCases * imgPixels * imgStride]; #define WA_IMLOAD_TX(r) imPreload[r] = tex1Dfetch<float>(images, imgOffset2 + (r) * B_X * B_Y / preloadCases * imgPixels * imgStride); #define WA_HALOAD(r) haPreload[r] = ha[(r) * B_X * B_Y / preloadCases * numImages * numModules]; #define WA_HALOAD_TX(r) haPreload[r] = tex1Dfetch<float>(hidActs, hidActsOffset2 + (r) * B_X * B_Y / preloadCases * numImages * numModules); __device__ __forceinline__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( const int my, const int mx, const int paddingStart, const int numModulesX, const int moduleStride, const int blockPixelY, const int blockPixelX, const int imgSizeX, const int imgStride, int& pixIdx, int& m) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image const int pxX = imgLoadModPosX + blockPixelX; pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image m = my * numModulesX + mx; } /* * Each block computes weight gradients for 1 pixel, B_Y * colorsPerThread colors and B_X * filtersPerThread filters * threadIdx.x determines filter * threadIdx.y determines color * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines color batch of B_Y * colorsPerThread * blockIdx.z determines pixel in filter * NOTE: blockIdx.z is limited to values < 2^16. This means that this routine will * fail for filters >= 256*256. I'm assuming I won't ever use such large filters. 
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) * B_X * B_Y must be divisible by preloadCases */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __global__ void conv_weight_acts_mc_mf_kepler_sw(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize; const int blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; hidActs += blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; //if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float prod[colorsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < 
filtersPerThread; f++) { prod[c][f] = 0; } } /* * Note; iterating this way is about 1% slower and uses a few more registers than iterating * over the modules linearly. But it's consistent with the preload routines, * so I'm using it. */ for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image for (int mx = mStartX; mx < mEndX; mx++) { const int m = my * numModulesX + mx; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxX = imgLoadModPosX + blockPixelX; const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { // Checking this condition actually makes things faster ... :/ // So I've removed the !checkCaseBounds flag and just check it all the time. if (caseIdx + loadX < numImages) { /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some images from filter pixels that don't exist (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). */ if (loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = images[caseIdx + y * imgPixels * imgStride + pixIdx]; } } } if (loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules + m * numImages]; } } } } else { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = 0; } } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = 0; } } } __syncthreads(); #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f]; } } } } /* * Each block 
computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __global__ void conv_weight_acts_c_kepler_sw(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; images += loadX; hidActs += blockFilterIdx * numImages * numModules // + loadY * numImages * numModules + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = 
blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { const int m = my * numModulesX + mx; __syncthreads(); const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { // const int imgLoadModPosY = paddingStart + my * moduleStride; // const int imgLoadModPosX = paddingStart + mx * moduleStride; int pxY = (imgLoadModPosY + fYOff); int pxX = (imgLoadModPosX + fXOff); int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { if (/*loadY < B_X*filtersPerThread &&*/ (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread; // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) { shHidActs[loadY+y][loadX]= hidActs[caseIdx + fIdx * numImages * numModules + m * numImages]; } } } else { #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { // const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread; // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) { shHidActs[loadY+y][loadX] = 0; } } } #pragma unroll for (int pp = 0; pp < pixelsPerThread; pp += pixelCache) { //if (loadY < B_Y * pixelCache) { // This condition is not necessary for correctness, but it speeds things a bit /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some imgGrads from filter pixels that don't exit (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). 
*/ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = pp * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[caseIdx + c * imgPixels * imgStride + pixIdx]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX]= 0; } } } } //} __syncthreads(); #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int p = 0; p < pixelCache; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][pp + p][f] += shImages[threadIdx.y + p * B_Y + c * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; } } } } __syncthreads(); } } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } #define WA_C3_LOOP(pp, c) _Pragma("unroll") \ for (int i = 0; i < preloadCases; i++) { \ _Pragma("unroll") \ for (int p = 0; p < pixelCache; p++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \ } \ } \ } #define WA_C3_LOOP2(pp) _Pragma("unroll") \ for (int p = 0; p < pixelCache; p++) { \ _Pragma("unroll") \ for (int i = 0; i < preloadCases; i++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ _Pragma("unroll") \ for (int c = 0; c < 3; ++c) { \ prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \ } \ } \ } \ } #define WA_3_FIDX(y) (((loadY + (y)*B_X*B_Y/preloadCases) % filtersPerThread) * B_X + (loadY + (y)*B_X*B_Y/preloadCases) / filtersPerThread) /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. 
* * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> //__launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (!doWork) { // hidActs -= // } // if (mStartY == mEndY || mStartX 
== mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [8] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [8] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; if (doWork) { #pragma unroll for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) { const int fIdx = WA_3_FIDX(y); // if (doWork) { haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fIdx * numImages * numModules + m * numImages); // } fidx[y] = fIdx * numImages * numModules; } } for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = my * numModulesX + mx; // __syncthreads(); const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { // const int imgLoadModPosY = paddingStart + my * moduleStride; // const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); int myNext = my, mxNext = mx, mNext = m; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); mNext = myNext * numModulesX + mxNext; } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (lastBatch) { // ha = &hidActs[mNext * numImages]; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]); haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]); WA_C3_LOOP(0,0); haPreload[2] = tex1Dfetch<float>(hidActs, hidActsOffset2 
+ fidx[2]); haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]); WA_C3_LOOP(0,1); haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]); haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]); WA_C3_LOOP(0,2); haPreload[6] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[6]); haPreload[7] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[7]); __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { // if (threadIdx.x == 3) targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. 
*/ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [6] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [6] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; // if (doWork) { #pragma unroll for (int y = 0; y < 
filtersPerThread * preloadCases / B_Y; ++y) { fidx[y] = WA_3_FIDX(y) * numImages * numModules; if (doWork) { // Not actually necessary, I think haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fidx[y] + m * numImages); } } // } int mNext = mStartY * numModulesX + mStartX; for (int my = mStartY; my < mEndY; my++) { // const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = mNext;//my * numModulesX + mx; // __syncthreads(); // const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; mNext = lastModule * m + !lastModule * ((my + (mx + 1 == mEndX)) * numModulesX + (mx + 1 == mEndX ? mStartX : mx + 1)); // if (!lastModule) { // const int mxNext = mx + 1 == mEndX ? mStartX : mx + 1; // const int myNext = my + (mx + 1 == mEndX); // mNext = myNext * numModulesX + mxNext; // } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = hidActs + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; const int hidActsOffset2 = hidActsOffset + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; // if (lastBatch) { // ha = &hidActs[mNext * numImages]; // } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]); haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]); haPreload[2] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[2]); haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]); 
haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]); haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]); WA_C3_LOOP2(0); __syncthreads(); /* ================================================================================== * Iteration 1 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { // const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); WA_C3_LOOP2(2); __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(128, 4) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % 
preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [8] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // It's bizarre, but this is the fastest way I've found to get it not to load nonexistent pixels. // All other ways cause crazy excessive register usage. 
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * imgPixels * imgStride + pixIdx); imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + idx); } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Almost certainly not necessary here. const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * numImages * numModules + m * numImages); haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + idx); } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); #pragma unroll for (int z = 0; z < 8; ++z) { WA_IMLOAD_TX(z); WA_LOOP2(z); } #pragma unroll for (int z = 0; z < 8; ++z) { WA_HALOAD_TX(z); WA_LOOP2(z+8); } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * 
B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [6] float haPreload[preloadCases*filtersPerThread/B_Y]; // [16] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx); } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, 
hidActsOffset + y * numImages * numModules + m * numImages); } } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } WA_LOOP(0); WA_LOOP(1); WA_LOOP(2); WA_LOOP(3); WA_LOOP(4); WA_LOOP(5); WA_IMLOAD_TX(0); WA_LOOP(6); WA_IMLOAD_TX(1); WA_LOOP(7); WA_IMLOAD_TX(2); WA_LOOP(8); WA_IMLOAD_TX(3); WA_LOOP(9); WA_IMLOAD_TX(4); WA_LOOP(10); WA_IMLOAD_TX(5); WA_LOOP(11); WA_HALOAD_TX(0); WA_LOOP(12); WA_HALOAD_TX(1); WA_LOOP(13); WA_HALOAD_TX(2); WA_LOOP(14); WA_HALOAD_TX(3); WA_LOOP(15); WA_HALOAD_TX(4); WA_LOOP(16); WA_HALOAD_TX(5); WA_LOOP(17); WA_HALOAD_TX(6); WA_LOOP(18); WA_HALOAD_TX(7); WA_LOOP(19); WA_HALOAD_TX(8); WA_LOOP(20); WA_HALOAD_TX(9); WA_LOOP(21); WA_HALOAD_TX(10); WA_LOOP(22); WA_HALOAD_TX(11); WA_LOOP(23); WA_HALOAD_TX(12); WA_LOOP(24); WA_HALOAD_TX(13); WA_LOOP(25); WA_HALOAD_TX(14); WA_LOOP(26); WA_HALOAD_TX(15); WA_LOOP(27); WA_LOOP(28); WA_LOOP(29); WA_LOOP(30); WA_LOOP(31); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, 
const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [4] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork && loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { imPreload[y * 
preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx); } } if (doWork && loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + y * numImages * numModules + m * numImages); } } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { // const float* im = &images[caseIdx + preloadCases + pixIdx]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; // im = &images[pixIdxNext]; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; // ha = &hidActs[mNext * numImages]; } if (loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } } if (loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } } __syncthreads(); WA_LOOP(0); WA_IMLOAD_TX(0); WA_LOOP(1); WA_IMLOAD_TX(1); WA_LOOP(2); WA_IMLOAD_TX(2); WA_LOOP(3); WA_IMLOAD_TX(3); WA_LOOP(4); WA_HALOAD_TX(0); WA_LOOP(5); WA_HALOAD_TX(1); WA_LOOP(6); WA_HALOAD_TX(2); WA_LOOP(7); WA_HALOAD_TX(3); WA_LOOP(8); WA_HALOAD_TX(4); WA_LOOP(9); WA_HALOAD_TX(5); WA_LOOP(10); WA_HALOAD_TX(6); WA_LOOP(11); WA_HALOAD_TX(7); WA_LOOP(12); WA_LOOP(13); WA_LOOP(14); WA_LOOP(15); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModules, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) * * TODO: you can get a slight speed boost for local non-convolutional units by writing special * routines for partialSum = 1. But I dunno if the code duplication is worth it... * * Note: all of these convolution routines are optimized for the case when * the number of images (i.e. the minibatch size) is a multiple of 128. * Other batch sizes will work, but I made no attempt whatsoever * to make them work fast. 
*/ void _weightActs(MatGPU& images, MatGPU& hidActs, MatGPU& targets, size_t imgSize1, size_t imgSize2, size_t padding, size_t chunks_num, size_t sum_width) { // activation sizes int imgSizeX = (int) imgSize1; int imgSizeY = (int) imgSize2; int paddingStart = -(int) padding; int outputModuleChunks = (int) chunks_num; int sumWidth = (int) sum_width; int moduleStride = 1; int numGroups = 1; float scaleTargets = 0; float scaleOutput = 1; mexAssert(images.stride_ == 1 && hidActs.stride_ == 1 && targets.stride_ == 1, "In _weightActs one of strides is not 1"); int numImages = (int) images.size1_; int imgPixels = imgSizeX * imgSizeY; mexAssert(images.size2_ % imgPixels == 0, "wa5"); int numImgColors = (int) images.size2_ / imgPixels; mexAssert(numImgColors % numGroups == 0, "wa4"); mexAssert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0)), "wa1"); int numFilterColors = numImgColors / numGroups; mexAssert(numGroups == 1 || numFilterColors % 16 == 0, "wa2"); int numFilters = (int) targets.size1_; mexAssert(numFilters % (16 * numGroups) == 0, "wa3"); mexAssert(targets.size2_ % (outputModuleChunks * numFilterColors) == 0, "wa8"); int filterPixels = (int) targets.size2_ / (outputModuleChunks * numFilterColors); int filterSize = (int) sqrt((double) filterPixels); mexAssert(filterSize * filterSize == filterPixels, "wa7"); mexAssert(paddingStart <= 0, "wa9"); int numModulesX = imgSizeX - 2 * paddingStart + 1 - filterSize; int numModulesY = imgSizeY - 2 * paddingStart + 1 - filterSize; int numModules = numModulesY * numModulesX; mexAssert(hidActs.size1_ == numImages, "wa14"); mexAssert(hidActs.size2_ == numFilters * numModules, "wa13"); int chunks_x = (int) DIVUP(numModulesX, sumWidth); int chunks_y = (int) DIVUP(numModulesY, sumWidth); mexAssert(chunks_x * chunks_y == outputModuleChunks, "wa15"); int imgStride = numImages; int numFiltersPerGroup = numFilters / numGroups; int preloadCases = 32; dim3 blocks, threads; int bx, by; int pixelsPerThread, filtersPerThread, colorsPerThread; // Worth playing with these parameters to find best values for your problem. // These values work relatively well, but not optimal for all problems. if (numFilterColors > 3) { filtersPerThread = numFiltersPerGroup % 64 == 0 ? 4 : numFiltersPerGroup % 32 == 0 ? 2 : 1; colorsPerThread = numFilterColors % 64 == 0 ? 8 : numFilterColors % 48 == 0 ? 6 : numFilterColors % 32 == 0 ? 8 : 4; by = (numFilterColors / colorsPerThread) % 8 == 0 ? 8 : 4; bx = numFiltersPerGroup % 128 == 0 ? 32 : 16; preloadCases = filtersPerThread * colorsPerThread < 32 ? 32 : 16; blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), numFilterColors / (by*colorsPerThread), filterPixels); mexAssert(numFilterColors % (by*colorsPerThread) == 0, "wa17"); } else { // This is ugly but it's nice to spell it out clearly mexAssert(numGroups == 1, "wa16"); // Just for sanity // NOTE: these things are only optimized for colors = 3. I didn't really test other cases. if (numFilters % 64 == 0) { // TODO: having a separate case for 128 would make things faster, but I probably don't care about 128 filtersPerThread = 4; pixelsPerThread = 2; by = 16; bx = 16; preloadCases = 32; } else if (numFilters % 48 == 0) { filtersPerThread = 3; pixelsPerThread = 4; by = 16; bx = 16; preloadCases = 32; } else if (numFilters % 32 == 0) { filtersPerThread = 2; pixelsPerThread = 2; by = 8; bx = 16; preloadCases = 16; } else { // This case is completely untested. It might be really slow. But no time now. 
filtersPerThread = 1; pixelsPerThread = 16; by = 16; bx = 16; preloadCases = 32; } blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by*pixelsPerThread)); } mexAssert((by * bx) % preloadCases == 0, "wa18"); mexAssert(numFilters % (bx * filtersPerThread) == 0, "wa19"); threads = dim3(bx, by); bool checkCaseBounds = numImages % preloadCases != 0; hipStream_t stream = MatGPU::_defaultStream; if (checkCaseBounds == false) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, stream, images.getTextureObject(), hidActs.getTextureObject(), targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, stream, images.getTextureObject(), hidActs.getTextureObject(), targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.getTextureObject(), hidActs.getTextureObject(), targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >), 
dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, stream, images.getTextureObject(), hidActs.getTextureObject(), targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, stream, images.getTextureObject(), hidActs.getTextureObject(), targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if 
(numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if 
(numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, 
moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, 
paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, 
numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, 
numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true >), dim3(blocks), dim3(threads), 0, stream, images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } } } mexAssert(hipGetLastError() == hipSuccess, "weightActs: kernel execution failed"); }
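/*
 * Illustrative usage sketch (editorial addition, not part of the original source):
 * shows how the shape bookkeeping in _weightActs above fits together for one
 * hypothetical configuration (128 images, 32x32 RGB input, 64 filters of size 5x5,
 * stride 1, no padding, sumWidth = 4). The function name exampleWeightActsCall and
 * the concrete numbers are assumptions made for the example; the MatGPU arguments are
 * taken by reference because their construction/fill API lives in mat_gpu.h and is
 * not reproduced here. DIVUP is the macro already used throughout this file.
 */
static void exampleWeightActsCall(MatGPU& images, MatGPU& hidActs, MatGPU& targets) {
    // Hypothetical configuration: 128 images, 32x32 input, 3 colors, 64 filters, 5x5 filters.
    const size_t imgSize    = 32;
    const size_t filterSize = 5;
    const size_t padding    = 0;
    const size_t sumWidth   = 4;   // modules summed per output chunk in each dimension

    // Module grid for a stride-1 convolution, matching _weightActs' own computation.
    const size_t numModulesX = imgSize + 2 * padding - filterSize + 1;                       // 28
    const size_t numModulesY = numModulesX;                                                  // square input
    const size_t chunksNum   = DIVUP(numModulesY, sumWidth) * DIVUP(numModulesX, sumWidth);  // 7 * 7 = 49

    // Shapes the mexAsserts in _weightActs expect (numImages = 128, numFilters = 64, numColors = 3):
    //   images:  numImages  x (numColors  * imgSize * imgSize)
    //   hidActs: numImages  x (numFilters * numModulesY * numModulesX)
    //   targets: numFilters x (chunksNum  * numColors * filterSize * filterSize)

    // Accumulates per-chunk weight gradients into targets (scaleTargets = 0, scaleOutput = 1).
    _weightActs(images, hidActs, targets, imgSize, imgSize, padding, chunksNum, sumWidth);
}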
3d7d2a6adc6fee48ce26a593130fdf8938a9ba07.cu
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mat_gpu.h" #define LO16(x) ((x) & 0x0000FFFF) #define HI16(x) ((x) >> 16) #define WA_LOOP(r) _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \ } \ } #define WA_LOOP2(r) _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \ } \ } #define WA_IMLOAD(r) imPreload[r] = im[(r) * B_X * B_Y / preloadCases * imgPixels * imgStride]; #define WA_IMLOAD_TX(r) imPreload[r] = tex1Dfetch<float>(images, imgOffset2 + (r) * B_X * B_Y / preloadCases * imgPixels * imgStride); #define WA_HALOAD(r) haPreload[r] = ha[(r) * B_X * B_Y / preloadCases * numImages * numModules]; #define WA_HALOAD_TX(r) haPreload[r] = tex1Dfetch<float>(hidActs, hidActsOffset2 + (r) * B_X * B_Y / preloadCases * numImages * numModules); __device__ __forceinline__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( const int my, const int mx, const int paddingStart, const int numModulesX, const int moduleStride, const int blockPixelY, const int blockPixelX, const int imgSizeX, const int imgStride, int& pixIdx, int& m) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image const int pxX = imgLoadModPosX + blockPixelX; pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image m = my * numModulesX + mx; } /* * Each block computes weight gradients for 1 pixel, B_Y * colorsPerThread colors and B_X * filtersPerThread filters * threadIdx.x determines filter * threadIdx.y determines color * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines color batch of B_Y * colorsPerThread * blockIdx.z determines pixel in filter * NOTE: blockIdx.z is limited to values < 2^16. This means that this routine will * fail for filters >= 256*256. I'm assuming I won't ever use such large filters. 
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) * B_X * B_Y must be divisible by preloadCases */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __global__ void conv_weight_acts_mc_mf_kepler_sw(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize; const int blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; hidActs += blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; //if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float prod[colorsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < 
filtersPerThread; f++) { prod[c][f] = 0; } } /* * Note; iterating this way is about 1% slower and uses a few more registers than iterating * over the modules linearly. But it's consistent with the preload routines, * so I'm using it. */ for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image for (int mx = mStartX; mx < mEndX; mx++) { const int m = my * numModulesX + mx; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxX = imgLoadModPosX + blockPixelX; const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { // Checking this condition actually makes things faster ... :/ // So I've removed the !checkCaseBounds flag and just check it all the time. if (caseIdx + loadX < numImages) { /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some images from filter pixels that don't exist (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). */ if (loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = images[caseIdx + y * imgPixels * imgStride + pixIdx]; } } } if (loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules + m * numImages]; } } } } else { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) { shImgLoad[(y) * preloadCases] = 0; } } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) { shHidActLoad[y * (preloadCases + 1)] = 0; } } } __syncthreads(); #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f]; } } } } /* * Each block 
computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __global__ void conv_weight_acts_c_kepler_sw(float* images, float* hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; images += loadX; hidActs += blockFilterIdx * numImages * numModules // + loadY * numImages * numModules + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = 
blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { const int m = my * numModulesX + mx; __syncthreads(); const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { // const int imgLoadModPosY = paddingStart + my * moduleStride; // const int imgLoadModPosX = paddingStart + mx * moduleStride; int pxY = (imgLoadModPosY + fYOff); int pxX = (imgLoadModPosX + fXOff); int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { if (/*loadY < B_X*filtersPerThread &&*/ (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread; // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) { shHidActs[loadY+y][loadX]= hidActs[caseIdx + fIdx * numImages * numModules + m * numImages]; } } } else { #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { // const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread; // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) { shHidActs[loadY+y][loadX] = 0; } } } #pragma unroll for (int pp = 0; pp < pixelsPerThread; pp += pixelCache) { //if (loadY < B_Y * pixelCache) { // This condition is not necessary for correctness, but it speeds things a bit /* * As long as B_Y * B_X is divisible by preloadCases this will loop the right * number of times. * * This will load some imgGrads from filter pixels that don't exit (it'll set those to 0), * but the code does not produce any output for those pixels (see last lines). 
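 *
 * (Illustrative sizing, assuming the <8, 16, 2, 2, 2, 16, ...> instantiation used by the
 * dispatch below: B_X * B_Y / preloadCases = 8 rows of shImages are filled per unrolled
 * step, so the y-loop below runs B_Y * pixelCache / 8 = 2 times per color plane. The zeros
 * written for out-of-range filter pixels are harmless because the final store is guarded
 * by the blockPixelOffset + p * B_Y + threadIdx.y < filterPixels check at the end.)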
*/ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = pp * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[caseIdx + c * imgPixels * imgStride + pixIdx]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX]= 0; } } } } //} __syncthreads(); #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < preloadCases; i++) { #pragma unroll for (int p = 0; p < pixelCache; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][pp + p][f] += shImages[threadIdx.y + p * B_Y + c * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; } } } } __syncthreads(); } } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } #define WA_C3_LOOP(pp, c) _Pragma("unroll") \ for (int i = 0; i < preloadCases; i++) { \ _Pragma("unroll") \ for (int p = 0; p < pixelCache; p++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \ } \ } \ } #define WA_C3_LOOP2(pp) _Pragma("unroll") \ for (int p = 0; p < pixelCache; p++) { \ _Pragma("unroll") \ for (int i = 0; i < preloadCases; i++) { \ _Pragma("unroll") \ for (int f = 0; f < filtersPerThread; f++) { \ _Pragma("unroll") \ for (int c = 0; c < 3; ++c) { \ prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \ } \ } \ } \ } #define WA_3_FIDX(y) (((loadY + (y)*B_X*B_Y/preloadCases) % filtersPerThread) * B_X + (loadY + (y)*B_X*B_Y/preloadCases) / filtersPerThread) /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. 
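 *
 * (Worked example, assuming the <16, 16, 2, 2, 4, 32, 3, ...> instantiation launched by
 * _weightActs below: numFilters must then be a multiple of B_X * filtersPerThread = 64,
 * and a minibatch of 128 images runs numImages / preloadCases = 4 preload batches per module.)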
* * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. */ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> //__launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (!doWork) { // hidActs -= // } // if (mStartY == mEndY || 
mStartX == mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [8] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [8] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; if (doWork) { #pragma unroll for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) { const int fIdx = WA_3_FIDX(y); // if (doWork) { haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fIdx * numImages * numModules + m * numImages); // } fidx[y] = fIdx * numImages * numModules; } } for (int my = mStartY; my < mEndY; my++) { const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = my * numModulesX + mx; // __syncthreads(); const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { // const int imgLoadModPosY = paddingStart + my * moduleStride; // const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); int myNext = my, mxNext = mx, mNext = m; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); mNext = myNext * numModulesX + mxNext; } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (lastBatch) { // ha = &hidActs[mNext * numImages]; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter if (pxIdx + blockPixelOffset < filterPixels) { const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]); haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]); WA_C3_LOOP(0,0); haPreload[2] = tex1Dfetch<float>(hidActs, 
hidActsOffset2 + fidx[2]); haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]); WA_C3_LOOP(0,1); haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]); haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]); WA_C3_LOOP(0,2); haPreload[6] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[6]); haPreload[7] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[7]); __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { // if (threadIdx.x == 3) targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters * threadIdx.x determines filter * threadIdx.y determines pixel in filter * * blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum * blockIdx.y determines pixel batch of B_Y * pixelsPerThread * * Number of filters must be divisible by B_X * filtersPerThread * Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false. * * images: (numColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters) * * B_Y * B_X should be divisible by preloadCases. * preloadCases one of 16, 32. * B_X one of 4, 8, 16, 32 * B_Y arbitrary (satisfying divisibility constraints) * numModules must be divisible by partialSum * pixelsPerThread must be divisible by pixelCache * * After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)... * so the compiler is messing up here somehow. It's unable to optimize that case away. 
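 *
 * (Sketch of the register budget, assuming the <16, 16, 2, 4, 3, 32, 3, ...> instantiation
 * launched below: haPreload holds filtersPerThread * preloadCases / B_Y = 3 * 32 / 16 = 6
 * floats per thread, and the pixelsPerThread = 4 pixels are processed as two pixelCache = 2
 * chunks, which is why the body contains two explicit "Iteration" blocks.)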
*/ template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds> __launch_bounds__(256,2) __global__ void conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels __shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X*filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks); // const int moduleStride = (imgSize - filterSize + 1) / numModulesX; const int numModules = numModulesY * numModulesX; const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread; const int imgOffset = loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX; // images += loadX; // hidActs += blockFilterIdx * numImages * numModules // + loadX; targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.y * numFilters + threadIdx.x; //float* shImgLoad = &shImages[loadY][loadX]; //float* shHidActLoad = &shHidActs[loadY][loadX]; float prod[numColors][pixelsPerThread][filtersPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[c][p][f] = 0; } } } const int mStartX = blockModuleStartX; const int mStartY = blockModuleStartY; const int mEndX = min(numModulesX, blockModuleStartX + sumWidth); const int mEndY = min(numModulesY, blockModuleStartY + sumWidth); const bool doWork = mStartY < mEndY && mStartX < mEndX; // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12] float haPreload[filtersPerThread * preloadCases / B_Y]; // [6] // if (blockIdx.x != 0 || blockIdx.y !=0) { // return; // } // printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY); const int fYOff = (blockPixelOffset + tidx) / filterSize; const int fXOff = (blockPixelOffset + tidx) % filterSize; __shared__ int pxIdxes[B_Y*pixelsPerThread]; // __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [6] int m = mStartY * numModulesX + mStartX; int fidx[filtersPerThread * preloadCases / B_Y]; // if (doWork) { #pragma unroll for (int y = 0; y < 
filtersPerThread * preloadCases / B_Y; ++y) { fidx[y] = WA_3_FIDX(y) * numImages * numModules; if (doWork) { // Not actually necessary, I think haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fidx[y] + m * numImages); } } // } int mNext = mStartY * numModulesX + mStartX; for (int my = mStartY; my < mEndY; my++) { // const int imgLoadModPosY = paddingStart + my * moduleStride; for (int mx = mStartX; mx < mEndX; mx++) { m = mNext;//my * numModulesX + mx; // __syncthreads(); // const int imgLoadModPosX = paddingStart + mx * moduleStride; if (tidx < B_Y * pixelsPerThread) { const int imgLoadModPosY = paddingStart + my * moduleStride; const int imgLoadModPosX = paddingStart + mx * moduleStride; const int pxY = (imgLoadModPosY + fYOff); const int pxX = (imgLoadModPosX + fXOff); const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1; } __syncthreads(); const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; mNext = lastModule * m + !lastModule * ((my + (mx + 1 == mEndX)) * numModulesX + (mx + 1 == mEndX ? mStartX : mx + 1)); // if (!lastModule) { // const int mxNext = mx + 1 == mEndX ? mStartX : mx + 1; // const int myNext = my + (mx + 1 == mEndX); // mNext = myNext * numModulesX + mxNext; // } for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { const bool lastBatch = caseIdx + preloadCases == numImages; // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = hidActs + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; const int hidActsOffset2 = hidActsOffset + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages; // if (lastBatch) { // ha = &hidActs[mNext * numImages]; // } #pragma unroll for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)]; } /* ================================================================================== * Iteration 0 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]); haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]); haPreload[2] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[2]); haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]); 
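// These six fetches (fidx[0]..fidx[5]) refill haPreload for the next preloadCases batch:
// hidActsOffset2 already points at caseIdx + preloadCases (or at the next module on the
// last batch), while the current batch, staged into shHidActs at the top of the loop,
// is consumed by WA_C3_LOOP2 below.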
haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]); haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]); WA_C3_LOOP2(0); __syncthreads(); /* ================================================================================== * Iteration 1 * ================================================================================== */ #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { // const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0; } } } #pragma unroll for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) { // Make sure number of rows in the array is divisible by number of rows filled per iteration if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) { const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride; if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx); } } } } __syncthreads(); WA_C3_LOOP2(2); __syncthreads(); } } } if (scale) { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f]; } } } } } else { #pragma unroll for (int p = 0; p < pixelsPerThread; p++) { if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f]; } } } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(128, 4) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx 
% preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); // if (mStartY == mEndY || mStartX == mEndX) { // return; // } // const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [8] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { // It's bizarre, but this is the fastest way I've found to get it not to load nonexistent pixels. // All other ways cause crazy excessive register usage. 
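// (Descriptive note: multiplying by the doWork predicate below zeroes the offset when the
// block's module range is empty, so the texture fetch falls back to imgOffset itself instead
// of reading a nonexistent pixel, without introducing a divergent branch.)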
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * imgPixels * imgStride + pixIdx); imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + idx); } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { // Almost certainly not necessary here. const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * numImages * numModules + m * numImages); haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + idx); } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); #pragma unroll for (int z = 0; z < 8; ++z) { WA_IMLOAD_TX(z); WA_LOOP2(z); } #pragma unroll for (int z = 0; z < 8; ++z) { WA_HALOAD_TX(z); WA_LOOP2(z+8); } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * 
B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [6] float haPreload[preloadCases*filtersPerThread/B_Y]; // [16] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx); } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, 
hidActsOffset + y * numImages * numModules + m * numImages); } } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } __syncthreads(); // const float* im = &images[caseIdx + preloadCases + pixIdx]; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; } WA_LOOP(0); WA_LOOP(1); WA_LOOP(2); WA_LOOP(3); WA_LOOP(4); WA_LOOP(5); WA_IMLOAD_TX(0); WA_LOOP(6); WA_IMLOAD_TX(1); WA_LOOP(7); WA_IMLOAD_TX(2); WA_LOOP(8); WA_IMLOAD_TX(3); WA_LOOP(9); WA_IMLOAD_TX(4); WA_LOOP(10); WA_IMLOAD_TX(5); WA_LOOP(11); WA_HALOAD_TX(0); WA_LOOP(12); WA_HALOAD_TX(1); WA_LOOP(13); WA_HALOAD_TX(2); WA_LOOP(14); WA_HALOAD_TX(3); WA_LOOP(15); WA_HALOAD_TX(4); WA_LOOP(16); WA_HALOAD_TX(5); WA_LOOP(17); WA_HALOAD_TX(6); WA_LOOP(18); WA_HALOAD_TX(7); WA_LOOP(19); WA_HALOAD_TX(8); WA_LOOP(20); WA_HALOAD_TX(9); WA_LOOP(21); WA_HALOAD_TX(10); WA_LOOP(22); WA_HALOAD_TX(11); WA_LOOP(23); WA_HALOAD_TX(12); WA_LOOP(24); WA_HALOAD_TX(13); WA_LOOP(25); WA_HALOAD_TX(14); WA_LOOP(26); WA_HALOAD_TX(15); WA_LOOP(27); WA_LOOP(28); WA_LOOP(29); WA_LOOP(30); WA_LOOP(31); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModulesY, numModulesX, numImages) * * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) */ template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale> __launch_bounds__(256, 2) __global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets, const int numImages, const int numFilters, const int numModulesY, const int numModulesX, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int imgStride, const int numImgColors, const int numGroups, const int sumWidth, const float 
scaleTargets, const float scaleOutputs) { __shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases __shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts const int tidx = B_X * threadIdx.y + threadIdx.x; const int loadY = tidx / preloadCases, loadX = tidx % preloadCases; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int numFilterBlocks = numFilters / (B_X * filtersPerThread); const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks; const int numModuleChunksX = DIVUP(numModulesX, sumWidth); // const int numModuleChunksY = DIVUP(numModulesY, sumWidth); const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX; const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX; const int blockModuleStartX = blockModuleChunkX * sumWidth; const int blockModuleStartY = blockModuleChunkY * sumWidth; // const int moduleIdx = partialSum * outputModuleIdx; const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks); const int numModules = numModulesY * numModulesX; const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numFilterColors = numImgColors / numGroups; const int blockPixelOffset = blockIdx.z; // pixel idx in filter const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize; const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread; const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors; const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX; // images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX; const int hidActsOffset = blockFilterIdx * numImages * numModules + loadY * numImages * numModules + loadX; // // hidActs += // blockFilterIdx * numImages * numModules // + loadY * numImages * numModules // + loadX; targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors + (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters + blockPixelOffset * numFilters + blockFilterIdx + threadIdx.x; // if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return; const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride)); const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride)); const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride))); const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride))); const bool doWork = mStartY < mEndY && mStartX < mEndX; float* shHidActLoad = &shHidActs[loadY][loadX]; float* shImgLoad = &shImages[loadY][loadX]; float imPreload[preloadCases*colorsPerThread/B_X]; // [4] float haPreload[preloadCases*filtersPerThread/B_Y]; // [8] float prod[filtersPerThread][colorsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[f][c] = 0; } } int pixIdx, pixIdxNext, m, mNext; conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( mStartY, mStartX, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdx, m); if (doWork && loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { 
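// Prime the image pipeline: with the <8, 32, 4, 8, 16, ...> instantiation used by the
// dispatch below, this fills preloadCases * colorsPerThread / B_X = 4 imPreload registers
// per thread (the loop runs B_Y * colorsPerThread / ((B_X * B_Y) / preloadCases) = 64 / 16 = 4 times).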
imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx); } } if (doWork && loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + y * numImages * numModules + m * numImages); } } for (int my = mStartY; my < mEndY; my++) { for (int mx = mStartX; mx < mEndX; mx++) { int myNext = my, mxNext = mx; const bool lastModule = my == mEndY - 1 && mx == mEndX - 1; if (!lastModule) { mxNext = mx + 1 == mEndX ? mStartX : mx + 1; myNext = my + (mx + 1 == mEndX); } conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords( myNext, mxNext, paddingStart, numModulesX, moduleStride, blockPixelY, blockPixelX, imgSizeX, imgStride, pixIdxNext, mNext); for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) { // const float* im = &images[caseIdx + preloadCases + pixIdx]; int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx; int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages; // const float* ha = &hidActs[caseIdx + preloadCases + m * numImages]; if (caseIdx + preloadCases == numImages) { pixIdx = pixIdxNext; m = mNext; // im = &images[pixIdxNext]; imgOffset2 = imgOffset + pixIdxNext; hidActsOffset2 = hidActsOffset + mNext * numImages; // ha = &hidActs[mNext * numImages]; } if (loadY < B_Y * colorsPerThread) { #pragma unroll for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) { shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)]; } } if (loadY < B_X * filtersPerThread) { #pragma unroll for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) { shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)]; } } __syncthreads(); WA_LOOP(0); WA_IMLOAD_TX(0); WA_LOOP(1); WA_IMLOAD_TX(1); WA_LOOP(2); WA_IMLOAD_TX(2); WA_LOOP(3); WA_IMLOAD_TX(3); WA_LOOP(4); WA_HALOAD_TX(0); WA_LOOP(5); WA_HALOAD_TX(1); WA_LOOP(6); WA_HALOAD_TX(2); WA_LOOP(7); WA_HALOAD_TX(3); WA_LOOP(8); WA_HALOAD_TX(4); WA_LOOP(9); WA_HALOAD_TX(5); WA_LOOP(10); WA_HALOAD_TX(6); WA_LOOP(11); WA_HALOAD_TX(7); WA_LOOP(12); WA_LOOP(13); WA_LOOP(14); WA_LOOP(15); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c]; } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c]; } } } } /* * images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given * hidActs: (numFilters, numModules, numImages) * * targets: (numModuleY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters) * * TODO: you can get a slight speed boost for local non-convolutional units by writing special * routines for partialSum = 1. But I dunno if the code duplication is worth it... * * Note: all of these convolution routines are optimized for the case when * the number of images (i.e. the minibatch size) is a multiple of 128. * Other batch sizes will work, but but I made no attempt whatsoever * to make them work fast. 
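 *
 * Illustrative geometry (hypothetical sizes, not taken from any caller): imgSizeX = imgSizeY = 32,
 * padding = 2 and filterSize = 5 give paddingStart = -2 and
 * numModulesX = numModulesY = 32 - 2 * (-2) + 1 - 5 = 32; with sumWidth = 4 that makes
 * DIVUP(32, 4) * DIVUP(32, 4) = 64 output module chunks, which must match the chunks_num
 * argument (see the chunks_x * chunks_y assertion below).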
*/ void _weightActs(MatGPU& images, MatGPU& hidActs, MatGPU& targets, size_t imgSize1, size_t imgSize2, size_t padding, size_t chunks_num, size_t sum_width) { // activation sizes int imgSizeX = (int) imgSize1; int imgSizeY = (int) imgSize2; int paddingStart = -(int) padding; int outputModuleChunks = (int) chunks_num; int sumWidth = (int) sum_width; int moduleStride = 1; int numGroups = 1; float scaleTargets = 0; float scaleOutput = 1; mexAssert(images.stride_ == 1 && hidActs.stride_ == 1 && targets.stride_ == 1, "In _weightActs one of strides is not 1"); int numImages = (int) images.size1_; int imgPixels = imgSizeX * imgSizeY; mexAssert(images.size2_ % imgPixels == 0, "wa5"); int numImgColors = (int) images.size2_ / imgPixels; mexAssert(numImgColors % numGroups == 0, "wa4"); mexAssert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0)), "wa1"); int numFilterColors = numImgColors / numGroups; mexAssert(numGroups == 1 || numFilterColors % 16 == 0, "wa2"); int numFilters = (int) targets.size1_; mexAssert(numFilters % (16 * numGroups) == 0, "wa3"); mexAssert(targets.size2_ % (outputModuleChunks * numFilterColors) == 0, "wa8"); int filterPixels = (int) targets.size2_ / (outputModuleChunks * numFilterColors); int filterSize = (int) sqrt((double) filterPixels); mexAssert(filterSize * filterSize == filterPixels, "wa7"); mexAssert(paddingStart <= 0, "wa9"); int numModulesX = imgSizeX - 2 * paddingStart + 1 - filterSize; int numModulesY = imgSizeY - 2 * paddingStart + 1 - filterSize; int numModules = numModulesY * numModulesX; mexAssert(hidActs.size1_ == numImages, "wa14"); mexAssert(hidActs.size2_ == numFilters * numModules, "wa13"); int chunks_x = (int) DIVUP(numModulesX, sumWidth); int chunks_y = (int) DIVUP(numModulesY, sumWidth); mexAssert(chunks_x * chunks_y == outputModuleChunks, "wa15"); int imgStride = numImages; int numFiltersPerGroup = numFilters / numGroups; int preloadCases = 32; dim3 blocks, threads; int bx, by; int pixelsPerThread, filtersPerThread, colorsPerThread; // Worth playing with these parameters to find best values for your problem. // These values work relatively well, but not optimal for all problems. if (numFilterColors > 3) { filtersPerThread = numFiltersPerGroup % 64 == 0 ? 4 : numFiltersPerGroup % 32 == 0 ? 2 : 1; colorsPerThread = numFilterColors % 64 == 0 ? 8 : numFilterColors % 48 == 0 ? 6 : numFilterColors % 32 == 0 ? 8 : 4; by = (numFilterColors / colorsPerThread) % 8 == 0 ? 8 : 4; bx = numFiltersPerGroup % 128 == 0 ? 32 : 16; preloadCases = filtersPerThread * colorsPerThread < 32 ? 32 : 16; blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), numFilterColors / (by*colorsPerThread), filterPixels); mexAssert(numFilterColors % (by*colorsPerThread) == 0, "wa17"); } else { // This is ugly but it's nice to spell it out clearly mexAssert(numGroups == 1, "wa16"); // Just for sanity // NOTE: these things are only optimized for colors = 3. I didn't really test other cases. if (numFilters % 64 == 0) { // TODO: having a separate case for 128 would make things faster, but I probably don't care about 128 filtersPerThread = 4; pixelsPerThread = 2; by = 16; bx = 16; preloadCases = 32; } else if (numFilters % 48 == 0) { filtersPerThread = 3; pixelsPerThread = 4; by = 16; bx = 16; preloadCases = 32; } else if (numFilters % 32 == 0) { filtersPerThread = 2; pixelsPerThread = 2; by = 8; bx = 16; preloadCases = 16; } else { // This case is completely untested. It might be really slow. But no time now. 
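// (For comparison, a typical numFilters % 64 == 0 layer takes the first branch above:
// bx = by = 16 -> 256 threads, filtersPerThread = 4, pixelsPerThread = 2, preloadCases = 32,
// so blocks = dim3(outputModuleChunks * numFilters / 64, DIVUP(filterPixels, 32)).)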
filtersPerThread = 1; pixelsPerThread = 16; by = 16; bx = 16; preloadCases = 32; } blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by*pixelsPerThread)); } mexAssert((by * bx) % preloadCases == 0, "wa18"); mexAssert(numFilters % (bx * filtersPerThread) == 0, "wa19"); threads = dim3(bx, by); bool checkCaseBounds = numImages % preloadCases != 0; cudaStream_t stream = MatGPU::_defaultStream; if (checkCaseBounds == false) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false ><<<blocks, threads, 0, stream>>>(images.getTextureObject(), hidActs.getTextureObject(), targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false ><<<blocks, threads, 0, stream>>>(images.getTextureObject(), hidActs.getTextureObject(), targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false ><<<blocks, threads, 0, stream>>>(images.getTextureObject(), hidActs.getTextureObject(), targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, 
paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { 
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false ><<<blocks, threads, 0, stream>>>(images.getTextureObject(), hidActs.getTextureObject(), targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false ><<<blocks, threads, 0, stream>>>(images.getTextureObject(), hidActs.getTextureObject(), targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { 
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false ><<<blocks, threads, 0, stream>>>(images.data_, 
hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors > 3) { if (numFilterColors % 64 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, 
moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numFiltersPerGroup % 128 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { 
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, cudaFuncCachePreferShared); conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, 
false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numFiltersPerGroup % 64 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 48 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } else if (numFiltersPerGroup % 16 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true >, cudaFuncCachePreferShared); conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true ><<<blocks, threads, 0, stream>>>(images.data_, hidActs.data_, targets.data_, numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput); } } } } mexAssert(cudaGetLastError() == cudaSuccess, "weightActs: kernel execution failed"); }
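The dispatcher above picks one template instantiation of the conv_weight_acts_* kernels from runtime divisibility checks (numFilterColors, numFiltersPerGroup, numImages % preloadCases), sets its cache preference with cudaFuncSetCacheConfig, and launches it on MatGPU::_defaultStream. The following is a minimal sketch of that selection pattern only; the kernel toy_weight_acts, its template parameters, and the launch geometry are invented for illustration and are not part of the original file.

#include <cuda_runtime.h>

// Toy stand-in for the conv_weight_acts_* kernels: one thread per output filter,
// images consumed in fixed chunks of PRELOAD_CASES. CHECK_CASE_BOUNDS guards the
// tail chunk when numImages is not a multiple of PRELOAD_CASES. Assumes
// images[numImages] and hidActs[numFilters * numImages] device buffers.
template <int B_X, int PRELOAD_CASES, bool CHECK_CASE_BOUNDS>
__global__ void toy_weight_acts(const float* images, const float* hidActs,
                                float* targets, int numImages, int numFilters) {
    int f = blockIdx.x * B_X + threadIdx.x;
    if (f >= numFilters) return;
    float acc = 0.0f;
    for (int base = 0; base < numImages; base += PRELOAD_CASES) {
        #pragma unroll
        for (int c = 0; c < PRELOAD_CASES; ++c) {
            int img = base + c;
            if (!CHECK_CASE_BOUNDS || img < numImages)
                acc += images[img] * hidActs[(size_t)f * numImages + img];
        }
    }
    targets[f] = acc;
}

// Host-side dispatch mirroring the structure above: choose the instantiation from
// a runtime property, set its cache config, then launch it on the given stream.
void toyWeightActsDispatch(const float* images, const float* hidActs, float* targets,
                           int numImages, int numFilters, cudaStream_t stream) {
    const int preloadCases = 32;
    const int bx = 16;
    bool checkCaseBounds = (numImages % preloadCases) != 0;
    dim3 threads(bx);
    dim3 blocks((numFilters + bx - 1) / bx);
    if (!checkCaseBounds) {
        cudaFuncSetCacheConfig(toy_weight_acts<16, 32, false>, cudaFuncCachePreferShared);
        toy_weight_acts<16, 32, false><<<blocks, threads, 0, stream>>>(
            images, hidActs, targets, numImages, numFilters);
    } else {
        cudaFuncSetCacheConfig(toy_weight_acts<16, 32, true>, cudaFuncCachePreferShared);
        toy_weight_acts<16, 32, true><<<blocks, threads, 0, stream>>>(
            images, hidActs, targets, numImages, numFilters);
    }
}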
75d845adbd230e11c860a81d51eba0649760d989.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> __global__ void jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2 ) { int ti = threadIdx.x; int tj = threadIdx.y; int i = blockIdx.x * blockDim.x + ti + 1; int j = blockIdx.y * blockDim.y + tj + 1; __shared__ float mychange[18*18]; float mnewa, molda; mychange[tj*18+ti] = a[(j-1)*m+i-1]; if( ti < 2 ) mychange[tj*18+ti+16] = a[(j-1)*m+i+15]; if( tj < 2 ) mychange[(tj+16)*18+ti] = a[(j+15)*m+i-1]; if( tj < 2 && ti < 2 ) mychange[(tj+16)*18+ti+16] = a[(j+15)*m+i+15]; __syncthreads(); molda = mychange[(tj+1)*18+(ti+1)]; mnewa = w0*molda + w1 * (mychange[(tj+1)*18+(ti )] + mychange[(tj )*18+(ti+1)] + mychange[(tj+1)*18+(ti+2)] + mychange[(tj+2)*18+(ti+1)]) + w2 * (mychange[(tj )*18+(ti )] + mychange[(tj+2)*18+(ti )] + mychange[(tj )*18+(ti+2)] + mychange[(tj+2)*18+(ti+2)]); newa[j*m+i] = mnewa; __syncthreads(); int ii = ti+blockDim.x*tj; mychange[ii] = fabsf( mnewa - molda ); __syncthreads(); int nn = blockDim.x * blockDim.y; while( (nn>>=1) > 0 ){ if( ii < nn ) mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] ); __syncthreads(); } if( ii == 0 ) lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0]; } __global__ void reductionkernel( float* lchange, int n ) { __shared__ float mychange[256]; float mych = 0.0f; int ii = threadIdx.x, m; if( ii < n ) mych = lchange[ii]; m = blockDim.x; while( m <= n ){ mych = fmaxf( mych, lchange[ii+m] ); m += blockDim.x; } mychange[ii] = mych; __syncthreads(); int nn = blockDim.x; while( (nn>>=1) > 0 ){ if( ii < nn ) mychange[ii] = fmaxf(mychange[ii],mychange[ii+nn]); __syncthreads(); } if( ii == 0 ) lchange[0] = mychange[0]; } static float sumtime; void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol ) { float change; int iters; size_t memsize; int bx, by, gx, gy; float *da, *dnewa, *lchange; hipEvent_t e1, e2; bx = 16; by = 16; gx = (n-2)/bx + ((n-2)%bx == 0?0:1); gy = (m-2)/by + ((m-2)%by == 0?0:1); sumtime = 0.0f; memsize = sizeof(float) * n * m; hipMalloc( &da, memsize ); hipMalloc( &dnewa, memsize ); hipMalloc( &lchange, gx * gy * sizeof(float) ); hipEventCreate( &e1 ); hipEventCreate( &e2 ); dim3 block( bx, by ); dim3 grid( gx, gy ); iters = 0; hipMemcpy( da, a, memsize, hipMemcpyHostToDevice ); hipMemcpy( dnewa, a, memsize, hipMemcpyHostToDevice ); do{ float msec; ++iters; hipEventRecord( e1 ); hipLaunchKernelGGL(( jacobikernel), dim3(grid), dim3(block) , 0, 0, da, dnewa, lchange, n, m, w0, w1, w2 ); hipLaunchKernelGGL(( reductionkernel), dim3(1), dim3(bx*by) , 0, 0, lchange, gx*gy ); hipEventRecord( e2 ); hipMemcpy( &change, lchange, sizeof(float), hipMemcpyDeviceToHost ); hipEventElapsedTime( &msec, e1, e2 ); sumtime += msec; float *ta; ta = da; da = dnewa; dnewa = ta; }while( change > tol ); printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change ); printf( "JacobiGPU used %f seconds total\n", sumtime/1000.0f ); hipMemcpy( a, dnewa, memsize, hipMemcpyDeviceToHost ); hipFree( da ); hipFree( dnewa ); hipFree( lchange ); hipEventDestroy( e1 ); hipEventDestroy( e2 ); } static void init( float* a, int n, int m ) { int i, j; memset( a, 0, sizeof(float) * n * m ); /* boundary conditions */ for( j = 0; j < n; ++j ){ a[j*m+n-1] = j; } for( i = 0; i < m; ++i ){ a[(n-1)*m+i] = i; } a[(n-1)*m+m-1] = m+n; } int main( int argc, char* argv[] ) { int n, m; float *a; struct timeval tt1, tt2; int ms; float 
fms; if( argc <= 1 ){ fprintf( stderr, "%s sizen [sizem]\n", argv[0] ); return 1; } n = atoi( argv[1] ); if( n <= 0 ) n = 100; m = n; if( argc > 2 ){ m = atoi( argv[2] ); if( m <= 0 ) m = 100; } printf( "Jacobi %d x %d\n", n, m ); a = (float*)malloc( sizeof(float) * n * m ); init( a, n, m ); gettimeofday( &tt1, NULL ); JacobiGPU( a, n, m, .2, .1, .1, .1 ); gettimeofday( &tt2, NULL ); ms = (tt2.tv_sec - tt1.tv_sec); ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec); fms = (float)ms / 1000000.0f; printf( "time(gpu ) = %f seconds\n", fms ); }
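jacobikernel in the file above stages an 18x18 tile of a (a 16x16 interior block plus a one-cell halo) in shared memory so every thread's 9-point stencil reads stay on-chip; the four conditional loads fill the extra halo columns, rows, and corner. The standalone CUDA sketch below reproduces only that cooperative load, with invented names (halo_load_demo, tile, out); like the original it assumes a 16x16 thread block whose interior plus halo lies inside the n x m array.

#include <cuda_runtime.h>

#define TILE 16

// Cooperative halo load for a 16x16 block: stage an 18x18 tile of `a` into
// shared memory, mirroring the four conditional loads in jacobikernel.
__global__ void halo_load_demo(const float* a, float* out, int n, int m) {
    int ti = threadIdx.x, tj = threadIdx.y;
    int i = blockIdx.x * TILE + ti + 1;   // interior column index
    int j = blockIdx.y * TILE + tj + 1;   // interior row index
    __shared__ float tile[(TILE + 2) * (TILE + 2)];

    // each thread loads its upper-left neighbour; the first two columns/rows of
    // threads additionally load the right/bottom halo, and the 2x2 corner
    tile[tj * (TILE + 2) + ti] = a[(j - 1) * m + (i - 1)];
    if (ti < 2) tile[tj * (TILE + 2) + ti + TILE] = a[(j - 1) * m + (i + TILE - 1)];
    if (tj < 2) tile[(tj + TILE) * (TILE + 2) + ti] = a[(j + TILE - 1) * m + (i - 1)];
    if (ti < 2 && tj < 2)
        tile[(tj + TILE) * (TILE + 2) + ti + TILE] = a[(j + TILE - 1) * m + (i + TILE - 1)];
    __syncthreads();

    // after the barrier every thread can read its 3x3 neighbourhood on-chip
    out[j * m + i] = tile[(tj + 1) * (TILE + 2) + (ti + 1)];
}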
75d845adbd230e11c860a81d51eba0649760d989.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> __global__ void jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2 ) { int ti = threadIdx.x; int tj = threadIdx.y; int i = blockIdx.x * blockDim.x + ti + 1; int j = blockIdx.y * blockDim.y + tj + 1; __shared__ float mychange[18*18]; float mnewa, molda; mychange[tj*18+ti] = a[(j-1)*m+i-1]; if( ti < 2 ) mychange[tj*18+ti+16] = a[(j-1)*m+i+15]; if( tj < 2 ) mychange[(tj+16)*18+ti] = a[(j+15)*m+i-1]; if( tj < 2 && ti < 2 ) mychange[(tj+16)*18+ti+16] = a[(j+15)*m+i+15]; __syncthreads(); molda = mychange[(tj+1)*18+(ti+1)]; mnewa = w0*molda + w1 * (mychange[(tj+1)*18+(ti )] + mychange[(tj )*18+(ti+1)] + mychange[(tj+1)*18+(ti+2)] + mychange[(tj+2)*18+(ti+1)]) + w2 * (mychange[(tj )*18+(ti )] + mychange[(tj+2)*18+(ti )] + mychange[(tj )*18+(ti+2)] + mychange[(tj+2)*18+(ti+2)]); newa[j*m+i] = mnewa; __syncthreads(); int ii = ti+blockDim.x*tj; mychange[ii] = fabsf( mnewa - molda ); __syncthreads(); int nn = blockDim.x * blockDim.y; while( (nn>>=1) > 0 ){ if( ii < nn ) mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] ); __syncthreads(); } if( ii == 0 ) lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0]; } __global__ void reductionkernel( float* lchange, int n ) { __shared__ float mychange[256]; float mych = 0.0f; int ii = threadIdx.x, m; if( ii < n ) mych = lchange[ii]; m = blockDim.x; while( m <= n ){ mych = fmaxf( mych, lchange[ii+m] ); m += blockDim.x; } mychange[ii] = mych; __syncthreads(); int nn = blockDim.x; while( (nn>>=1) > 0 ){ if( ii < nn ) mychange[ii] = fmaxf(mychange[ii],mychange[ii+nn]); __syncthreads(); } if( ii == 0 ) lchange[0] = mychange[0]; } static float sumtime; void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol ) { float change; int iters; size_t memsize; int bx, by, gx, gy; float *da, *dnewa, *lchange; cudaEvent_t e1, e2; bx = 16; by = 16; gx = (n-2)/bx + ((n-2)%bx == 0?0:1); gy = (m-2)/by + ((m-2)%by == 0?0:1); sumtime = 0.0f; memsize = sizeof(float) * n * m; cudaMalloc( &da, memsize ); cudaMalloc( &dnewa, memsize ); cudaMalloc( &lchange, gx * gy * sizeof(float) ); cudaEventCreate( &e1 ); cudaEventCreate( &e2 ); dim3 block( bx, by ); dim3 grid( gx, gy ); iters = 0; cudaMemcpy( da, a, memsize, cudaMemcpyHostToDevice ); cudaMemcpy( dnewa, a, memsize, cudaMemcpyHostToDevice ); do{ float msec; ++iters; cudaEventRecord( e1 ); jacobikernel<<< grid, block >>>( da, dnewa, lchange, n, m, w0, w1, w2 ); reductionkernel<<< 1, bx*by >>>( lchange, gx*gy ); cudaEventRecord( e2 ); cudaMemcpy( &change, lchange, sizeof(float), cudaMemcpyDeviceToHost ); cudaEventElapsedTime( &msec, e1, e2 ); sumtime += msec; float *ta; ta = da; da = dnewa; dnewa = ta; }while( change > tol ); printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change ); printf( "JacobiGPU used %f seconds total\n", sumtime/1000.0f ); cudaMemcpy( a, dnewa, memsize, cudaMemcpyDeviceToHost ); cudaFree( da ); cudaFree( dnewa ); cudaFree( lchange ); cudaEventDestroy( e1 ); cudaEventDestroy( e2 ); } static void init( float* a, int n, int m ) { int i, j; memset( a, 0, sizeof(float) * n * m ); /* boundary conditions */ for( j = 0; j < n; ++j ){ a[j*m+n-1] = j; } for( i = 0; i < m; ++i ){ a[(n-1)*m+i] = i; } a[(n-1)*m+m-1] = m+n; } int main( int argc, char* argv[] ) { int n, m; float *a; struct timeval tt1, tt2; int ms; float fms; if( argc <= 1 ){ fprintf( stderr, "%s sizen [sizem]\n", argv[0] ); return 1; } n = atoi( argv[1] ); if( n <= 0 ) n = 100; m = n; 
if( argc > 2 ){ m = atoi( argv[2] ); if( m <= 0 ) m = 100; } printf( "Jacobi %d x %d\n", n, m ); a = (float*)malloc( sizeof(float) * n * m ); init( a, n, m ); gettimeofday( &tt1, NULL ); JacobiGPU( a, n, m, .2, .1, .1, .1 ); gettimeofday( &tt2, NULL ); ms = (tt2.tv_sec - tt1.tv_sec); ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec); fms = (float)ms / 1000000.0f; printf( "time(gpu ) = %f seconds\n", fms ); }
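reductionkernel above folds the per-block residuals in lchange down to lchange[0] with a strided pass followed by a shared-memory tree, and JacobiGPU copies that single float back each sweep to test convergence. The sketch below is a hypothetical restatement of that pattern (the name max_reduce_demo is invented); it expects a power-of-two block size of at most 256 and guards the strided pass with m < n, which is slightly stricter than the loop condition in the original.

#include <cuda_runtime.h>

// Single-block max reduction over the per-block residuals. Residuals are
// non-negative (fabsf in jacobikernel), so 0.0f is a safe identity value.
__global__ void max_reduce_demo(float* lchange, int n) {
    __shared__ float smem[256];
    int ii = threadIdx.x;
    float mych = 0.0f;
    // strided pre-reduction: each thread folds every blockDim.x-th element
    for (int m = ii; m < n; m += blockDim.x)
        mych = fmaxf(mych, lchange[m]);
    smem[ii] = mych;
    __syncthreads();
    // shared-memory tree: halve the number of active threads each step
    for (int nn = blockDim.x >> 1; nn > 0; nn >>= 1) {
        if (ii < nn) smem[ii] = fmaxf(smem[ii], smem[ii + nn]);
        __syncthreads();
    }
    if (ii == 0) lchange[0] = smem[0];   // read back by the host each sweep
}

Launched as max_reduce_demo<<<1, 256>>>(lchange, gx*gy), this drops into the same do/while convergence loop that JacobiGPU uses around jacobikernel and reductionkernel.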
da55336d01d9c7a7a5ab24567240ee075cf8aede.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void refill_randoms(float *dRand, int N, hiprandState_t *states)
{
    int i;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int nthreads = gridDim.x * blockDim.x;
    hiprandState_t *state = states + tid;
    for (i = tid; i < N; i += nthreads) {
        dRand[i] = hiprand_uniform(state);
    }
}

da55336d01d9c7a7a5ab24567240ee075cf8aede.cu
#include "includes.h" __global__ void refill_randoms(float *dRand, int N, curandState *states) { int i; int tid = blockIdx.x * blockDim.x + threadIdx.x; int nthreads = gridDim.x * blockDim.x; curandState *state = states + tid; for (i = tid; i < N; i += nthreads) { dRand[i] = curand_uniform(state); } }
0782b4d6f87d8c6a283ed0c5768e81780b5eb6aa.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <float.h> #include <stdint.h> #include <stdio.h> #include <limits> #include "BufferCompaction.h" #include "ExtensionFunctions.hpp" #include "GpuRtConstants.h" #include "HyperLogLogRank.h" #include "TableFunctions/TableFunctions.hpp" #if TORCH_HIP_VERSION < 10000 static_assert(false, "CUDA v10.0 or later is required."); #endif #if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 350) static_assert(false, "CUDA Compute Capability of 3.5 or greater is required."); #endif extern "C" __device__ int64_t get_thread_index() { return threadIdx.x; } extern "C" __device__ int64_t get_block_index() { return blockIdx.x; } extern "C" __device__ int32_t pos_start_impl(const int32_t* row_index_resume) { return blockIdx.x * blockDim.x + threadIdx.x; } extern "C" __device__ int32_t group_buff_idx_impl() { return pos_start_impl(NULL); } extern "C" __device__ int32_t pos_step_impl() { return blockDim.x * gridDim.x; } extern "C" __device__ int8_t thread_warp_idx(const int8_t warp_sz) { return threadIdx.x % warp_sz; } extern "C" __device__ const int64_t* init_shared_mem_nop( const int64_t* groups_buffer, const int32_t groups_buffer_size) { return groups_buffer; } extern "C" __device__ void write_back_nop(int64_t* dest, int64_t* src, const int32_t sz) { } /* * Just declares and returns a dynamic shared memory pointer. Total size should be * properly set during kernel launch */ extern "C" __device__ int64_t* declare_dynamic_shared_memory() { extern __shared__ int64_t shared_mem_buffer[]; return shared_mem_buffer; } /** * Initializes the shared memory buffer for perfect hash group by. * In this function, we simply copy the global group by buffer (already initialized on the * host and transferred) to all shared memory group by buffers. */ extern "C" __device__ const int64_t* init_shared_mem(const int64_t* global_groups_buffer, const int32_t groups_buffer_size) { // dynamic shared memory declaration extern __shared__ int64_t shared_groups_buffer[]; // it is assumed that buffer size is aligned with 64-bit units // so it is safe to assign 64-bit to each thread const int32_t buffer_units = groups_buffer_size >> 3; for (int32_t pos = threadIdx.x; pos < buffer_units; pos += blockDim.x) { shared_groups_buffer[pos] = global_groups_buffer[pos]; } __syncthreads(); return shared_groups_buffer; } #define init_group_by_buffer_gpu_impl init_group_by_buffer_gpu #include "GpuInitGroups.cu" #undef init_group_by_buffer_gpu_impl // Dynamic watchdog: monitoring up to 64 SMs. E.g. GP100 config may have 60: // 6 Graphics Processing Clusters (GPCs) * 10 Streaming Multiprocessors // TODO(Saman): move these into a kernel parameter, allocated and initialized through CUDA __device__ int64_t dw_sm_cycle_start[128]; // Set from host before launching the kernel // TODO(Saman): make this cycle budget something constant in codegen level __device__ int64_t dw_cycle_budget = 0; // Set from host before launching the kernel __device__ int32_t dw_abort = 0; // TBD: set from host (async) __device__ int32_t runtime_interrupt_flag = 0; __inline__ __device__ uint32_t get_smid(void) { uint32_t ret; asm("mov.u32 %0, %%smid;" : "=r"(ret)); return ret; } /* * The main objective of this function is to return true, if any of the following two * scenarios happen: * 1. receives a host request for aborting the kernel execution * 2. 
kernel execution takes longer clock cycles than it was initially allowed * The assumption is that all (or none) threads within a block return true for the * watchdog, and the first thread within each block compares the recorded clock cycles for * its occupying SM with the allowed budget. It also assumes that all threads entering * this function are active (no critical edge exposure) * NOTE: dw_cycle_budget, dw_abort, and dw_sm_cycle_start[] are all variables in global * memory scope. */ extern "C" __device__ bool dynamic_watchdog() { // check for dynamic watchdog, if triggered all threads return true if (dw_cycle_budget == 0LL) { return false; // Uninitialized watchdog can't check time } if (dw_abort == 1) { return true; // Received host request to abort } uint32_t smid = get_smid(); if (smid >= 128) { return false; } __shared__ volatile int64_t dw_block_cycle_start; // Thread block shared cycle start __shared__ volatile bool dw_should_terminate; // all threads within a block should return together if // watchdog criteria is met // thread 0 either initializes or read the initial clock cycle, the result is stored // into shared memory. Since all threads wihtin a block shares the same SM, there's no // point in using more threads here. if (threadIdx.x == 0) { dw_block_cycle_start = 0LL; int64_t cycle_count = static_cast<int64_t>(clock64()); // Make sure the block hasn't switched SMs if (smid == get_smid()) { dw_block_cycle_start = static_cast<int64_t>( atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]), 0ULL, static_cast<unsigned long long>(cycle_count))); } int64_t cycles = cycle_count - dw_block_cycle_start; if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) && (cycles > dw_cycle_budget)) { // Check if we're out of time on this particular SM dw_should_terminate = true; } else { dw_should_terminate = false; } } __syncthreads(); return dw_should_terminate; } extern "C" __device__ bool check_interrupt() { return (runtime_interrupt_flag == 1) ? 
true : false; } template <typename T = unsigned long long> inline __device__ T get_empty_key() { return EMPTY_KEY_64; } template <> inline __device__ unsigned int get_empty_key() { return EMPTY_KEY_32; } template <typename T> inline __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t h, const T* key, const uint32_t key_count, const uint32_t row_size_quad) { const T empty_key = get_empty_key<T>(); uint32_t off = h * row_size_quad; auto row_ptr = reinterpret_cast<T*>(groups_buffer + off); { const T old = atomicCAS(row_ptr, empty_key, *key); if (empty_key == old && key_count > 1) { for (size_t i = 1; i <= key_count - 1; ++i) { atomicExch(row_ptr + i, key[i]); } } } if (key_count > 1) { while (atomicAdd(row_ptr + key_count - 1, 0) == empty_key) { // spin until the winning thread has finished writing the entire key and the init // value } } bool match = true; for (uint32_t i = 0; i < key_count; ++i) { if (row_ptr[i] != key[i]) { match = false; break; } } if (match) { auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count); return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8)); } return NULL; } extern "C" __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t h, const int64_t* key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t* init_vals) { switch (key_width) { case 4: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned int*>(key), key_count, row_size_quad); case 8: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned long long*>(key), key_count, row_size_quad); default: return NULL; } } template <typename T> __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const T* key, const uint32_t key_count) { const T empty_key = get_empty_key<T>(); const uint64_t old = atomicCAS(reinterpret_cast<T*>(groups_buffer + h), empty_key, *key); // the winner thread proceeds with writing the rest fo the keys if (old == empty_key) { uint32_t offset = h + entry_count; for (size_t i = 1; i < key_count; ++i) { *reinterpret_cast<T*>(groups_buffer + offset) = key[i]; offset += entry_count; } } __threadfence(); // for all threads except the winning thread, memory content of the keys // related to the hash offset are checked again. 
In case of a complete match // the hash offset is returned, otherwise -1 is returned if (old != empty_key) { uint32_t offset = h; for (uint32_t i = 0; i < key_count; ++i) { if (*reinterpret_cast<T*>(groups_buffer + offset) != key[i]) { return -1; } offset += entry_count; } } return h; } extern "C" __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t* key, const uint32_t key_count, const uint32_t key_width) { switch (key_width) { case 4: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned int*>(key), key_count); case 8: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned long long*>(key), key_count); default: return -1; } } extern "C" __device__ int64_t* get_matching_group_value_columnar( int64_t* groups_buffer, const uint32_t h, const int64_t* key, const uint32_t key_qw_count, const size_t entry_count) { uint32_t off = h; { const uint64_t old = atomicCAS( reinterpret_cast<unsigned long long*>(groups_buffer + off), EMPTY_KEY_64, *key); if (EMPTY_KEY_64 == old) { for (size_t i = 0; i < key_qw_count; ++i) { groups_buffer[off] = key[i]; off += entry_count; } return &groups_buffer[off]; } } __syncthreads(); off = h; for (size_t i = 0; i < key_qw_count; ++i) { if (groups_buffer[off] != key[i]) { return NULL; } off += entry_count; } return &groups_buffer[off]; } #include "GroupByRuntime.cpp" #include "JoinHashTableQueryRuntime.cpp" #include "MurmurHash.cpp" #include "TopKRuntime.cpp" __device__ int64_t atomicMax64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, max((long long)val, (long long)assumed)); } while (assumed != old); return old; } __device__ int64_t atomicMin64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, min((long long)val, (long long)assumed)); } while (assumed != old); return old; } #if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600) __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif __device__ double atomicMax(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(max(val, __longlong_as_double(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } __device__ float atomicMax(float* address, float val) { int* address_as_int = (int*)address; int old = *address_as_int, assumed; do { assumed = old; old = atomicCAS( address_as_int, assumed, __float_as_int(max(val, __int_as_float(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN 
(since NaN != NaN) } while (assumed != old); return __int_as_float(old); } __device__ double atomicMin(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } __device__ double atomicMin(float* address, float val) { int* address_as_ull = (int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __float_as_int(min(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } extern "C" __device__ uint64_t agg_count_shared(uint64_t* agg, const int64_t val) { return static_cast<uint64_t>(atomicAdd(reinterpret_cast<uint32_t*>(agg), 1UL)); } extern "C" __device__ uint32_t agg_count_int32_shared(uint32_t* agg, const int32_t val) { return atomicAdd(agg, 1UL); } extern "C" __device__ uint64_t agg_count_double_shared(uint64_t* agg, const double val) { return agg_count_shared(agg, val); } extern "C" __device__ uint32_t agg_count_float_shared(uint32_t* agg, const float val) { return agg_count_int32_shared(agg, val); } extern "C" __device__ int64_t agg_sum_shared(int64_t* agg, const int64_t val) { return atomicAdd(reinterpret_cast<unsigned long long*>(agg), val); } extern "C" __device__ int32_t agg_sum_int32_shared(int32_t* agg, const int32_t val) { return atomicAdd(agg, val); } extern "C" __device__ void agg_sum_float_shared(int32_t* agg, const float val) { atomicAdd(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_sum_double_shared(int64_t* agg, const double val) { atomicAdd(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_shared(int64_t* agg, const int64_t val) { atomicMax64(agg, val); } extern "C" __device__ void agg_max_int32_shared(int32_t* agg, const int32_t val) { atomicMax(agg, val); } extern "C" __device__ void agg_max_double_shared(int64_t* agg, const double val) { atomicMax(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_float_shared(int32_t* agg, const float val) { atomicMax(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_min_shared(int64_t* agg, const int64_t val) { atomicMin64(agg, val); } extern "C" __device__ void agg_min_int32_shared(int32_t* agg, const int32_t val) { atomicMin(agg, val); } #if TORCH_HIP_VERSION > 10000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __device__ void atomicMax16(int16_t* agg, const int16_t val) { unsigned short int* address_as_us = reinterpret_cast<unsigned short int*>(agg); unsigned short int old = *address_as_us, assumed; do { assumed = old; old = atomicCAS(address_as_us, assumed, static_cast<unsigned short>(max(static_cast<short int>(val), static_cast<short int>(assumed)))); } while (assumed != old); } #else __device__ void atomicMax16(int16_t* agg, const int16_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? 
static_cast<unsigned int>(max(static_cast<int16_t>(old_value >> 16), val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( max(static_cast<int16_t>(old_value & 0xFFFF), val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } #endif __device__ void atomicMax8(int8_t* agg, const int8_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); // __byte_perm(unsigned int A, unsigned int B, unsigned int s): // if s == 0x3214 returns {A[31..24], A[23..16], A[15..8], B[7..0]} // if s == 0x3240 returns {A[31..24], A[23..16], B[7...0], A[7..0]} // if s == 0x3410 returns {A[31..24], B[7....0], A[15..8], A[7..0]} // if s == 0x4210 returns {B[7....0], A[23..16], A[15..8], A[7..0]} constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; auto max_value = static_cast<unsigned int>( // compare val with its corresponding bits in the compare_value max(val, static_cast<int8_t>(__byte_perm( compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)))); swap_value = __byte_perm( compare_value, max_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } #if TORCH_HIP_VERSION > 10000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __device__ void atomicMin16(int16_t* agg, const int16_t val) { unsigned short int* address_as_us = reinterpret_cast<unsigned short int*>(agg); unsigned short int old = *address_as_us, assumed; do { assumed = old; old = atomicCAS(address_as_us, assumed, static_cast<unsigned short>(min(static_cast<short int>(val), static_cast<short int>(assumed)))); } while (assumed != old); } #else __device__ void atomicMin16(int16_t* agg, const int16_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<unsigned int>(min(static_cast<int16_t>(old_value >> 16), val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( min(static_cast<int16_t>(old_value & 0xFFFF), val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } #endif __device__ void atomicMin16SkipVal(int16_t* agg, const int16_t val, const int16_t skip_val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; int16_t selected_old_val = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<int16_t>(old_value >> 16) : static_cast<int16_t>(old_value & 0xFFFF); swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<unsigned int>( selected_old_val == skip_val ? val : min(selected_old_val, val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( selected_old_val == skip_val ? 
val : min(selected_old_val, val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } __device__ void atomicMin8(int8_t* agg, const int8_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; auto min_value = static_cast<unsigned int>( min(val, static_cast<int8_t>(__byte_perm( compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)))); swap_value = __byte_perm( compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } __device__ void atomicMin8SkipVal(int8_t* agg, const int8_t val, const int8_t skip_val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; int8_t selected_old_val = static_cast<int8_t>( __byte_perm(compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)); auto min_value = static_cast<unsigned int>( selected_old_val == skip_val ? val : min(val, selected_old_val)); swap_value = __byte_perm( compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } extern "C" __device__ void agg_max_int16_shared(int16_t* agg, const int16_t val) { return atomicMax16(agg, val); } extern "C" __device__ void agg_max_int8_shared(int8_t* agg, const int8_t val) { return atomicMax8(agg, val); } extern "C" __device__ void agg_min_int16_shared(int16_t* agg, const int16_t val) { return atomicMin16(agg, val); } extern "C" __device__ void agg_min_int8_shared(int8_t* agg, const int8_t val) { return atomicMin8(agg, val); } extern "C" __device__ void agg_min_double_shared(int64_t* agg, const double val) { atomicMin(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_min_float_shared(int32_t* agg, const float val) { atomicMin(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_id_shared(int64_t* agg, const int64_t val) { *agg = val; } extern "C" __device__ int32_t checked_single_agg_id_shared(int64_t* agg, const int64_t val, const int64_t null_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg); unsigned long long int old = *address_as_ull, assumed; if (val == null_val) { return 0; } do { if (static_cast<int64_t>(old) != null_val) { if (static_cast<int64_t>(old) != val) { // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES return 15; } else { break; } } assumed = old; old = atomicCAS(address_as_ull, assumed, val); } while (assumed != old); return 0; } #define DEF_AGG_ID_INT_SHARED(n) \ extern "C" __device__ void agg_id_int##n##_shared(int##n##_t* agg, \ const int##n##_t val) { \ *agg = val; \ } DEF_AGG_ID_INT_SHARED(32) DEF_AGG_ID_INT_SHARED(16) DEF_AGG_ID_INT_SHARED(8) #undef DEF_AGG_ID_INT_SHARED extern "C" __device__ void agg_id_double_shared(int64_t* agg, const double val) { *agg = *(reinterpret_cast<const int64_t*>(&val)); 
} extern "C" __device__ int32_t checked_single_agg_id_double_shared(int64_t* agg, const double val, const double null_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg); unsigned long long int old = *address_as_ull, assumed; if (val == null_val) { return 0; } do { if (static_cast<int64_t>(old) != __double_as_longlong(null_val)) { if (static_cast<int64_t>(old) != __double_as_longlong(val)) { // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES return 15; } else { break; } } assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val)); } while (assumed != old); return 0; } extern "C" __device__ void agg_id_double_shared_slow(int64_t* agg, const double* val) { *agg = *(reinterpret_cast<const int64_t*>(val)); } extern "C" __device__ int32_t checked_single_agg_id_double_shared_slow(int64_t* agg, const double* valp, const double null_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg); unsigned long long int old = *address_as_ull, assumed; double val = *valp; if (val == null_val) { return 0; } do { if (static_cast<int64_t>(old) != __double_as_longlong(null_val)) { if (static_cast<int64_t>(old) != __double_as_longlong(val)) { // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES return 15; } else { break; } } assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val)); } while (assumed != old); return 0; } extern "C" __device__ void agg_id_float_shared(int32_t* agg, const float val) { *agg = __float_as_int(val); } extern "C" __device__ int32_t checked_single_agg_id_float_shared(int32_t* agg, const float val, const float null_val) { int* address_as_ull = reinterpret_cast<int*>(agg); int old = *address_as_ull, assumed; if (val == null_val) { return 0; } do { if (old != __float_as_int(null_val)) { if (old != __float_as_int(val)) { // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES return 15; } else { break; } } assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val)); } while (assumed != old); return 0; } #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return 0; \ } #define DATA_T int64_t #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count) #undef DATA_T #undef ADDR_T #define DATA_T int32_t #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_int32) #undef DATA_T #undef ADDR_T // Initial value for nullable column is INT32_MIN extern "C" __device__ void agg_max_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { agg_max_int32_shared(agg, val); } } extern "C" __device__ void agg_max_int16_skip_val_shared(int16_t* agg, const int16_t val, const int16_t skip_val) { if (val != skip_val) { agg_max_int16_shared(agg, val); } } extern "C" __device__ void agg_min_int16_skip_val_shared(int16_t* agg, const int16_t val, const int16_t skip_val) { if (val != skip_val) { atomicMin16SkipVal(agg, val, skip_val); } } extern "C" __device__ void agg_max_int8_skip_val_shared(int8_t* agg, const int8_t val, const int8_t skip_val) { if (val != skip_val) { agg_max_int8_shared(agg, val); } } extern "C" __device__ void agg_min_int8_skip_val_shared(int8_t* agg, const int8_t val, const int8_t skip_val) { if (val != skip_val) { atomicMin8SkipVal(agg, val, skip_val); } } __device__ int32_t atomicMin32SkipVal(int32_t* address, int32_t val, const int32_t 
skip_val) { int32_t old = atomicExch(address, INT_MAX); return atomicMin(address, old == skip_val ? val : min(old, val)); } extern "C" __device__ void agg_min_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { atomicMin32SkipVal(agg, val, skip_val); } } __device__ int32_t atomicSum32SkipVal(int32_t* address, const int32_t val, const int32_t skip_val) { unsigned int* address_as_int = (unsigned int*)address; int32_t old = atomicExch(address_as_int, 0); int32_t old2 = atomicAdd(address_as_int, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int32_t agg_sum_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { const int32_t old = atomicSum32SkipVal(agg, val, skip_val); return old; } return 0; } __device__ int64_t atomicSum64SkipVal(int64_t* address, const int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; int64_t old = atomicExch(address_as_ull, 0); int64_t old2 = atomicAdd(address_as_ull, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int64_t agg_sum_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { return atomicSum64SkipVal(agg, val, skip_val); } return 0; } __device__ int64_t atomicMin64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? val : min((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_min_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMin64SkipVal(agg, val, skip_val); } } __device__ int64_t atomicMax64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? val : max((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_max_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMax64SkipVal(agg, val, skip_val); } } #undef DEF_SKIP_AGG #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return *agg; \ } #define DATA_T double #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count_double) #undef ADDR_T #undef DATA_T #define DATA_T float #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_float) #undef ADDR_T #undef DATA_T // Initial value for nullable column is FLOAT_MIN extern "C" __device__ void agg_max_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { float old = atomicExch(reinterpret_cast<float*>(agg), -FLT_MAX); atomicMax(reinterpret_cast<float*>(agg), __float_as_int(old) == __float_as_int(skip_val) ? 
val : fmaxf(old, val)); } } __device__ float atomicMinFltSkipVal(int32_t* address, float val, const float skip_val) { float old = atomicExch(reinterpret_cast<float*>(address), FLT_MAX); return atomicMin( reinterpret_cast<float*>(address), __float_as_int(old) == __float_as_int(skip_val) ? val : fminf(old, val)); } extern "C" __device__ void agg_min_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicMinFltSkipVal(agg, val, skip_val); } } __device__ void atomicSumFltSkipVal(float* address, const float val, const float skip_val) { float old = atomicExch(address, 0.f); atomicAdd(address, __float_as_int(old) == __float_as_int(skip_val) ? val : (val + old)); } extern "C" __device__ void agg_sum_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicSumFltSkipVal(reinterpret_cast<float*>(agg), val, skip_val); } } __device__ void atomicSumDblSkipVal(double* address, const double val, const double skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; double old = __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(0.))); atomicAdd( address, __double_as_longlong(old) == __double_as_longlong(skip_val) ? val : (val + old)); } extern "C" __device__ void agg_sum_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (__double_as_longlong(val) != __double_as_longlong(skip_val)) { atomicSumDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } __device__ double atomicMinDblSkipVal(double* address, double val, const double skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull; unsigned long long int skip_val_as_ull = *reinterpret_cast<const unsigned long long*>(&skip_val); unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val_as_ull ? *reinterpret_cast<unsigned long long*>(&val) : __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } extern "C" __device__ void agg_min_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (val != skip_val) { atomicMinDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } extern "C" __device__ void agg_max_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (__double_as_longlong(val) != __double_as_longlong(skip_val)) { double old = __longlong_as_double(atomicExch( reinterpret_cast<unsigned long long int*>(agg), __double_as_longlong(-DBL_MAX))); atomicMax(reinterpret_cast<double*>(agg), __double_as_longlong(old) == __double_as_longlong(skip_val) ? 
val : fmax(old, val)); } } #undef DEF_SKIP_AGG extern "C" __device__ bool slotEmptyKeyCAS(int64_t* slot, int64_t new_val, int64_t init_val) { auto slot_address = reinterpret_cast<unsigned long long int*>(slot); const auto empty_key = static_cast<unsigned long long int*>(static_cast<void*>(&init_val)); const auto new_val_cast = static_cast<unsigned long long int*>(static_cast<void*>(&new_val)); const auto old_val = atomicCAS(slot_address, *empty_key, *new_val_cast); if (old_val == *empty_key) { return true; } else { return false; } } extern "C" __device__ bool slotEmptyKeyCAS_int32(int32_t* slot, int32_t new_val, int32_t init_val) { unsigned int* slot_address = reinterpret_cast<unsigned int*>(slot); unsigned int compare_value = static_cast<unsigned int>(init_val); unsigned int swap_value = static_cast<unsigned int>(new_val); const unsigned int old_value = atomicCAS(slot_address, compare_value, swap_value); return old_value == compare_value; } extern "C" __device__ bool slotEmptyKeyCAS_int16(int16_t* slot, int16_t new_val, int16_t init_val) { unsigned int* base_slot_address = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3); unsigned int old_value = *base_slot_address; unsigned int swap_value, compare_value; do { compare_value = old_value; // exit criteria: if init_val does not exist in the slot (some other thread has // succeeded) if (static_cast<unsigned int>(init_val) != __byte_perm( compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x2 ? 0x3244 : 0x4410))) { return false; } swap_value = __byte_perm(compare_value, static_cast<unsigned int>(new_val), (reinterpret_cast<size_t>(slot) & 0x2) ? 0x5410 : 0x3254); old_value = atomicCAS(base_slot_address, compare_value, swap_value); } while (compare_value != old_value); return true; } extern "C" __device__ bool slotEmptyKeyCAS_int8(int8_t* slot, int8_t new_val, int8_t init_val) { // properly align the slot address: unsigned int* base_slot_address = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_slot_address; unsigned int swap_value, compare_value; do { compare_value = old_value; // exit criteria: if init_val does not exist in the slot (some other thread has // succeeded) if (static_cast<unsigned int>(init_val) != __byte_perm(compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x3) | 0x4440)) { return false; } swap_value = __byte_perm(compare_value, static_cast<unsigned int>(new_val), byte_permutations[reinterpret_cast<size_t>(slot) & 0x3]); old_value = atomicCAS(base_slot_address, compare_value, swap_value); } while (compare_value != old_value); return true; } #include "../Utils/ChunkIter.cpp" #include "DateTruncate.cpp" #include "ExtractFromTime.cpp" #define EXECUTE_INCLUDE #include "ArrayOps.cpp" #include "DateAdd.cpp" #include "StringFunctions.cpp" #undef EXECUTE_INCLUDE #include "../Utils/Regexp.cpp" #include "../Utils/StringLike.cpp" extern "C" __device__ uint64_t string_decode(int8_t* chunk_iter_, int64_t pos) { // TODO(alex): de-dup, the x64 version is basically identical ChunkIter* chunk_iter = reinterpret_cast<ChunkIter*>(chunk_iter_); VarlenDatum vd; bool is_end; ChunkIter_get_nth(chunk_iter, pos, false, &vd, &is_end); return vd.is_null ? 
0 : (reinterpret_cast<uint64_t>(vd.pointer) & 0xffffffffffff) | (static_cast<uint64_t>(vd.length) << 48); } extern "C" __device__ void linear_probabilistic_count(uint8_t* bitmap, const uint32_t bitmap_bytes, const uint8_t* key_bytes, const uint32_t key_len) { const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8); const uint32_t word_idx = bit_pos / 32; const uint32_t bit_idx = bit_pos % 32; atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx); } extern "C" __device__ void agg_count_distinct_bitmap_gpu(int64_t* agg, const int64_t val, const int64_t min_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { const uint64_t bitmap_idx = val - min_val; const uint32_t byte_idx = bitmap_idx >> 3; const uint32_t word_idx = byte_idx >> 2; const uint32_t byte_word_idx = byte_idx & 3; const int64_t host_addr = *agg; uint32_t* bitmap = (uint32_t*)(base_dev_addr + host_addr - base_host_addr + (threadIdx.x & (sub_bitmap_count - 1)) * bitmap_bytes); switch (byte_word_idx) { case 0: atomicOr(&bitmap[word_idx], 1 << (bitmap_idx & 7)); break; case 1: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 8)); break; case 2: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 16)); break; case 3: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 24)); break; default: break; } } extern "C" __device__ void agg_count_distinct_bitmap_skip_val_gpu( int64_t* agg, const int64_t val, const int64_t min_val, const int64_t skip_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { if (val != skip_val) { agg_count_distinct_bitmap_gpu( agg, val, min_val, base_dev_addr, base_host_addr, sub_bitmap_count, bitmap_bytes); } } extern "C" __device__ void agg_approximate_count_distinct_gpu( int64_t* agg, const int64_t key, const uint32_t b, const int64_t base_dev_addr, const int64_t base_host_addr) { const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0); const uint32_t index = hash >> (64 - b); const int32_t rank = get_rank(hash << b, 64 - b); const int64_t host_addr = *agg; int32_t* M = (int32_t*)(base_dev_addr + host_addr - base_host_addr); atomicMax(&M[index], rank); } extern "C" __device__ void force_sync() { __threadfence_block(); } extern "C" __device__ void sync_warp() { __syncwarp(); } /** * Protected warp synchornization to make sure all (or none) threads within a warp go * through a synchronization barrier. thread_pos: the current thread position to be used * for a memory access row_count: maximum number of rows to be processed The function * performs warp sync iff all 32 threads within that warp will process valid data NOTE: * it currently assumes that warp size is 32. */ extern "C" __device__ void sync_warp_protected(int64_t thread_pos, int64_t row_count) { // only syncing if NOT within the same warp as those threads experiencing the critical // edge if ((((row_count - 1) | 0x1F) - thread_pos) >= 32) { __syncwarp(); } } extern "C" __device__ void sync_threadblock() { __syncthreads(); } /* * Currently, we just use this function for handling non-grouped aggregates * with COUNT queries (with GPU shared memory used). Later, we should generate code for * this depending on the type of aggregate functions. * TODO: we should use one contiguous global memory buffer, rather than current default * behaviour of multiple buffers, each for one aggregate. 
Once that's resolved, we can * do this much more cleanly than in this function. */ extern "C" __device__ void write_back_non_grouped_agg(int64_t* input_buffer, int64_t* output_buffer, const int32_t agg_idx) { if (threadIdx.x == agg_idx) { agg_sum_shared(output_buffer, input_buffer[agg_idx]); } }
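// A minimal, self-contained sketch (hypothetical names: atomic_min_skip_val,
// min_kernel) of the CAS-retry skip-value idiom shared by atomicMin64SkipVal,
// atomicMax64SkipVal and atomicMinDblSkipVal above: the slot holds a sentinel
// ("skip value") until the first real value arrives, and every update goes
// through an atomicCAS retry loop that either replaces the sentinel or
// combines with the current value. Illustration only, not part of this runtime.

#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

__device__ void atomic_min_skip_val(int64_t* slot, int64_t val, int64_t skip_val) {
  auto* address = reinterpret_cast<unsigned long long*>(slot);
  unsigned long long old = *address, assumed;
  do {
    assumed = old;
    const long long current = static_cast<long long>(assumed);
    // The first real value replaces the sentinel; later values are min-combined.
    const long long desired =
        (current == skip_val) ? val : min(static_cast<long long>(val), current);
    old = atomicCAS(address, assumed, static_cast<unsigned long long>(desired));
  } while (assumed != old);  // retry if another thread updated the slot first
}

__global__ void min_kernel(int64_t* slot, const int64_t* vals, int n, int64_t skip_val) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n && vals[i] != skip_val) {  // NULL inputs never touch the slot
    atomic_min_skip_val(slot, vals[i], skip_val);
  }
}

int main() {
  const int64_t skip_val = -1;  // sentinel meaning "no value yet" / NULL
  int64_t h_vals[] = {7, -1, 3, 42, -1, 5};
  const int n = 6;
  int64_t *d_slot, *d_vals;
  cudaMalloc(&d_slot, sizeof(int64_t));
  cudaMalloc(&d_vals, n * sizeof(int64_t));
  cudaMemcpy(d_slot, &skip_val, sizeof(int64_t), cudaMemcpyHostToDevice);
  cudaMemcpy(d_vals, h_vals, n * sizeof(int64_t), cudaMemcpyHostToDevice);
  min_kernel<<<1, 32>>>(d_slot, d_vals, n, skip_val);
  int64_t result;
  cudaMemcpy(&result, d_slot, sizeof(int64_t), cudaMemcpyDeviceToHost);
  printf("min ignoring skip_val: %lld\n", static_cast<long long>(result));  // expect 3
  cudaFree(d_slot);
  cudaFree(d_vals);
  return 0;
}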
0782b4d6f87d8c6a283ed0c5768e81780b5eb6aa.cu
#include <cuda.h> #include <float.h> #include <stdint.h> #include <stdio.h> #include <limits> #include "BufferCompaction.h" #include "ExtensionFunctions.hpp" #include "GpuRtConstants.h" #include "HyperLogLogRank.h" #include "TableFunctions/TableFunctions.hpp" #if CUDA_VERSION < 10000 static_assert(false, "CUDA v10.0 or later is required."); #endif #if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 350) static_assert(false, "CUDA Compute Capability of 3.5 or greater is required."); #endif extern "C" __device__ int64_t get_thread_index() { return threadIdx.x; } extern "C" __device__ int64_t get_block_index() { return blockIdx.x; } extern "C" __device__ int32_t pos_start_impl(const int32_t* row_index_resume) { return blockIdx.x * blockDim.x + threadIdx.x; } extern "C" __device__ int32_t group_buff_idx_impl() { return pos_start_impl(NULL); } extern "C" __device__ int32_t pos_step_impl() { return blockDim.x * gridDim.x; } extern "C" __device__ int8_t thread_warp_idx(const int8_t warp_sz) { return threadIdx.x % warp_sz; } extern "C" __device__ const int64_t* init_shared_mem_nop( const int64_t* groups_buffer, const int32_t groups_buffer_size) { return groups_buffer; } extern "C" __device__ void write_back_nop(int64_t* dest, int64_t* src, const int32_t sz) { } /* * Just declares and returns a dynamic shared memory pointer. Total size should be * properly set during kernel launch */ extern "C" __device__ int64_t* declare_dynamic_shared_memory() { extern __shared__ int64_t shared_mem_buffer[]; return shared_mem_buffer; } /** * Initializes the shared memory buffer for perfect hash group by. * In this function, we simply copy the global group by buffer (already initialized on the * host and transferred) to all shared memory group by buffers. */ extern "C" __device__ const int64_t* init_shared_mem(const int64_t* global_groups_buffer, const int32_t groups_buffer_size) { // dynamic shared memory declaration extern __shared__ int64_t shared_groups_buffer[]; // it is assumed that buffer size is aligned with 64-bit units // so it is safe to assign 64-bit to each thread const int32_t buffer_units = groups_buffer_size >> 3; for (int32_t pos = threadIdx.x; pos < buffer_units; pos += blockDim.x) { shared_groups_buffer[pos] = global_groups_buffer[pos]; } __syncthreads(); return shared_groups_buffer; } #define init_group_by_buffer_gpu_impl init_group_by_buffer_gpu #include "GpuInitGroups.cu" #undef init_group_by_buffer_gpu_impl // Dynamic watchdog: monitoring up to 64 SMs. E.g. GP100 config may have 60: // 6 Graphics Processing Clusters (GPCs) * 10 Streaming Multiprocessors // TODO(Saman): move these into a kernel parameter, allocated and initialized through CUDA __device__ int64_t dw_sm_cycle_start[128]; // Set from host before launching the kernel // TODO(Saman): make this cycle budget something constant in codegen level __device__ int64_t dw_cycle_budget = 0; // Set from host before launching the kernel __device__ int32_t dw_abort = 0; // TBD: set from host (async) __device__ int32_t runtime_interrupt_flag = 0; __inline__ __device__ uint32_t get_smid(void) { uint32_t ret; asm("mov.u32 %0, %%smid;" : "=r"(ret)); return ret; } /* * The main objective of this function is to return true, if any of the following two * scenarios happen: * 1. receives a host request for aborting the kernel execution * 2. 
kernel execution takes longer clock cycles than it was initially allowed * The assumption is that all (or none) threads within a block return true for the * watchdog, and the first thread within each block compares the recorded clock cycles for * its occupying SM with the allowed budget. It also assumes that all threads entering * this function are active (no critical edge exposure) * NOTE: dw_cycle_budget, dw_abort, and dw_sm_cycle_start[] are all variables in global * memory scope. */ extern "C" __device__ bool dynamic_watchdog() { // check for dynamic watchdog, if triggered all threads return true if (dw_cycle_budget == 0LL) { return false; // Uninitialized watchdog can't check time } if (dw_abort == 1) { return true; // Received host request to abort } uint32_t smid = get_smid(); if (smid >= 128) { return false; } __shared__ volatile int64_t dw_block_cycle_start; // Thread block shared cycle start __shared__ volatile bool dw_should_terminate; // all threads within a block should return together if // watchdog criteria is met // thread 0 either initializes or read the initial clock cycle, the result is stored // into shared memory. Since all threads wihtin a block shares the same SM, there's no // point in using more threads here. if (threadIdx.x == 0) { dw_block_cycle_start = 0LL; int64_t cycle_count = static_cast<int64_t>(clock64()); // Make sure the block hasn't switched SMs if (smid == get_smid()) { dw_block_cycle_start = static_cast<int64_t>( atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]), 0ULL, static_cast<unsigned long long>(cycle_count))); } int64_t cycles = cycle_count - dw_block_cycle_start; if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) && (cycles > dw_cycle_budget)) { // Check if we're out of time on this particular SM dw_should_terminate = true; } else { dw_should_terminate = false; } } __syncthreads(); return dw_should_terminate; } extern "C" __device__ bool check_interrupt() { return (runtime_interrupt_flag == 1) ? 
true : false; } template <typename T = unsigned long long> inline __device__ T get_empty_key() { return EMPTY_KEY_64; } template <> inline __device__ unsigned int get_empty_key() { return EMPTY_KEY_32; } template <typename T> inline __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t h, const T* key, const uint32_t key_count, const uint32_t row_size_quad) { const T empty_key = get_empty_key<T>(); uint32_t off = h * row_size_quad; auto row_ptr = reinterpret_cast<T*>(groups_buffer + off); { const T old = atomicCAS(row_ptr, empty_key, *key); if (empty_key == old && key_count > 1) { for (size_t i = 1; i <= key_count - 1; ++i) { atomicExch(row_ptr + i, key[i]); } } } if (key_count > 1) { while (atomicAdd(row_ptr + key_count - 1, 0) == empty_key) { // spin until the winning thread has finished writing the entire key and the init // value } } bool match = true; for (uint32_t i = 0; i < key_count; ++i) { if (row_ptr[i] != key[i]) { match = false; break; } } if (match) { auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count); return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8)); } return NULL; } extern "C" __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t h, const int64_t* key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t* init_vals) { switch (key_width) { case 4: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned int*>(key), key_count, row_size_quad); case 8: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned long long*>(key), key_count, row_size_quad); default: return NULL; } } template <typename T> __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const T* key, const uint32_t key_count) { const T empty_key = get_empty_key<T>(); const uint64_t old = atomicCAS(reinterpret_cast<T*>(groups_buffer + h), empty_key, *key); // the winner thread proceeds with writing the rest fo the keys if (old == empty_key) { uint32_t offset = h + entry_count; for (size_t i = 1; i < key_count; ++i) { *reinterpret_cast<T*>(groups_buffer + offset) = key[i]; offset += entry_count; } } __threadfence(); // for all threads except the winning thread, memory content of the keys // related to the hash offset are checked again. 
In case of a complete match // the hash offset is returned, otherwise -1 is returned if (old != empty_key) { uint32_t offset = h; for (uint32_t i = 0; i < key_count; ++i) { if (*reinterpret_cast<T*>(groups_buffer + offset) != key[i]) { return -1; } offset += entry_count; } } return h; } extern "C" __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t* key, const uint32_t key_count, const uint32_t key_width) { switch (key_width) { case 4: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned int*>(key), key_count); case 8: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned long long*>(key), key_count); default: return -1; } } extern "C" __device__ int64_t* get_matching_group_value_columnar( int64_t* groups_buffer, const uint32_t h, const int64_t* key, const uint32_t key_qw_count, const size_t entry_count) { uint32_t off = h; { const uint64_t old = atomicCAS( reinterpret_cast<unsigned long long*>(groups_buffer + off), EMPTY_KEY_64, *key); if (EMPTY_KEY_64 == old) { for (size_t i = 0; i < key_qw_count; ++i) { groups_buffer[off] = key[i]; off += entry_count; } return &groups_buffer[off]; } } __syncthreads(); off = h; for (size_t i = 0; i < key_qw_count; ++i) { if (groups_buffer[off] != key[i]) { return NULL; } off += entry_count; } return &groups_buffer[off]; } #include "GroupByRuntime.cpp" #include "JoinHashTableQueryRuntime.cpp" #include "MurmurHash.cpp" #include "TopKRuntime.cpp" __device__ int64_t atomicMax64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, max((long long)val, (long long)assumed)); } while (assumed != old); return old; } __device__ int64_t atomicMin64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, min((long long)val, (long long)assumed)); } while (assumed != old); return old; } #if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600) __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif __device__ double atomicMax(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(max(val, __longlong_as_double(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } __device__ float atomicMax(float* address, float val) { int* address_as_int = (int*)address; int old = *address_as_int, assumed; do { assumed = old; old = atomicCAS( address_as_int, assumed, __float_as_int(max(val, __int_as_float(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN 
(since NaN != NaN) } while (assumed != old); return __int_as_float(old); } __device__ double atomicMin(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } __device__ double atomicMin(float* address, float val) { int* address_as_ull = (int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __float_as_int(min(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } extern "C" __device__ uint64_t agg_count_shared(uint64_t* agg, const int64_t val) { return static_cast<uint64_t>(atomicAdd(reinterpret_cast<uint32_t*>(agg), 1UL)); } extern "C" __device__ uint32_t agg_count_int32_shared(uint32_t* agg, const int32_t val) { return atomicAdd(agg, 1UL); } extern "C" __device__ uint64_t agg_count_double_shared(uint64_t* agg, const double val) { return agg_count_shared(agg, val); } extern "C" __device__ uint32_t agg_count_float_shared(uint32_t* agg, const float val) { return agg_count_int32_shared(agg, val); } extern "C" __device__ int64_t agg_sum_shared(int64_t* agg, const int64_t val) { return atomicAdd(reinterpret_cast<unsigned long long*>(agg), val); } extern "C" __device__ int32_t agg_sum_int32_shared(int32_t* agg, const int32_t val) { return atomicAdd(agg, val); } extern "C" __device__ void agg_sum_float_shared(int32_t* agg, const float val) { atomicAdd(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_sum_double_shared(int64_t* agg, const double val) { atomicAdd(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_shared(int64_t* agg, const int64_t val) { atomicMax64(agg, val); } extern "C" __device__ void agg_max_int32_shared(int32_t* agg, const int32_t val) { atomicMax(agg, val); } extern "C" __device__ void agg_max_double_shared(int64_t* agg, const double val) { atomicMax(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_float_shared(int32_t* agg, const float val) { atomicMax(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_min_shared(int64_t* agg, const int64_t val) { atomicMin64(agg, val); } extern "C" __device__ void agg_min_int32_shared(int32_t* agg, const int32_t val) { atomicMin(agg, val); } #if CUDA_VERSION > 10000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __device__ void atomicMax16(int16_t* agg, const int16_t val) { unsigned short int* address_as_us = reinterpret_cast<unsigned short int*>(agg); unsigned short int old = *address_as_us, assumed; do { assumed = old; old = atomicCAS(address_as_us, assumed, static_cast<unsigned short>(max(static_cast<short int>(val), static_cast<short int>(assumed)))); } while (assumed != old); } #else __device__ void atomicMax16(int16_t* agg, const int16_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? 
static_cast<unsigned int>(max(static_cast<int16_t>(old_value >> 16), val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( max(static_cast<int16_t>(old_value & 0xFFFF), val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } #endif __device__ void atomicMax8(int8_t* agg, const int8_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); // __byte_perm(unsigned int A, unsigned int B, unsigned int s): // if s == 0x3214 returns {A[31..24], A[23..16], A[15..8], B[7..0]} // if s == 0x3240 returns {A[31..24], A[23..16], B[7...0], A[7..0]} // if s == 0x3410 returns {A[31..24], B[7....0], A[15..8], A[7..0]} // if s == 0x4210 returns {B[7....0], A[23..16], A[15..8], A[7..0]} constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; auto max_value = static_cast<unsigned int>( // compare val with its corresponding bits in the compare_value max(val, static_cast<int8_t>(__byte_perm( compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)))); swap_value = __byte_perm( compare_value, max_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } #if CUDA_VERSION > 10000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __device__ void atomicMin16(int16_t* agg, const int16_t val) { unsigned short int* address_as_us = reinterpret_cast<unsigned short int*>(agg); unsigned short int old = *address_as_us, assumed; do { assumed = old; old = atomicCAS(address_as_us, assumed, static_cast<unsigned short>(min(static_cast<short int>(val), static_cast<short int>(assumed)))); } while (assumed != old); } #else __device__ void atomicMin16(int16_t* agg, const int16_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<unsigned int>(min(static_cast<int16_t>(old_value >> 16), val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( min(static_cast<int16_t>(old_value & 0xFFFF), val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } #endif __device__ void atomicMin16SkipVal(int16_t* agg, const int16_t val, const int16_t skip_val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; int16_t selected_old_val = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<int16_t>(old_value >> 16) : static_cast<int16_t>(old_value & 0xFFFF); swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<unsigned int>( selected_old_val == skip_val ? val : min(selected_old_val, val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( selected_old_val == skip_val ? 
val : min(selected_old_val, val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } __device__ void atomicMin8(int8_t* agg, const int8_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; auto min_value = static_cast<unsigned int>( min(val, static_cast<int8_t>(__byte_perm( compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)))); swap_value = __byte_perm( compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } __device__ void atomicMin8SkipVal(int8_t* agg, const int8_t val, const int8_t skip_val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; int8_t selected_old_val = static_cast<int8_t>( __byte_perm(compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)); auto min_value = static_cast<unsigned int>( selected_old_val == skip_val ? val : min(val, selected_old_val)); swap_value = __byte_perm( compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } extern "C" __device__ void agg_max_int16_shared(int16_t* agg, const int16_t val) { return atomicMax16(agg, val); } extern "C" __device__ void agg_max_int8_shared(int8_t* agg, const int8_t val) { return atomicMax8(agg, val); } extern "C" __device__ void agg_min_int16_shared(int16_t* agg, const int16_t val) { return atomicMin16(agg, val); } extern "C" __device__ void agg_min_int8_shared(int8_t* agg, const int8_t val) { return atomicMin8(agg, val); } extern "C" __device__ void agg_min_double_shared(int64_t* agg, const double val) { atomicMin(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_min_float_shared(int32_t* agg, const float val) { atomicMin(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_id_shared(int64_t* agg, const int64_t val) { *agg = val; } extern "C" __device__ int32_t checked_single_agg_id_shared(int64_t* agg, const int64_t val, const int64_t null_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg); unsigned long long int old = *address_as_ull, assumed; if (val == null_val) { return 0; } do { if (static_cast<int64_t>(old) != null_val) { if (static_cast<int64_t>(old) != val) { // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES return 15; } else { break; } } assumed = old; old = atomicCAS(address_as_ull, assumed, val); } while (assumed != old); return 0; } #define DEF_AGG_ID_INT_SHARED(n) \ extern "C" __device__ void agg_id_int##n##_shared(int##n##_t* agg, \ const int##n##_t val) { \ *agg = val; \ } DEF_AGG_ID_INT_SHARED(32) DEF_AGG_ID_INT_SHARED(16) DEF_AGG_ID_INT_SHARED(8) #undef DEF_AGG_ID_INT_SHARED extern "C" __device__ void agg_id_double_shared(int64_t* agg, const double val) { *agg = *(reinterpret_cast<const int64_t*>(&val)); 
} extern "C" __device__ int32_t checked_single_agg_id_double_shared(int64_t* agg, const double val, const double null_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg); unsigned long long int old = *address_as_ull, assumed; if (val == null_val) { return 0; } do { if (static_cast<int64_t>(old) != __double_as_longlong(null_val)) { if (static_cast<int64_t>(old) != __double_as_longlong(val)) { // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES return 15; } else { break; } } assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val)); } while (assumed != old); return 0; } extern "C" __device__ void agg_id_double_shared_slow(int64_t* agg, const double* val) { *agg = *(reinterpret_cast<const int64_t*>(val)); } extern "C" __device__ int32_t checked_single_agg_id_double_shared_slow(int64_t* agg, const double* valp, const double null_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg); unsigned long long int old = *address_as_ull, assumed; double val = *valp; if (val == null_val) { return 0; } do { if (static_cast<int64_t>(old) != __double_as_longlong(null_val)) { if (static_cast<int64_t>(old) != __double_as_longlong(val)) { // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES return 15; } else { break; } } assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val)); } while (assumed != old); return 0; } extern "C" __device__ void agg_id_float_shared(int32_t* agg, const float val) { *agg = __float_as_int(val); } extern "C" __device__ int32_t checked_single_agg_id_float_shared(int32_t* agg, const float val, const float null_val) { int* address_as_ull = reinterpret_cast<int*>(agg); int old = *address_as_ull, assumed; if (val == null_val) { return 0; } do { if (old != __float_as_int(null_val)) { if (old != __float_as_int(val)) { // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES return 15; } else { break; } } assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val)); } while (assumed != old); return 0; } #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return 0; \ } #define DATA_T int64_t #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count) #undef DATA_T #undef ADDR_T #define DATA_T int32_t #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_int32) #undef DATA_T #undef ADDR_T // Initial value for nullable column is INT32_MIN extern "C" __device__ void agg_max_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { agg_max_int32_shared(agg, val); } } extern "C" __device__ void agg_max_int16_skip_val_shared(int16_t* agg, const int16_t val, const int16_t skip_val) { if (val != skip_val) { agg_max_int16_shared(agg, val); } } extern "C" __device__ void agg_min_int16_skip_val_shared(int16_t* agg, const int16_t val, const int16_t skip_val) { if (val != skip_val) { atomicMin16SkipVal(agg, val, skip_val); } } extern "C" __device__ void agg_max_int8_skip_val_shared(int8_t* agg, const int8_t val, const int8_t skip_val) { if (val != skip_val) { agg_max_int8_shared(agg, val); } } extern "C" __device__ void agg_min_int8_skip_val_shared(int8_t* agg, const int8_t val, const int8_t skip_val) { if (val != skip_val) { atomicMin8SkipVal(agg, val, skip_val); } } __device__ int32_t atomicMin32SkipVal(int32_t* address, int32_t val, const int32_t 
skip_val) { int32_t old = atomicExch(address, INT_MAX); return atomicMin(address, old == skip_val ? val : min(old, val)); } extern "C" __device__ void agg_min_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { atomicMin32SkipVal(agg, val, skip_val); } } __device__ int32_t atomicSum32SkipVal(int32_t* address, const int32_t val, const int32_t skip_val) { unsigned int* address_as_int = (unsigned int*)address; int32_t old = atomicExch(address_as_int, 0); int32_t old2 = atomicAdd(address_as_int, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int32_t agg_sum_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { const int32_t old = atomicSum32SkipVal(agg, val, skip_val); return old; } return 0; } __device__ int64_t atomicSum64SkipVal(int64_t* address, const int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; int64_t old = atomicExch(address_as_ull, 0); int64_t old2 = atomicAdd(address_as_ull, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int64_t agg_sum_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { return atomicSum64SkipVal(agg, val, skip_val); } return 0; } __device__ int64_t atomicMin64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? val : min((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_min_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMin64SkipVal(agg, val, skip_val); } } __device__ int64_t atomicMax64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? val : max((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_max_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMax64SkipVal(agg, val, skip_val); } } #undef DEF_SKIP_AGG #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return *agg; \ } #define DATA_T double #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count_double) #undef ADDR_T #undef DATA_T #define DATA_T float #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_float) #undef ADDR_T #undef DATA_T // Initial value for nullable column is FLOAT_MIN extern "C" __device__ void agg_max_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { float old = atomicExch(reinterpret_cast<float*>(agg), -FLT_MAX); atomicMax(reinterpret_cast<float*>(agg), __float_as_int(old) == __float_as_int(skip_val) ? 
val : fmaxf(old, val)); } } __device__ float atomicMinFltSkipVal(int32_t* address, float val, const float skip_val) { float old = atomicExch(reinterpret_cast<float*>(address), FLT_MAX); return atomicMin( reinterpret_cast<float*>(address), __float_as_int(old) == __float_as_int(skip_val) ? val : fminf(old, val)); } extern "C" __device__ void agg_min_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicMinFltSkipVal(agg, val, skip_val); } } __device__ void atomicSumFltSkipVal(float* address, const float val, const float skip_val) { float old = atomicExch(address, 0.f); atomicAdd(address, __float_as_int(old) == __float_as_int(skip_val) ? val : (val + old)); } extern "C" __device__ void agg_sum_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicSumFltSkipVal(reinterpret_cast<float*>(agg), val, skip_val); } } __device__ void atomicSumDblSkipVal(double* address, const double val, const double skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; double old = __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(0.))); atomicAdd( address, __double_as_longlong(old) == __double_as_longlong(skip_val) ? val : (val + old)); } extern "C" __device__ void agg_sum_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (__double_as_longlong(val) != __double_as_longlong(skip_val)) { atomicSumDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } __device__ double atomicMinDblSkipVal(double* address, double val, const double skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull; unsigned long long int skip_val_as_ull = *reinterpret_cast<const unsigned long long*>(&skip_val); unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val_as_ull ? *reinterpret_cast<unsigned long long*>(&val) : __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } extern "C" __device__ void agg_min_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (val != skip_val) { atomicMinDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } extern "C" __device__ void agg_max_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (__double_as_longlong(val) != __double_as_longlong(skip_val)) { double old = __longlong_as_double(atomicExch( reinterpret_cast<unsigned long long int*>(agg), __double_as_longlong(-DBL_MAX))); atomicMax(reinterpret_cast<double*>(agg), __double_as_longlong(old) == __double_as_longlong(skip_val) ? 
val : fmax(old, val)); } } #undef DEF_SKIP_AGG extern "C" __device__ bool slotEmptyKeyCAS(int64_t* slot, int64_t new_val, int64_t init_val) { auto slot_address = reinterpret_cast<unsigned long long int*>(slot); const auto empty_key = static_cast<unsigned long long int*>(static_cast<void*>(&init_val)); const auto new_val_cast = static_cast<unsigned long long int*>(static_cast<void*>(&new_val)); const auto old_val = atomicCAS(slot_address, *empty_key, *new_val_cast); if (old_val == *empty_key) { return true; } else { return false; } } extern "C" __device__ bool slotEmptyKeyCAS_int32(int32_t* slot, int32_t new_val, int32_t init_val) { unsigned int* slot_address = reinterpret_cast<unsigned int*>(slot); unsigned int compare_value = static_cast<unsigned int>(init_val); unsigned int swap_value = static_cast<unsigned int>(new_val); const unsigned int old_value = atomicCAS(slot_address, compare_value, swap_value); return old_value == compare_value; } extern "C" __device__ bool slotEmptyKeyCAS_int16(int16_t* slot, int16_t new_val, int16_t init_val) { unsigned int* base_slot_address = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3); unsigned int old_value = *base_slot_address; unsigned int swap_value, compare_value; do { compare_value = old_value; // exit criteria: if init_val does not exist in the slot (some other thread has // succeeded) if (static_cast<unsigned int>(init_val) != __byte_perm( compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x2 ? 0x3244 : 0x4410))) { return false; } swap_value = __byte_perm(compare_value, static_cast<unsigned int>(new_val), (reinterpret_cast<size_t>(slot) & 0x2) ? 0x5410 : 0x3254); old_value = atomicCAS(base_slot_address, compare_value, swap_value); } while (compare_value != old_value); return true; } extern "C" __device__ bool slotEmptyKeyCAS_int8(int8_t* slot, int8_t new_val, int8_t init_val) { // properly align the slot address: unsigned int* base_slot_address = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_slot_address; unsigned int swap_value, compare_value; do { compare_value = old_value; // exit criteria: if init_val does not exist in the slot (some other thread has // succeeded) if (static_cast<unsigned int>(init_val) != __byte_perm(compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x3) | 0x4440)) { return false; } swap_value = __byte_perm(compare_value, static_cast<unsigned int>(new_val), byte_permutations[reinterpret_cast<size_t>(slot) & 0x3]); old_value = atomicCAS(base_slot_address, compare_value, swap_value); } while (compare_value != old_value); return true; } #include "../Utils/ChunkIter.cpp" #include "DateTruncate.cpp" #include "ExtractFromTime.cpp" #define EXECUTE_INCLUDE #include "ArrayOps.cpp" #include "DateAdd.cpp" #include "StringFunctions.cpp" #undef EXECUTE_INCLUDE #include "../Utils/Regexp.cpp" #include "../Utils/StringLike.cpp" extern "C" __device__ uint64_t string_decode(int8_t* chunk_iter_, int64_t pos) { // TODO(alex): de-dup, the x64 version is basically identical ChunkIter* chunk_iter = reinterpret_cast<ChunkIter*>(chunk_iter_); VarlenDatum vd; bool is_end; ChunkIter_get_nth(chunk_iter, pos, false, &vd, &is_end); return vd.is_null ? 
0 : (reinterpret_cast<uint64_t>(vd.pointer) & 0xffffffffffff) | (static_cast<uint64_t>(vd.length) << 48); } extern "C" __device__ void linear_probabilistic_count(uint8_t* bitmap, const uint32_t bitmap_bytes, const uint8_t* key_bytes, const uint32_t key_len) { const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8); const uint32_t word_idx = bit_pos / 32; const uint32_t bit_idx = bit_pos % 32; atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx); } extern "C" __device__ void agg_count_distinct_bitmap_gpu(int64_t* agg, const int64_t val, const int64_t min_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { const uint64_t bitmap_idx = val - min_val; const uint32_t byte_idx = bitmap_idx >> 3; const uint32_t word_idx = byte_idx >> 2; const uint32_t byte_word_idx = byte_idx & 3; const int64_t host_addr = *agg; uint32_t* bitmap = (uint32_t*)(base_dev_addr + host_addr - base_host_addr + (threadIdx.x & (sub_bitmap_count - 1)) * bitmap_bytes); switch (byte_word_idx) { case 0: atomicOr(&bitmap[word_idx], 1 << (bitmap_idx & 7)); break; case 1: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 8)); break; case 2: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 16)); break; case 3: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 24)); break; default: break; } } extern "C" __device__ void agg_count_distinct_bitmap_skip_val_gpu( int64_t* agg, const int64_t val, const int64_t min_val, const int64_t skip_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { if (val != skip_val) { agg_count_distinct_bitmap_gpu( agg, val, min_val, base_dev_addr, base_host_addr, sub_bitmap_count, bitmap_bytes); } } extern "C" __device__ void agg_approximate_count_distinct_gpu( int64_t* agg, const int64_t key, const uint32_t b, const int64_t base_dev_addr, const int64_t base_host_addr) { const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0); const uint32_t index = hash >> (64 - b); const int32_t rank = get_rank(hash << b, 64 - b); const int64_t host_addr = *agg; int32_t* M = (int32_t*)(base_dev_addr + host_addr - base_host_addr); atomicMax(&M[index], rank); } extern "C" __device__ void force_sync() { __threadfence_block(); } extern "C" __device__ void sync_warp() { __syncwarp(); } /** * Protected warp synchornization to make sure all (or none) threads within a warp go * through a synchronization barrier. thread_pos: the current thread position to be used * for a memory access row_count: maximum number of rows to be processed The function * performs warp sync iff all 32 threads within that warp will process valid data NOTE: * it currently assumes that warp size is 32. */ extern "C" __device__ void sync_warp_protected(int64_t thread_pos, int64_t row_count) { // only syncing if NOT within the same warp as those threads experiencing the critical // edge if ((((row_count - 1) | 0x1F) - thread_pos) >= 32) { __syncwarp(); } } extern "C" __device__ void sync_threadblock() { __syncthreads(); } /* * Currently, we just use this function for handling non-grouped aggregates * with COUNT queries (with GPU shared memory used). Later, we should generate code for * this depending on the type of aggregate functions. * TODO: we should use one contiguous global memory buffer, rather than current default * behaviour of multiple buffers, each for one aggregate. 
Once that's resolved, we can * do this much more cleanly than in this function. */ extern "C" __device__ void write_back_non_grouped_agg(int64_t* input_buffer, int64_t* output_buffer, const int32_t agg_idx) { if (threadIdx.x == agg_idx) { agg_sum_shared(output_buffer, input_buffer[agg_idx]); } }
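// A minimal, self-contained sketch (hypothetical names: claim_or_match,
// group_count) of the empty-key CAS idiom used above by slotEmptyKeyCAS and
// get_matching_group_value: a hash slot holds a sentinel until some thread
// installs its key with atomicCAS; the winner owns the slot, threads carrying
// the same key match it, and anything else is a collision that probes onward.
// The real runtime additionally handles multi-component keys, spins until the
// winner has written the whole key, and returns a payload pointer; none of
// that is reproduced here. Illustration only, not part of this runtime.

#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

#define EMPTY_SLOT 0xFFFFFFFFFFFFFFFFULL  // sentinel: slot not claimed yet

// Returns true if this thread's key now owns, or already matched, the slot.
__device__ bool claim_or_match(int64_t* slot, int64_t key) {
  auto* slot_u64 = reinterpret_cast<unsigned long long*>(slot);
  const unsigned long long old =
      atomicCAS(slot_u64, EMPTY_SLOT, static_cast<unsigned long long>(key));
  return old == EMPTY_SLOT || static_cast<int64_t>(old) == key;
}

// COUNT(*) GROUP BY key over a tiny open-addressing hash table.
__global__ void group_count(int64_t* slots, int32_t* counts, int n_slots,
                            const int64_t* keys, int n_rows) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n_rows) return;
  const int64_t key = keys[i];
  for (int h = static_cast<int>(key % n_slots), probes = 0; probes < n_slots;
       ++probes, h = (h + 1) % n_slots) {
    if (claim_or_match(&slots[h], key)) {
      atomicAdd(&counts[h], 1);  // the slot's aggregate payload
      return;
    }
  }
}

int main() {
  const int n_slots = 8, n_rows = 6;
  int64_t h_keys[n_rows] = {3, 7, 3, 11, 7, 3};
  int64_t *d_slots, *d_keys;
  int32_t* d_counts;
  cudaMalloc(&d_slots, n_slots * sizeof(int64_t));
  cudaMalloc(&d_counts, n_slots * sizeof(int32_t));
  cudaMalloc(&d_keys, n_rows * sizeof(int64_t));
  cudaMemset(d_slots, 0xFF, n_slots * sizeof(int64_t));  // every byte 0xFF == EMPTY_SLOT
  cudaMemset(d_counts, 0, n_slots * sizeof(int32_t));
  cudaMemcpy(d_keys, h_keys, sizeof(h_keys), cudaMemcpyHostToDevice);
  group_count<<<1, 32>>>(d_slots, d_counts, n_slots, d_keys, n_rows);
  int64_t h_slots[n_slots];
  int32_t h_counts[n_slots];
  cudaMemcpy(h_slots, d_slots, sizeof(h_slots), cudaMemcpyDeviceToHost);
  cudaMemcpy(h_counts, d_counts, sizeof(h_counts), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n_slots; ++i) {
    if (static_cast<unsigned long long>(h_slots[i]) != EMPTY_SLOT) {
      printf("key %lld -> count %d\n", static_cast<long long>(h_slots[i]), h_counts[i]);
    }
  }
  cudaFree(d_slots);
  cudaFree(d_counts);
  cudaFree(d_keys);
  return 0;
}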
b4c8c874fc734e7e34273b6723be1fa8551314c1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "layer_params.h" void ConvLayerParams::initializeValues(cudnnHandle_t cudnn_handle, ConvDescriptor *user_params, cudnnDataType_t data_type, int batch_size, cudnnTensorFormat_t tensor_format, size_t data_type_size, LayerDimension &output_size) { // create tensor, filter, conv descriptor checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&output_tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&bias_desc)); checkCUDNN(cudnnCreateFilterDescriptor(&filter_desc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc)); C_in = user_params->input_channels; C_out = user_params->output_channels; filter_h = user_params->kernel_h; filter_w = user_params->kernel_w; checkCUDNN(cudnnSetTensor4dDescriptor( input_tensor, tensor_format, data_type, batch_size, user_params->input_channels, user_params->input_h, user_params->input_w)); checkCUDNN(cudnnSetFilter4dDescriptor( filter_desc, data_type, tensor_format, user_params->output_channels, user_params->input_channels, user_params->kernel_h, user_params->kernel_w)); int dilation_h = 1, dilation_w = 1; checkCUDNN(cudnnSetConvolution2dDescriptor( conv_desc, user_params->pad_h, user_params->pad_w, user_params->stride_y, user_params->stride_x, dilation_h, dilation_w, CUDNN_CROSS_CORRELATION, data_type)); int output_batch_size, output_channels, output_h, output_w; checkCUDNN(cudnnGetConvolution2dForwardOutputDim( conv_desc, input_tensor, filter_desc, &output_batch_size, &output_channels, &output_h, &output_w)); checkCUDNN(cudnnSetTensor4dDescriptor(output_tensor, tensor_format, data_type, output_batch_size, output_channels, output_h, output_w)); checkCUDNN(cudnnSetTensor4dDescriptor(bias_desc, tensor_format, data_type, 1, output_channels, 1, 1)); fwd_req_count = 10; fwd_perf = (cudnnConvolutionFwdAlgoPerf_t *)malloc( fwd_req_count * sizeof(cudnnConvolutionFwdAlgoPerf_t)); checkCUDNN(cudnnFindConvolutionForwardAlgorithm( cudnn_handle, input_tensor, filter_desc, conv_desc, output_tensor, fwd_req_count, &fwd_ret_count, fwd_perf)); bwd_filter_req_count = 10; bwd_filter_perf = (cudnnConvolutionBwdFilterAlgoPerf_t *)malloc( bwd_filter_req_count * sizeof(cudnnConvolutionBwdFilterAlgoPerf_t)); checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithm( cudnn_handle, input_tensor, output_tensor, conv_desc, filter_desc, bwd_filter_req_count, &bwd_filter_ret_count, bwd_filter_perf)); // std::cout << "Printing bwdfilter conv algo perf\n"; // for (int i = 0; i < bwd_filter_ret_count; i++) { // std::cout << i << std::endl; // std::cout << "algo: " << bwd_filter_perf[i].algo << std::endl; // std::cout << "status: " << // cudnnGetErrorString(bwd_filter_perf[i].status) << std::endl; // std::cout << "time(ms): " << bwd_filter_perf[i].time << std::endl; // std::cout << "memory(bytes): " << bwd_filter_perf[i].memory << // std::endl; // std::cout << "mathType: " << bwd_filter_perf[i].mathType << std::endl; // std::cout << std::endl; // } bwd_data_req_count = 10; bwd_data_perf = (cudnnConvolutionBwdDataAlgoPerf_t *)malloc( bwd_data_req_count * sizeof(cudnnConvolutionBwdDataAlgoPerf_t)); checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithm( cudnn_handle, filter_desc, output_tensor, conv_desc, input_tensor, bwd_data_req_count, &bwd_data_ret_count, bwd_data_perf)); output_size.N = output_batch_size, output_size.C = output_channels, output_size.H = output_h, output_size.W = output_w; } void ConvLayerParams::allocateSpace(hiprandGenerator_t curand_gen, 
cudnnDataType_t data_type, size_t data_type_size, float std_dev, size_t &free_bytes) { int kernel_size = C_out * C_in * filter_h * filter_w; if (kernel_size % 2 != 0) kernel_size += 1; checkCudaErrors(hipMalloc(&W, kernel_size * data_type_size)); checkCudaErrors(hipMalloc(&b, C_out * data_type_size)); checkCudaErrors(hipMalloc(&dW, kernel_size * data_type_size)); checkCudaErrors(hipMalloc(&db, C_out * data_type_size)); if (data_type == CUDNN_DATA_FLOAT) { checkCURAND( hiprandGenerateNormal(curand_gen, (float *)W, kernel_size, 0, std_dev)); hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * C_out / BW)), dim3(BW), 0, 0, (float *)b, C_out, 0); } else { checkCURAND(hiprandGenerateNormalDouble(curand_gen, (double *)W, kernel_size, 0, std_dev)); hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * C_out / BW)), dim3(BW), 0, 0, (double *)b, C_out, 0); } free_bytes = free_bytes - 2 * (kernel_size + C_out) * data_type_size; } void ConvLayerParams::getWorkspaceSize(size_t &workspace_size, size_t &free_bytes, ConvAlgo conv_algo) { int min_time; int min_index; size_t memory; size_t max_memory; min_time = std::numeric_limits<int>::max(); min_index = -1; if (conv_algo == CONV_ALGO_AUTO) { for (int i = 0; i < fwd_ret_count; i++) { if (fwd_perf[i].status == CUDNN_STATUS_SUCCESS && fwd_perf[i].memory < free_bytes && fwd_perf[i].time < min_time) { min_time = fwd_perf[i].time; min_index = i; } } memory = fwd_perf[min_index].memory; fwd_algo = fwd_perf[min_index].algo; max_memory = memory; min_time = std::numeric_limits<int>::max(); min_index = -1; for (int i = 0; i < bwd_filter_ret_count; i++) { if (bwd_filter_perf[i].status == CUDNN_STATUS_SUCCESS && bwd_filter_perf[i].memory < free_bytes && bwd_filter_perf[i].time < min_time) { min_time = bwd_filter_perf[i].time; min_index = i; } } memory = bwd_filter_perf[min_index].memory; bwd_filter_algo = bwd_filter_perf[min_index].algo; // std::cout << "ConvLayerParams: workspace, filter_algo: memory: " << // memory << " algo: " << bwd_filter_algo << std::endl; max_memory = (memory > max_memory) ? memory : max_memory; min_time = std::numeric_limits<int>::max(); min_index = -1; for (int i = 0; i < bwd_data_ret_count; i++) { if (bwd_data_perf[i].status == CUDNN_STATUS_SUCCESS && bwd_data_perf[i].memory < free_bytes && bwd_data_perf[i].time < min_time) { min_time = bwd_data_perf[i].time; min_index = i; } } memory = bwd_data_perf[min_index].memory; bwd_data_algo = bwd_data_perf[min_index].algo; max_memory = (memory > max_memory) ? memory : max_memory; } else if (conv_algo == CONV_ALGO_MEMORY_OPTIMAL) { for (int i = 0; i < fwd_ret_count; i++) { if (fwd_perf[i].algo == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM) { if (fwd_perf[i].status == CUDNN_STATUS_SUCCESS && fwd_perf[i].memory < free_bytes) { fwd_algo = fwd_perf[i].algo; memory = fwd_perf[i].memory; max_memory = memory; break; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } } } for (int i = 0; i < bwd_filter_ret_count; i++) { if (bwd_filter_perf[i].algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1) { if (bwd_filter_perf[i].status == CUDNN_STATUS_SUCCESS && bwd_filter_perf[i].memory < free_bytes) { bwd_filter_algo = bwd_filter_perf[i].algo; // std::cout << "Free bytes " << free_bytes << std::endl; // std::cout << "bwd_filter_perf[i].memory " << // bwd_filter_perf[i].memory << std::endl; memory = bwd_filter_perf[i].memory; max_memory = (memory > max_memory) ? 
memory : max_memory; break; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } } } for (int i = 0; i < bwd_data_ret_count; i++) { if (bwd_data_perf[i].algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_1) { if (bwd_data_perf[i].status == CUDNN_STATUS_SUCCESS && bwd_data_perf[i].memory < free_bytes) { bwd_data_algo = bwd_data_perf[i].algo; memory = bwd_data_perf[i].memory; max_memory = (memory > max_memory) ? memory : max_memory; break; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } } } } else if (conv_algo == CONV_ALGO_PERFORMANCE_OPTIMAL) { if (fwd_perf[0].status == CUDNN_STATUS_SUCCESS && fwd_perf[0].memory < free_bytes) { fwd_algo = fwd_perf[0].algo; memory = fwd_perf[0].memory; max_memory = memory; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } if (bwd_filter_perf[0].status == CUDNN_STATUS_SUCCESS && bwd_filter_perf[0].memory < free_bytes) { bwd_filter_algo = bwd_filter_perf[0].algo; // std::cout << "Free bytes " << free_bytes << std::endl; // std::cout << "bwd_filter_perf[i].memory " << bwd_filter_perf[i].memory // << std::endl; memory = bwd_filter_perf[0].memory; max_memory = (memory > max_memory) ? memory : max_memory; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } if (bwd_data_perf[0].status == CUDNN_STATUS_SUCCESS && bwd_data_perf[0].memory < free_bytes) { bwd_data_algo = bwd_data_perf[0].algo; memory = bwd_data_perf[0].memory; max_memory = (memory > max_memory) ? memory : max_memory; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } } workspace_size = max_memory; } void FCLayerParams::initializeValues(FCDescriptor *user_params, int batch_size, size_t data_type_size, LayerDimension &output_size) { C_in = user_params->input_channels; C_out = user_params->output_channels; output_size.N = batch_size, output_size.C = C_out, output_size.H = output_size.W = 1; } void FCLayerParams::allocateSpace(hiprandGenerator_t curand_gen, cudnnDataType_t data_type, size_t data_type_size, float std_dev, size_t &free_bytes) { int wt_alloc_size = C_in * C_out; if (wt_alloc_size % 2 != 0) wt_alloc_size += 1; checkCudaErrors(hipMalloc(&W, wt_alloc_size * data_type_size)); checkCudaErrors(hipMalloc(&b, C_out * data_type_size)); checkCudaErrors(hipMalloc(&dW, wt_alloc_size * data_type_size)); checkCudaErrors(hipMalloc(&db, C_out * data_type_size)); if (data_type == CUDNN_DATA_FLOAT) { checkCURAND(hiprandGenerateNormal(curand_gen, (float *)W, wt_alloc_size, 0, std_dev)); hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * C_out / BW)), dim3(BW), 0, 0, (float *)b, C_out, 0); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCURAND(hiprandGenerateNormalDouble(curand_gen, (double *)W, wt_alloc_size, 0, std_dev)); hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * C_out / BW)), dim3(BW), 0, 0, (double *)b, C_out, 0); } free_bytes = free_bytes - 2 * (C_in * C_out + C_out) * data_type_size; } void DropoutLayerParams::initializeValues(cudnnHandle_t cudnn_handle, DropoutDescriptor *user_params, cudnnDataType_t data_type, int batch_size, cudnnTensorFormat_t tensor_format, LayerDimension &output_size) { checkCUDNN(cudnnCreateDropoutDescriptor(&dropout_desc)); checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type, batch_size, user_params->channels, user_params->h, user_params->w)); checkCUDNN(cudnnDropoutGetStatesSize(cudnn_handle, &state_size)); checkCUDNN( 
cudnnDropoutGetReserveSpaceSize(input_tensor, &reserved_space_size)); output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w; } void DropoutLayerParams::allocateSpace(size_t &free_bytes, cudnnHandle_t cudnn_handle, DropoutDescriptor *user_params, long long seed) { checkCudaErrors(hipMalloc(&state, state_size)); checkCudaErrors(hipMalloc(&reserved_space, reserved_space_size)); checkCUDNN(cudnnSetDropoutDescriptor(dropout_desc, cudnn_handle, user_params->dropout_value, state, state_size, seed)); free_bytes = free_bytes - (state_size + reserved_space_size); } void BatchNormLayerParams::initializeValues(BatchNormDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) { checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&sbmv_desc)); c = user_params->channels, h = user_params->h, w = user_params->w; if (user_params->mode == BATCHNORM_PER_ACTIVATION) { mode = CUDNN_BATCHNORM_PER_ACTIVATION; checkCUDNN(cudnnSetTensor4dDescriptor(sbmv_desc, tensor_format, data_type, 1, user_params->channels, user_params->h, user_params->w)); sbmv_size = c * h * w; } else if (user_params->mode == BATCHNORM_SPATIAL) { mode = CUDNN_BATCHNORM_SPATIAL; checkCUDNN(cudnnSetTensor4dDescriptor(sbmv_desc, tensor_format, data_type, 1, user_params->channels, 1, 1)); sbmv_size = c; } checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type, batch_size, user_params->channels, user_params->h, user_params->w)); factor = user_params->factor; epsilon = user_params->epsilon; output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w; } void BatchNormLayerParams::allocateSpace(cudnnDataType_t data_type, size_t data_type_size, size_t &free_bytes) { size_t allocation_size; if (mode == CUDNN_BATCHNORM_PER_ACTIVATION) allocation_size = c * h * w; else allocation_size = c; allocation_size *= data_type_size; checkCudaErrors(hipMalloc(&scale, allocation_size)); checkCudaErrors(hipMalloc(&bias, allocation_size)); checkCudaErrors(hipMalloc(&dscale, allocation_size)); checkCudaErrors(hipMalloc(&dbias, allocation_size)); checkCudaErrors(hipMalloc(&running_mean, allocation_size)); checkCudaErrors(hipMalloc(&running_variance, allocation_size)); checkCudaErrors(hipMalloc(&result_save_mean, allocation_size)); checkCudaErrors(hipMalloc(&result_save_inv_var, allocation_size)); int num_elements = allocation_size / data_type_size; if (data_type == CUDNN_DATA_FLOAT) { hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * num_elements / BW)), dim3(BW), 0, 0, (float *)scale, num_elements, 1); hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * num_elements / BW)), dim3(BW), 0, 0, (float *)bias, num_elements, 1); } else if (data_type == CUDNN_DATA_DOUBLE) { hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * num_elements / BW)), dim3(BW), 0, 0, (double *)scale, num_elements, 1); hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * num_elements / BW)), dim3(BW), 0, 0, (double *)bias, num_elements, 1); } free_bytes = free_bytes - 6 * allocation_size; } void PoolingLayerParams::initializeValues(PoolingDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) { checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&output_tensor)); 
checkCUDNN(cudnnSetTensor4dDescriptor( input_tensor, tensor_format, data_type, batch_size, user_params->input_channels, user_params->input_h, user_params->input_w)); checkCUDNN(cudnnCreatePoolingDescriptor(&pool_desc)); cudnnPoolingMode_t mode; if (user_params->mode == POOLING_MAX) mode = CUDNN_POOLING_MAX; else if (user_params->mode == POOLING_AVERAGE_COUNT_INCLUDE_PADDING) mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING; else if (user_params->mode == POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) mode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; checkCUDNN(cudnnSetPooling2dDescriptor( pool_desc, mode, CUDNN_PROPAGATE_NAN, user_params->kernel_h, user_params->kernel_w, user_params->pad_h, user_params->pad_w, user_params->stride_y, user_params->stride_x)); int output_batch_size, output_channels, output_h, output_w; checkCUDNN(cudnnGetPooling2dForwardOutputDim( pool_desc, input_tensor, &output_batch_size, &output_channels, &output_h, &output_w)); checkCUDNN(cudnnSetTensor4dDescriptor(output_tensor, tensor_format, data_type, output_batch_size, output_channels, output_h, output_w)); output_size.N = output_batch_size, output_size.C = output_channels, output_size.H = output_h, output_size.W = output_w; } void PoolingLayerParams::allocateSpace(size_t &free_bytes) {} void ActivationLayerParams::initializeValues(ActivationDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) { checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type, batch_size, user_params->channels, user_params->h, user_params->w)); cudnnActivationMode_t mode; if (user_params->mode == SIGMOID) mode = CUDNN_ACTIVATION_SIGMOID; else if (user_params->mode == RELU) mode = CUDNN_ACTIVATION_RELU; else if (user_params->mode == TANH) mode = CUDNN_ACTIVATION_TANH; else if (user_params->mode == CLIPPED_RELU) mode = CUDNN_ACTIVATION_CLIPPED_RELU; else if (user_params->mode == ELU) mode = CUDNN_ACTIVATION_ELU; checkCUDNN(cudnnCreateActivationDescriptor(&actv_desc)); checkCUDNN(cudnnSetActivationDescriptor(actv_desc, mode, CUDNN_PROPAGATE_NAN, user_params->coef)); output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w; } void ActivationLayerParams::allocateSpace(size_t &free_bytes) {} void SoftmaxLayerParams::initializeValues(SoftmaxDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) { if (user_params->algo == SOFTMAX_FAST) algo = CUDNN_SOFTMAX_FAST; else if (user_params->algo == SOFTMAX_ACCURATE) algo = CUDNN_SOFTMAX_ACCURATE; if (user_params->mode == SOFTMAX_MODE_INSTANCE) mode = CUDNN_SOFTMAX_MODE_INSTANCE; else if (user_params->mode == SOFTMAX_MODE_CHANNEL) { mode = CUDNN_SOFTMAX_MODE_CHANNEL; } checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type, batch_size, user_params->channels, user_params->h, user_params->w)); output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w; } void SoftmaxLayerParams::allocateSpace(size_t &free_bytes) {}
b4c8c874fc734e7e34273b6723be1fa8551314c1.cu
#include "layer_params.h" void ConvLayerParams::initializeValues(cudnnHandle_t cudnn_handle, ConvDescriptor *user_params, cudnnDataType_t data_type, int batch_size, cudnnTensorFormat_t tensor_format, size_t data_type_size, LayerDimension &output_size) { // create tensor, filter, conv descriptor checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&output_tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&bias_desc)); checkCUDNN(cudnnCreateFilterDescriptor(&filter_desc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc)); C_in = user_params->input_channels; C_out = user_params->output_channels; filter_h = user_params->kernel_h; filter_w = user_params->kernel_w; checkCUDNN(cudnnSetTensor4dDescriptor( input_tensor, tensor_format, data_type, batch_size, user_params->input_channels, user_params->input_h, user_params->input_w)); checkCUDNN(cudnnSetFilter4dDescriptor( filter_desc, data_type, tensor_format, user_params->output_channels, user_params->input_channels, user_params->kernel_h, user_params->kernel_w)); int dilation_h = 1, dilation_w = 1; checkCUDNN(cudnnSetConvolution2dDescriptor( conv_desc, user_params->pad_h, user_params->pad_w, user_params->stride_y, user_params->stride_x, dilation_h, dilation_w, CUDNN_CROSS_CORRELATION, data_type)); int output_batch_size, output_channels, output_h, output_w; checkCUDNN(cudnnGetConvolution2dForwardOutputDim( conv_desc, input_tensor, filter_desc, &output_batch_size, &output_channels, &output_h, &output_w)); checkCUDNN(cudnnSetTensor4dDescriptor(output_tensor, tensor_format, data_type, output_batch_size, output_channels, output_h, output_w)); checkCUDNN(cudnnSetTensor4dDescriptor(bias_desc, tensor_format, data_type, 1, output_channels, 1, 1)); fwd_req_count = 10; fwd_perf = (cudnnConvolutionFwdAlgoPerf_t *)malloc( fwd_req_count * sizeof(cudnnConvolutionFwdAlgoPerf_t)); checkCUDNN(cudnnFindConvolutionForwardAlgorithm( cudnn_handle, input_tensor, filter_desc, conv_desc, output_tensor, fwd_req_count, &fwd_ret_count, fwd_perf)); bwd_filter_req_count = 10; bwd_filter_perf = (cudnnConvolutionBwdFilterAlgoPerf_t *)malloc( bwd_filter_req_count * sizeof(cudnnConvolutionBwdFilterAlgoPerf_t)); checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithm( cudnn_handle, input_tensor, output_tensor, conv_desc, filter_desc, bwd_filter_req_count, &bwd_filter_ret_count, bwd_filter_perf)); // std::cout << "Printing bwdfilter conv algo perf\n"; // for (int i = 0; i < bwd_filter_ret_count; i++) { // std::cout << i << std::endl; // std::cout << "algo: " << bwd_filter_perf[i].algo << std::endl; // std::cout << "status: " << // cudnnGetErrorString(bwd_filter_perf[i].status) << std::endl; // std::cout << "time(ms): " << bwd_filter_perf[i].time << std::endl; // std::cout << "memory(bytes): " << bwd_filter_perf[i].memory << // std::endl; // std::cout << "mathType: " << bwd_filter_perf[i].mathType << std::endl; // std::cout << std::endl; // } bwd_data_req_count = 10; bwd_data_perf = (cudnnConvolutionBwdDataAlgoPerf_t *)malloc( bwd_data_req_count * sizeof(cudnnConvolutionBwdDataAlgoPerf_t)); checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithm( cudnn_handle, filter_desc, output_tensor, conv_desc, input_tensor, bwd_data_req_count, &bwd_data_ret_count, bwd_data_perf)); output_size.N = output_batch_size, output_size.C = output_channels, output_size.H = output_h, output_size.W = output_w; } void ConvLayerParams::allocateSpace(curandGenerator_t curand_gen, cudnnDataType_t data_type, size_t data_type_size, float std_dev, size_t &free_bytes) { 
int kernel_size = C_out * C_in * filter_h * filter_w; if (kernel_size % 2 != 0) kernel_size += 1; checkCudaErrors(cudaMalloc(&W, kernel_size * data_type_size)); checkCudaErrors(cudaMalloc(&b, C_out * data_type_size)); checkCudaErrors(cudaMalloc(&dW, kernel_size * data_type_size)); checkCudaErrors(cudaMalloc(&db, C_out * data_type_size)); if (data_type == CUDNN_DATA_FLOAT) { checkCURAND( curandGenerateNormal(curand_gen, (float *)W, kernel_size, 0, std_dev)); fillValue<float><<<ceil(1.0 * C_out / BW), BW>>>((float *)b, C_out, 0); } else { checkCURAND(curandGenerateNormalDouble(curand_gen, (double *)W, kernel_size, 0, std_dev)); fillValue<double><<<ceil(1.0 * C_out / BW), BW>>>((double *)b, C_out, 0); } free_bytes = free_bytes - 2 * (kernel_size + C_out) * data_type_size; } void ConvLayerParams::getWorkspaceSize(size_t &workspace_size, size_t &free_bytes, ConvAlgo conv_algo) { int min_time; int min_index; size_t memory; size_t max_memory; min_time = std::numeric_limits<int>::max(); min_index = -1; if (conv_algo == CONV_ALGO_AUTO) { for (int i = 0; i < fwd_ret_count; i++) { if (fwd_perf[i].status == CUDNN_STATUS_SUCCESS && fwd_perf[i].memory < free_bytes && fwd_perf[i].time < min_time) { min_time = fwd_perf[i].time; min_index = i; } } memory = fwd_perf[min_index].memory; fwd_algo = fwd_perf[min_index].algo; max_memory = memory; min_time = std::numeric_limits<int>::max(); min_index = -1; for (int i = 0; i < bwd_filter_ret_count; i++) { if (bwd_filter_perf[i].status == CUDNN_STATUS_SUCCESS && bwd_filter_perf[i].memory < free_bytes && bwd_filter_perf[i].time < min_time) { min_time = bwd_filter_perf[i].time; min_index = i; } } memory = bwd_filter_perf[min_index].memory; bwd_filter_algo = bwd_filter_perf[min_index].algo; // std::cout << "ConvLayerParams: workspace, filter_algo: memory: " << // memory << " algo: " << bwd_filter_algo << std::endl; max_memory = (memory > max_memory) ? memory : max_memory; min_time = std::numeric_limits<int>::max(); min_index = -1; for (int i = 0; i < bwd_data_ret_count; i++) { if (bwd_data_perf[i].status == CUDNN_STATUS_SUCCESS && bwd_data_perf[i].memory < free_bytes && bwd_data_perf[i].time < min_time) { min_time = bwd_data_perf[i].time; min_index = i; } } memory = bwd_data_perf[min_index].memory; bwd_data_algo = bwd_data_perf[min_index].algo; max_memory = (memory > max_memory) ? memory : max_memory; } else if (conv_algo == CONV_ALGO_MEMORY_OPTIMAL) { for (int i = 0; i < fwd_ret_count; i++) { if (fwd_perf[i].algo == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM) { if (fwd_perf[i].status == CUDNN_STATUS_SUCCESS && fwd_perf[i].memory < free_bytes) { fwd_algo = fwd_perf[i].algo; memory = fwd_perf[i].memory; max_memory = memory; break; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } } } for (int i = 0; i < bwd_filter_ret_count; i++) { if (bwd_filter_perf[i].algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1) { if (bwd_filter_perf[i].status == CUDNN_STATUS_SUCCESS && bwd_filter_perf[i].memory < free_bytes) { bwd_filter_algo = bwd_filter_perf[i].algo; // std::cout << "Free bytes " << free_bytes << std::endl; // std::cout << "bwd_filter_perf[i].memory " << // bwd_filter_perf[i].memory << std::endl; memory = bwd_filter_perf[i].memory; max_memory = (memory > max_memory) ? 
memory : max_memory; break; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } } } for (int i = 0; i < bwd_data_ret_count; i++) { if (bwd_data_perf[i].algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_1) { if (bwd_data_perf[i].status == CUDNN_STATUS_SUCCESS && bwd_data_perf[i].memory < free_bytes) { bwd_data_algo = bwd_data_perf[i].algo; memory = bwd_data_perf[i].memory; max_memory = (memory > max_memory) ? memory : max_memory; break; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } } } } else if (conv_algo == CONV_ALGO_PERFORMANCE_OPTIMAL) { if (fwd_perf[0].status == CUDNN_STATUS_SUCCESS && fwd_perf[0].memory < free_bytes) { fwd_algo = fwd_perf[0].algo; memory = fwd_perf[0].memory; max_memory = memory; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } if (bwd_filter_perf[0].status == CUDNN_STATUS_SUCCESS && bwd_filter_perf[0].memory < free_bytes) { bwd_filter_algo = bwd_filter_perf[0].algo; // std::cout << "Free bytes " << free_bytes << std::endl; // std::cout << "bwd_filter_perf[i].memory " << bwd_filter_perf[i].memory // << std::endl; memory = bwd_filter_perf[0].memory; max_memory = (memory > max_memory) ? memory : max_memory; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } if (bwd_data_perf[0].status == CUDNN_STATUS_SUCCESS && bwd_data_perf[0].memory < free_bytes) { bwd_data_algo = bwd_data_perf[0].algo; memory = bwd_data_perf[0].memory; max_memory = (memory > max_memory) ? memory : max_memory; } else { std::cout << "workspace: bad_status or out of memory\n"; exit(0); } } workspace_size = max_memory; } void FCLayerParams::initializeValues(FCDescriptor *user_params, int batch_size, size_t data_type_size, LayerDimension &output_size) { C_in = user_params->input_channels; C_out = user_params->output_channels; output_size.N = batch_size, output_size.C = C_out, output_size.H = output_size.W = 1; } void FCLayerParams::allocateSpace(curandGenerator_t curand_gen, cudnnDataType_t data_type, size_t data_type_size, float std_dev, size_t &free_bytes) { int wt_alloc_size = C_in * C_out; if (wt_alloc_size % 2 != 0) wt_alloc_size += 1; checkCudaErrors(cudaMalloc(&W, wt_alloc_size * data_type_size)); checkCudaErrors(cudaMalloc(&b, C_out * data_type_size)); checkCudaErrors(cudaMalloc(&dW, wt_alloc_size * data_type_size)); checkCudaErrors(cudaMalloc(&db, C_out * data_type_size)); if (data_type == CUDNN_DATA_FLOAT) { checkCURAND(curandGenerateNormal(curand_gen, (float *)W, wt_alloc_size, 0, std_dev)); fillValue<float><<<ceil(1.0 * C_out / BW), BW>>>((float *)b, C_out, 0); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCURAND(curandGenerateNormalDouble(curand_gen, (double *)W, wt_alloc_size, 0, std_dev)); fillValue<double><<<ceil(1.0 * C_out / BW), BW>>>((double *)b, C_out, 0); } free_bytes = free_bytes - 2 * (C_in * C_out + C_out) * data_type_size; } void DropoutLayerParams::initializeValues(cudnnHandle_t cudnn_handle, DropoutDescriptor *user_params, cudnnDataType_t data_type, int batch_size, cudnnTensorFormat_t tensor_format, LayerDimension &output_size) { checkCUDNN(cudnnCreateDropoutDescriptor(&dropout_desc)); checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type, batch_size, user_params->channels, user_params->h, user_params->w)); checkCUDNN(cudnnDropoutGetStatesSize(cudnn_handle, &state_size)); checkCUDNN( cudnnDropoutGetReserveSpaceSize(input_tensor, &reserved_space_size)); output_size.N = batch_size, output_size.C = 
user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w; } void DropoutLayerParams::allocateSpace(size_t &free_bytes, cudnnHandle_t cudnn_handle, DropoutDescriptor *user_params, long long seed) { checkCudaErrors(cudaMalloc(&state, state_size)); checkCudaErrors(cudaMalloc(&reserved_space, reserved_space_size)); checkCUDNN(cudnnSetDropoutDescriptor(dropout_desc, cudnn_handle, user_params->dropout_value, state, state_size, seed)); free_bytes = free_bytes - (state_size + reserved_space_size); } void BatchNormLayerParams::initializeValues(BatchNormDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) { checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&sbmv_desc)); c = user_params->channels, h = user_params->h, w = user_params->w; if (user_params->mode == BATCHNORM_PER_ACTIVATION) { mode = CUDNN_BATCHNORM_PER_ACTIVATION; checkCUDNN(cudnnSetTensor4dDescriptor(sbmv_desc, tensor_format, data_type, 1, user_params->channels, user_params->h, user_params->w)); sbmv_size = c * h * w; } else if (user_params->mode == BATCHNORM_SPATIAL) { mode = CUDNN_BATCHNORM_SPATIAL; checkCUDNN(cudnnSetTensor4dDescriptor(sbmv_desc, tensor_format, data_type, 1, user_params->channels, 1, 1)); sbmv_size = c; } checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type, batch_size, user_params->channels, user_params->h, user_params->w)); factor = user_params->factor; epsilon = user_params->epsilon; output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w; } void BatchNormLayerParams::allocateSpace(cudnnDataType_t data_type, size_t data_type_size, size_t &free_bytes) { size_t allocation_size; if (mode == CUDNN_BATCHNORM_PER_ACTIVATION) allocation_size = c * h * w; else allocation_size = c; allocation_size *= data_type_size; checkCudaErrors(cudaMalloc(&scale, allocation_size)); checkCudaErrors(cudaMalloc(&bias, allocation_size)); checkCudaErrors(cudaMalloc(&dscale, allocation_size)); checkCudaErrors(cudaMalloc(&dbias, allocation_size)); checkCudaErrors(cudaMalloc(&running_mean, allocation_size)); checkCudaErrors(cudaMalloc(&running_variance, allocation_size)); checkCudaErrors(cudaMalloc(&result_save_mean, allocation_size)); checkCudaErrors(cudaMalloc(&result_save_inv_var, allocation_size)); int num_elements = allocation_size / data_type_size; if (data_type == CUDNN_DATA_FLOAT) { fillValue<float><<<ceil(1.0 * num_elements / BW), BW>>>((float *)scale, num_elements, 1); fillValue<float><<<ceil(1.0 * num_elements / BW), BW>>>((float *)bias, num_elements, 1); } else if (data_type == CUDNN_DATA_DOUBLE) { fillValue<double><<<ceil(1.0 * num_elements / BW), BW>>>((double *)scale, num_elements, 1); fillValue<double><<<ceil(1.0 * num_elements / BW), BW>>>((double *)bias, num_elements, 1); } free_bytes = free_bytes - 6 * allocation_size; } void PoolingLayerParams::initializeValues(PoolingDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) { checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&output_tensor)); checkCUDNN(cudnnSetTensor4dDescriptor( input_tensor, tensor_format, data_type, batch_size, user_params->input_channels, user_params->input_h, user_params->input_w)); checkCUDNN(cudnnCreatePoolingDescriptor(&pool_desc)); cudnnPoolingMode_t mode; if (user_params->mode == 
POOLING_MAX) mode = CUDNN_POOLING_MAX; else if (user_params->mode == POOLING_AVERAGE_COUNT_INCLUDE_PADDING) mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING; else if (user_params->mode == POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) mode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; checkCUDNN(cudnnSetPooling2dDescriptor( pool_desc, mode, CUDNN_PROPAGATE_NAN, user_params->kernel_h, user_params->kernel_w, user_params->pad_h, user_params->pad_w, user_params->stride_y, user_params->stride_x)); int output_batch_size, output_channels, output_h, output_w; checkCUDNN(cudnnGetPooling2dForwardOutputDim( pool_desc, input_tensor, &output_batch_size, &output_channels, &output_h, &output_w)); checkCUDNN(cudnnSetTensor4dDescriptor(output_tensor, tensor_format, data_type, output_batch_size, output_channels, output_h, output_w)); output_size.N = output_batch_size, output_size.C = output_channels, output_size.H = output_h, output_size.W = output_w; } void PoolingLayerParams::allocateSpace(size_t &free_bytes) {} void ActivationLayerParams::initializeValues(ActivationDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) { checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type, batch_size, user_params->channels, user_params->h, user_params->w)); cudnnActivationMode_t mode; if (user_params->mode == SIGMOID) mode = CUDNN_ACTIVATION_SIGMOID; else if (user_params->mode == RELU) mode = CUDNN_ACTIVATION_RELU; else if (user_params->mode == TANH) mode = CUDNN_ACTIVATION_TANH; else if (user_params->mode == CLIPPED_RELU) mode = CUDNN_ACTIVATION_CLIPPED_RELU; else if (user_params->mode == ELU) mode = CUDNN_ACTIVATION_ELU; checkCUDNN(cudnnCreateActivationDescriptor(&actv_desc)); checkCUDNN(cudnnSetActivationDescriptor(actv_desc, mode, CUDNN_PROPAGATE_NAN, user_params->coef)); output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w; } void ActivationLayerParams::allocateSpace(size_t &free_bytes) {} void SoftmaxLayerParams::initializeValues(SoftmaxDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) { if (user_params->algo == SOFTMAX_FAST) algo = CUDNN_SOFTMAX_FAST; else if (user_params->algo == SOFTMAX_ACCURATE) algo = CUDNN_SOFTMAX_ACCURATE; if (user_params->mode == SOFTMAX_MODE_INSTANCE) mode = CUDNN_SOFTMAX_MODE_INSTANCE; else if (user_params->mode == SOFTMAX_MODE_CHANNEL) { mode = CUDNN_SOFTMAX_MODE_CHANNEL; } checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor)); checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type, batch_size, user_params->channels, user_params->h, user_params->w)); output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w; } void SoftmaxLayerParams::allocateSpace(size_t &free_bytes) {}
af9f289fb621ff6b0c920307766c92b1517e9513.hip
// !!! This is a file automatically generated by hipify!!! #pragma once #include <hip/hip_runtime.h> // needed for hipResourceDesc, hipTextureDesc, hipArray and hipTextureObject_t #include <vector> #include <string> #include <cassert> #include "Vector3.cuh.cu" namespace RayTracing { class Image { private: int m_width; int m_height; hipResourceDesc m_cudaTextureResourceDesc; hipTextureDesc m_cudaTextureDesc; hipArray *m_buffer_d = nullptr; public: std::vector<Color> buffer; hipTextureObject_t cudaTexture; public: Image(const std::string &fileName); template<bool isGPU> void Init() { assert(("Not implemented", false)); } template<bool isGPU, typename T> T GetResource() { assert(("Not implemented", false)); } Color GetColor(const float u, const float v) const; void Deinit(); }; };
af9f289fb621ff6b0c920307766c92b1517e9513.cu
#pragma once #include <vector> #include <string> #include <cassert> #include "Vector3.cuh.cu" namespace RayTracing { class Image { private: int m_width; int m_height; cudaResourceDesc m_cudaTextureResourceDesc; cudaTextureDesc m_cudaTextureDesc; cudaArray *m_buffer_d = nullptr; public: std::vector<Color> buffer; cudaTextureObject_t cudaTexture; public: Image(const std::string &fileName); template<bool isGPU> void Init() { assert(("Not implemented", false)); } template<bool isGPU, typename T> T GetResource() { assert(("Not implemented", false)); } Color GetColor(const float u, const float v) const; void Deinit(); }; };
f717a381aa289d4d44db510426cd63a6e7658cad.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <math_constants.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <c10/hip/HIPException.h> at::hip::HIPStreamMasqueradingAsCUDA stream() { return at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); }
f717a381aa289d4d44db510426cd63a6e7658cad.cu
#include <math.h> #include <math_constants.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <c10/cuda/CUDAException.h> at::cuda::CUDAStream stream() { return at::cuda::getCurrentCUDAStream(); }
459bb54800ee5bfaaf93efb284a76000f752019f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "sqr_mag_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *data = NULL; hipMalloc(&data, XSIZE*YSIZE); float *result = NULL; hipMalloc(&result, XSIZE*YSIZE); int total = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( sqr_mag_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,result,total); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( sqr_mag_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,result,total); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( sqr_mag_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, data,result,total); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
459bb54800ee5bfaaf93efb284a76000f752019f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "sqr_mag_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *data = NULL; cudaMalloc(&data, XSIZE*YSIZE); float *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); int total = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); sqr_mag_kernel<<<gridBlock,threadBlock>>>(data,result,total); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { sqr_mag_kernel<<<gridBlock,threadBlock>>>(data,result,total); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { sqr_mag_kernel<<<gridBlock,threadBlock>>>(data,result,total); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
04f641b5bb00b346a047e3fce3aac0a1620541f8.hip
// !!! This is a file automatically generated by hipify!!! //------------------------------------------------------------------------------------------ // // // Created on: 1/31/2015 // Author: Nghia Truong // //------------------------------------------------------------------------------------------ #include "helper_cuda.h" #include <math.h> #include "cutil_math_ext.h" #include "memory_manager.h" #include "definitions.h" #include "monitor.h" MemoryManager::MemoryManager(SimulationParameters& simParams): simParams_(simParams) { Monitor::recordEvent("Allocate memory"); // printf("total : %d\n", simParams.num_total_particle); // printf("sph: %d\n", simParams.num_sph_particle); // printf("pd: %d\n", simParams.num_pd_particle); // all particles size_t sizeREAL = sizeof(real_t) * simParams.num_sph_particle; size_t sizeInt = sizeof(int) * simParams.num_sph_particle; sizeMap[SPH_TIMESTEP] = sizeInt; typeMap[SPH_TIMESTEP] = INT32_TYPE; sizeMap[SPH_VALIDITY] = sizeInt; typeMap[SPH_VALIDITY] = INT32_TYPE; sizeMap[SPH_ACTIVITY] = sizeInt; typeMap[SPH_ACTIVITY] = INT32_TYPE; sizeMap[SPH_POSITION] = sizeREAL * 4; typeMap[SPH_POSITION] = REAL4_TYPE; sizeMap[SPH_VELOCITY] = sizeREAL * 4; typeMap[SPH_VELOCITY] = REAL4_TYPE; sizeMap[SPH_FORCE] = sizeREAL * 4; typeMap[SPH_FORCE] = REAL4_TYPE; sizeMap[SPH_SORTED_POSITION] = sizeREAL * 4; typeMap[SPH_SORTED_POSITION] = REAL4_TYPE; sizeMap[SPH_SORTED_VELOCITY] = sizeREAL * 4; typeMap[SPH_SORTED_VELOCITY] = REAL4_TYPE; sizeMap[SPH_SORTED_DENSITY] = sizeREAL; typeMap[SPH_SORTED_DENSITY] = REAL_TYPE; sizeMap[SPH_SORTED_DENSITY_NORMALIZED] = sizeREAL; typeMap[SPH_SORTED_DENSITY_NORMALIZED] = REAL_TYPE; sizeMap[SPH_SORTED_PRESSURE] = sizeREAL; typeMap[SPH_SORTED_PRESSURE] = REAL_TYPE; sizeREAL = sizeof(real_t) * (2 * (simParams.boundaryPlaneSize + 2) * (simParams.boundaryPlaneSize + 2) + 4 * simParams.boundaryPlaneSize * (simParams.boundaryPlaneSize + 1)); sizeMap[SPH_BOUNDARY_POSITION] = sizeREAL * 4; typeMap[SPH_BOUNDARY_POSITION] = REAL4_TYPE; // Peridynamics particles only sizeREAL = sizeof(real_t) * simParams.num_pd_particle; sizeInt = sizeof(int) * simParams.num_pd_particle; sizeMap[PD_ACTIVITY] = sizeInt; typeMap[PD_ACTIVITY] = INT32_TYPE; sizeMap[PD_POSITION] = sizeREAL * 4; typeMap[PD_POSITION] = REAL4_TYPE; sizeMap[PD_POSITION_BACKUP] = sizeREAL * 4; typeMap[PD_POSITION_BACKUP] = REAL4_TYPE; sizeMap[PD_VELOCITY] = sizeREAL * 4; typeMap[PD_VELOCITY] = REAL4_TYPE; sizeMap[PD_VELOCITY_BACKUP] = sizeREAL * 4; typeMap[PD_VELOCITY_BACKUP] = REAL4_TYPE; sizeMap[PD_FORCE] = sizeREAL * 4; typeMap[PD_FORCE] = REAL4_TYPE; sizeMap[PD_SORTED_POSITION] = sizeREAL * 4; typeMap[PD_SORTED_POSITION] = REAL4_TYPE; sizeMap[PD_SORTED_VELOCITY] = sizeREAL * 4; typeMap[PD_SORTED_VELOCITY] = REAL4_TYPE; sizeMap[PD_ORIGINAL_POSITION] = sizeREAL * 4; typeMap[PD_ORIGINAL_POSITION] = REAL4_TYPE; sizeMap[PD_ORIGINAL_STRETCH] = sizeREAL; typeMap[PD_ORIGINAL_STRETCH] = REAL4_TYPE; sizeMap[PD_ORIGINAL_BOND_LIST_TOP] = sizeInt; typeMap[PD_ORIGINAL_BOND_LIST_TOP] = INT32_TYPE; sizeMap[PD_STRETCH] = sizeREAL; typeMap[PD_STRETCH] = REAL_TYPE; sizeMap[PD_NEW_STRETCH] = sizeREAL; typeMap[PD_NEW_STRETCH] = REAL_TYPE; sizeMap[PD_BOND_LIST_TOP] = sizeInt; typeMap[PD_BOND_LIST_TOP] = INT32_TYPE; sizeMap[PD_BOND_LIST_TOP_BACKUP] = sizeInt; typeMap[PD_BOND_LIST_TOP_BACKUP] = INT32_TYPE; sizeMap[PD_BOND_LIST] = sizeInt * MAX_PD_BOND_COUNT; typeMap[PD_BOND_LIST] = INT32_TYPE; sizeMap[PD_CLIST] = sizeof(Clist) * simParams.num_clists; typeMap[PD_CLIST] = STRUCT; sizeMap[PD_SYSTEM_MATRIX] = 
simParams.num_pd_particle * MAX_PD_BOND_COUNT * SIZE_MAT_3X3; typeMap[PD_SYSTEM_MATRIX] = STRUCT; sizeMap[PD_SYSTEM_VECTOR] = sizeREAL * 4; typeMap[PD_SYSTEM_VECTOR] = REAL4_TYPE; sizeMap[PD_SYSTEM_SOLUTION] = sizeREAL * 4; typeMap[PD_SYSTEM_SOLUTION] = REAL4_TYPE; sizeMap[PD_HAS_BROKEN_BOND] = sizeof(int); typeMap[PD_HAS_BROKEN_BOND] = INT32_TYPE; // grid cells sizeInt = sizeof(int) * simParams.num_cells; sizeMap[CELL_PARTICLE_TYPE] = sizeInt; typeMap[CELL_PARTICLE_TYPE] = INT32_TYPE; sizeInt = sizeof(int) * simParams.num_sph_particle; sizeMap[SPH_PARTICLE_TO_CELL_HASH] = sizeInt; typeMap[SPH_PARTICLE_TO_CELL_HASH] = INT32_TYPE; sizeMap[SPH_PARTICLE_UNSORTED_INDEX] = sizeInt; typeMap[SPH_PARTICLE_UNSORTED_INDEX] = INT32_TYPE; sizeInt = sizeof(int) * simParams.num_cells; sizeMap[SPH_CELL_START_INDEX] = sizeInt; typeMap[SPH_CELL_START_INDEX] = INT32_TYPE; sizeMap[SPH_CELL_END_INDEX] = sizeInt; typeMap[SPH_CELL_END_INDEX] = INT32_TYPE; sizeInt = sizeof(int) * simParams.num_pd_particle; sizeMap[PD_PARTICLE_TO_CELL_HASH] = sizeInt; typeMap[PD_PARTICLE_TO_CELL_HASH] = INT32_TYPE; sizeMap[PD_PARTICLE_UNSORTED_INDEX] = sizeInt; typeMap[PD_PARTICLE_UNSORTED_INDEX] = INT32_TYPE; sizeInt = sizeof(int) * simParams.num_cells; sizeMap[PD_CELL_START_INDEX] = sizeInt; typeMap[PD_CELL_START_INDEX] = INT32_TYPE; sizeMap[PD_CELL_END_INDEX] = sizeInt; typeMap[PD_CELL_END_INDEX] = INT32_TYPE; TRUE_OR_DIE(sizeMap.size() == NUM_VARIABLES, "Ohh, you've omitted to initialize some variables...."); TRUE_OR_DIE(typeMap.size() == NUM_VARIABLES, "Ohh, you've omitted to initialize some variables...."); // map variable to string variable2NameMap[SPH_TIMESTEP] = "SPH_TIMESTEP"; variable2NameMap[SPH_VALIDITY] = "SPH_VALIDITY"; variable2NameMap[SPH_ACTIVITY] = "SPH_ACTIVITY"; variable2NameMap[SPH_POSITION] = "SPH_POSITION"; variable2NameMap[SPH_VELOCITY] = "SPH_VELOCITY"; variable2NameMap[SPH_FORCE] = "SPH_FORCE"; variable2NameMap[SPH_SORTED_POSITION] = "SPH_SORTED_POSITION"; variable2NameMap[SPH_SORTED_VELOCITY] = "SPH_SORTED_VELOCITY"; variable2NameMap[SPH_SORTED_DENSITY] = "SPH_SORTED_DENSITY"; variable2NameMap[SPH_SORTED_DENSITY_NORMALIZED] = "SPH_SORTED_NORMALIZED_DENSITY"; variable2NameMap[SPH_SORTED_PRESSURE] = "SPH_SORTED_PRESSURE"; variable2NameMap[PD_ACTIVITY] = "PD_ACTIVITY"; variable2NameMap[PD_POSITION] = "PD_POSITION"; variable2NameMap[PD_VELOCITY] = "PD_VELOCITY"; variable2NameMap[PD_FORCE] = "PD_FORCE"; variable2NameMap[PD_SORTED_POSITION] = "PD_SORTED_POSITION"; variable2NameMap[PD_SORTED_VELOCITY] = "PD_SORTED_VELOCITY"; variable2NameMap[PD_ORIGINAL_POSITION] = "PD_ORIGINAL_POSITION"; variable2NameMap[PD_ORIGINAL_STRETCH] = "PD_ORIGINAL_STRETCH"; variable2NameMap[PD_ORIGINAL_BOND_LIST_TOP] = "PD_ORIGINAL_BOND_COUNT"; variable2NameMap[PD_STRETCH] = "PD_STRETCH"; variable2NameMap[PD_BOND_LIST] = "PD_BOND_LIST"; variable2NameMap[PD_BOND_LIST_TOP] = "PD_BOND_LIST_TOP"; variable2NameMap[PD_BOND_LIST_TOP_BACKUP] = "PD_BOND_LIST_TOP_BACKUP"; variable2NameMap[PD_CLIST] = "PD_NEIGHBOR_LIST"; variable2NameMap[CELL_PARTICLE_TYPE] = "CELL_MIXED_PARTICLE"; // allocation memory allocateHostMemory(); allocateDeviceMemory(); } //------------------------------------------------------------------------------------------ MemoryManager::~MemoryManager() { for(std::map<Variables, void*>::iterator ptr = hostPointerMap.begin(); ptr != hostPointerMap.end(); ++ptr) { delete[] (ptr->second); } for(std::map<Variables, void*>::iterator ptr = devicePointerMap.begin(); ptr != devicePointerMap.end(); ++ptr) { 
freeDeviceArray(ptr->second); } } //------------------------------------------------------------------------------------------ void MemoryManager::uploadToDevice(MemoryManager::Variables _variable, bool _scale) { void* source = hostPointerMap[_variable]; void* dest = devicePointerMap[_variable]; size_t size = sizeMap[_variable]; if(_scale) { if(_variable == SPH_POSITION || _variable == PD_POSITION || _variable == SPH_SORTED_POSITION || _variable == PD_SORTED_POSITION || _variable == PD_ORIGINAL_POSITION || _variable == SPH_BOUNDARY_POSITION) { real4_t* parPos = (real4_t*) source; int numElements = sizeMap[_variable] / (4 * sizeof(real_t)); scaleParticle(parPos, numElements, 1.0 / simParams_.scaleFactor); } } checkCudaErrors(hipMemcpy(dest, source, size, hipMemcpyHostToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::uploadAllArrayToDevice(bool _scale) { for(int i = 0; i < NUM_VARIABLES; ++i) { Variables variable = static_cast<Variables>(i); uploadToDevice(variable, _scale); } } //------------------------------------------------------------------------------------------ void MemoryManager::downloadFromDevice(MemoryManager::Variables _variable) { void* source = devicePointerMap[_variable]; void* dest = hostPointerMap[_variable]; size_t size = sizeMap[_variable]; checkCudaErrors(hipMemcpy(dest, source, size, hipMemcpyDeviceToHost)); // scale position, if needed // if(_scale) { if(_variable == SPH_POSITION || _variable == PD_POSITION || _variable == SPH_SORTED_POSITION || _variable == PD_SORTED_POSITION || _variable == PD_ORIGINAL_POSITION || _variable == SPH_BOUNDARY_POSITION) { real4_t* parPos = (real4_t*) dest; int numElements = sizeMap[_variable] / (4 * sizeof(real_t)); scaleParticle(parPos, numElements, simParams_.scaleFactor); } } } //------------------------------------------------------------------------------------------ void MemoryManager::downloadAllArrayFromDevice() { for(int i = 0; i < NUM_VARIABLES; ++i) { Variables variable = static_cast<Variables>(i); downloadFromDevice(variable); } } //------------------------------------------------------------------------------------------ void MemoryManager::printArray(MemoryManager::Variables _variable, int _size) { // download first downloadFromDevice(_variable); // then print printHostArray(_variable, _size); } //------------------------------------------------------------------------------------------ void MemoryManager::printHostArray(MemoryManager::Variables _variable, int _size) { VariableTypes type = typeMap[_variable]; int size = _size; std::cout << "==================== " << getVariableName(_variable) << " ====================" << std::endl; switch(type) { case REAL_TYPE: { if(size == 0) { size = sizeMap[_variable] / sizeof(real_t); } real_t* data = (real_t*)hostPointerMap[_variable]; for(int i = 0; i < size; ++i) { std::cout << "[" << i << "] " << std::scientific << data[i] << std::endl; } } break; case REAL4_TYPE: { if(size == 0) { size = sizeMap[_variable] / sizeof(real4_t); } real4_t* data = (real4_t*)hostPointerMap[_variable]; for(int i = 0; i < size; ++i) { std::cout << "[" << i << "] " << std::scientific << data[i].x << ", " << data[i].y << ", " << data[i].z << std::endl; } } break; case INT32_TYPE: { if(size == 0) { size = sizeMap[_variable] / sizeof(int32_t); } int* data = (int*) hostPointerMap[_variable]; for(int i = 0; i < size; ++i) { std::cout << "[" << i << "] " << data[i] << std::endl; } } break; case STRUCT: { if(size == 0) { size = 
sizeMap[_variable] / sizeof(Clist); } Clist* data = (Clist*) hostPointerMap[_variable]; for(int i = 0; i < size; ++i) { std::cout << "[" << i << "] (" << data[i].plist_top + 1 << ") "; for(int j = 0; j < data[i].plist_top + 1; ++j) { std::cout << data[i].plist[j] << ", "; } std::cout << std::endl; } } break; } } //------------------------------------------------------------------------------------------ void MemoryManager::printPositiveIntegerArray(MemoryManager::Variables _variable, int _size) { // then print VariableTypes type = typeMap[_variable]; if(type != INT32_TYPE) { return; } downloadFromDevice(_variable); int size = _size; std::cout << "==================== " << getVariableName(_variable) << " ====================" << std::endl; if(size == 0) { size = sizeMap[_variable] / sizeof(int); } int* data = (int*) hostPointerMap[_variable]; for(int i = 0; i < size; ++i) { if(data[i] > 0) { std::cout << "[" << i << "] " << data[i] << std::endl; } } } //------------------------------------------------------------------------------------------ void* MemoryManager::getHostPointer(MemoryManager::Variables _variable) { return hostPointerMap[_variable]; } //------------------------------------------------------------------------------------------ void* MemoryManager::getDevicePointer(MemoryManager::Variables _variable) { return devicePointerMap[_variable]; } //------------------------------------------------------------------------------------------ size_t MemoryManager::getArraySize(MemoryManager::Variables _variable) { return sizeMap[_variable]; } //------------------------------------------------------------------------------------------ char* MemoryManager::getVariableName(MemoryManager::Variables _variable) { return variable2NameMap[_variable]; } //------------------------------------------------------------------------------------------ void MemoryManager::countMemory() { size_t totalMemory = 0; for(std::map<Variables, size_t>::iterator ptr = sizeMap.begin(); ptr != sizeMap.end(); ++ptr) { size_t size = ptr->second; if(size == 0) { continue; } totalMemory += size; } std::cout << Monitor::PADDING << "Total memory allocation: " << totalMemory / 1048576 << "(MB)" << std::endl; } //------------------------------------------------------------------------------------------ void MemoryManager::backupBondListTopIndex() { checkCudaErrors(hipMemcpy(devicePointerMap[PD_BOND_LIST_TOP_BACKUP], devicePointerMap[PD_BOND_LIST_TOP], sizeMap[PD_BOND_LIST_TOP], hipMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::restoreBondListTopIndex() { checkCudaErrors(hipMemcpy(devicePointerMap[PD_BOND_LIST_TOP], devicePointerMap[PD_BOND_LIST_TOP_BACKUP], sizeMap[PD_BOND_LIST_TOP], hipMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::backupPDPosition() { checkCudaErrors(hipMemcpy(devicePointerMap[PD_POSITION_BACKUP], devicePointerMap[PD_POSITION], sizeMap[PD_POSITION], hipMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::restorePDPosition() { checkCudaErrors(hipMemcpy(devicePointerMap[PD_POSITION], devicePointerMap[PD_POSITION_BACKUP], sizeMap[PD_POSITION], hipMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::backupPDVelocity() { 
checkCudaErrors(hipMemcpy(devicePointerMap[PD_VELOCITY_BACKUP], devicePointerMap[PD_VELOCITY], sizeMap[PD_VELOCITY], hipMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::restorePDVelocity() { checkCudaErrors(hipMemcpy(devicePointerMap[PD_VELOCITY], devicePointerMap[PD_VELOCITY_BACKUP], sizeMap[PD_VELOCITY], hipMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::transferData(MemoryManager::Variables dest_var, MemoryManager::Variables source_var) { void* source = devicePointerMap[source_var]; void* dest = devicePointerMap[dest_var]; size_t size = sizeMap[source_var]; checkCudaErrors(hipMemcpy(dest, source, size, hipMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::allocateHostMemory() { for(std::map<Variables, size_t>::iterator ptr = sizeMap.begin(); ptr != sizeMap.end(); ++ptr) { Variables variable = ptr->first; size_t size = ptr->second; if(size == 0) { continue; } hostPointerMap[variable] = malloc(size); } } //------------------------------------------------------------------------------------------ void MemoryManager::allocateDeviceMemory() { for(std::map<Variables, size_t>::iterator ptr = sizeMap.begin(); ptr != sizeMap.end(); ++ptr) { Variables variable = ptr->first; size_t size = ptr->second; if(size == 0) { continue; } void* devPtr; allocateDeviceArray((void**)&devPtr, size); devicePointerMap[variable] = devPtr; // std::cout << "alloc dev " << variable << " size " << size << ", p " << devPtr << // std::endl; } } //------------------------------------------------------------------------------------------ void MemoryManager::scaleParticle(real4_t* _parPos, int _numParticles, real_t _scaleFactor) { for(int i = 0; i < _numParticles; ++i) { real4_t pos = _parPos[i]; pos.x *= _scaleFactor; pos.y *= _scaleFactor; pos.z *= _scaleFactor; _parPos[i] = pos; } }
04f641b5bb00b346a047e3fce3aac0a1620541f8.cu
//------------------------------------------------------------------------------------------ // // // Created on: 1/31/2015 // Author: Nghia Truong // //------------------------------------------------------------------------------------------ #include "helper_cuda.h" #include <math.h> #include "cutil_math_ext.h" #include "memory_manager.h" #include "definitions.h" #include "monitor.h" MemoryManager::MemoryManager(SimulationParameters& simParams): simParams_(simParams) { Monitor::recordEvent("Allocate memory"); // printf("total : %d\n", simParams.num_total_particle); // printf("sph: %d\n", simParams.num_sph_particle); // printf("pd: %d\n", simParams.num_pd_particle); // all particles size_t sizeREAL = sizeof(real_t) * simParams.num_sph_particle; size_t sizeInt = sizeof(int) * simParams.num_sph_particle; sizeMap[SPH_TIMESTEP] = sizeInt; typeMap[SPH_TIMESTEP] = INT32_TYPE; sizeMap[SPH_VALIDITY] = sizeInt; typeMap[SPH_VALIDITY] = INT32_TYPE; sizeMap[SPH_ACTIVITY] = sizeInt; typeMap[SPH_ACTIVITY] = INT32_TYPE; sizeMap[SPH_POSITION] = sizeREAL * 4; typeMap[SPH_POSITION] = REAL4_TYPE; sizeMap[SPH_VELOCITY] = sizeREAL * 4; typeMap[SPH_VELOCITY] = REAL4_TYPE; sizeMap[SPH_FORCE] = sizeREAL * 4; typeMap[SPH_FORCE] = REAL4_TYPE; sizeMap[SPH_SORTED_POSITION] = sizeREAL * 4; typeMap[SPH_SORTED_POSITION] = REAL4_TYPE; sizeMap[SPH_SORTED_VELOCITY] = sizeREAL * 4; typeMap[SPH_SORTED_VELOCITY] = REAL4_TYPE; sizeMap[SPH_SORTED_DENSITY] = sizeREAL; typeMap[SPH_SORTED_DENSITY] = REAL_TYPE; sizeMap[SPH_SORTED_DENSITY_NORMALIZED] = sizeREAL; typeMap[SPH_SORTED_DENSITY_NORMALIZED] = REAL_TYPE; sizeMap[SPH_SORTED_PRESSURE] = sizeREAL; typeMap[SPH_SORTED_PRESSURE] = REAL_TYPE; sizeREAL = sizeof(real_t) * (2 * (simParams.boundaryPlaneSize + 2) * (simParams.boundaryPlaneSize + 2) + 4 * simParams.boundaryPlaneSize * (simParams.boundaryPlaneSize + 1)); sizeMap[SPH_BOUNDARY_POSITION] = sizeREAL * 4; typeMap[SPH_BOUNDARY_POSITION] = REAL4_TYPE; // Peridynamics particles only sizeREAL = sizeof(real_t) * simParams.num_pd_particle; sizeInt = sizeof(int) * simParams.num_pd_particle; sizeMap[PD_ACTIVITY] = sizeInt; typeMap[PD_ACTIVITY] = INT32_TYPE; sizeMap[PD_POSITION] = sizeREAL * 4; typeMap[PD_POSITION] = REAL4_TYPE; sizeMap[PD_POSITION_BACKUP] = sizeREAL * 4; typeMap[PD_POSITION_BACKUP] = REAL4_TYPE; sizeMap[PD_VELOCITY] = sizeREAL * 4; typeMap[PD_VELOCITY] = REAL4_TYPE; sizeMap[PD_VELOCITY_BACKUP] = sizeREAL * 4; typeMap[PD_VELOCITY_BACKUP] = REAL4_TYPE; sizeMap[PD_FORCE] = sizeREAL * 4; typeMap[PD_FORCE] = REAL4_TYPE; sizeMap[PD_SORTED_POSITION] = sizeREAL * 4; typeMap[PD_SORTED_POSITION] = REAL4_TYPE; sizeMap[PD_SORTED_VELOCITY] = sizeREAL * 4; typeMap[PD_SORTED_VELOCITY] = REAL4_TYPE; sizeMap[PD_ORIGINAL_POSITION] = sizeREAL * 4; typeMap[PD_ORIGINAL_POSITION] = REAL4_TYPE; sizeMap[PD_ORIGINAL_STRETCH] = sizeREAL; typeMap[PD_ORIGINAL_STRETCH] = REAL4_TYPE; sizeMap[PD_ORIGINAL_BOND_LIST_TOP] = sizeInt; typeMap[PD_ORIGINAL_BOND_LIST_TOP] = INT32_TYPE; sizeMap[PD_STRETCH] = sizeREAL; typeMap[PD_STRETCH] = REAL_TYPE; sizeMap[PD_NEW_STRETCH] = sizeREAL; typeMap[PD_NEW_STRETCH] = REAL_TYPE; sizeMap[PD_BOND_LIST_TOP] = sizeInt; typeMap[PD_BOND_LIST_TOP] = INT32_TYPE; sizeMap[PD_BOND_LIST_TOP_BACKUP] = sizeInt; typeMap[PD_BOND_LIST_TOP_BACKUP] = INT32_TYPE; sizeMap[PD_BOND_LIST] = sizeInt * MAX_PD_BOND_COUNT; typeMap[PD_BOND_LIST] = INT32_TYPE; sizeMap[PD_CLIST] = sizeof(Clist) * simParams.num_clists; typeMap[PD_CLIST] = STRUCT; sizeMap[PD_SYSTEM_MATRIX] = simParams.num_pd_particle * MAX_PD_BOND_COUNT * SIZE_MAT_3X3; 
typeMap[PD_SYSTEM_MATRIX] = STRUCT; sizeMap[PD_SYSTEM_VECTOR] = sizeREAL * 4; typeMap[PD_SYSTEM_VECTOR] = REAL4_TYPE; sizeMap[PD_SYSTEM_SOLUTION] = sizeREAL * 4; typeMap[PD_SYSTEM_SOLUTION] = REAL4_TYPE; sizeMap[PD_HAS_BROKEN_BOND] = sizeof(int); typeMap[PD_HAS_BROKEN_BOND] = INT32_TYPE; // grid cells sizeInt = sizeof(int) * simParams.num_cells; sizeMap[CELL_PARTICLE_TYPE] = sizeInt; typeMap[CELL_PARTICLE_TYPE] = INT32_TYPE; sizeInt = sizeof(int) * simParams.num_sph_particle; sizeMap[SPH_PARTICLE_TO_CELL_HASH] = sizeInt; typeMap[SPH_PARTICLE_TO_CELL_HASH] = INT32_TYPE; sizeMap[SPH_PARTICLE_UNSORTED_INDEX] = sizeInt; typeMap[SPH_PARTICLE_UNSORTED_INDEX] = INT32_TYPE; sizeInt = sizeof(int) * simParams.num_cells; sizeMap[SPH_CELL_START_INDEX] = sizeInt; typeMap[SPH_CELL_START_INDEX] = INT32_TYPE; sizeMap[SPH_CELL_END_INDEX] = sizeInt; typeMap[SPH_CELL_END_INDEX] = INT32_TYPE; sizeInt = sizeof(int) * simParams.num_pd_particle; sizeMap[PD_PARTICLE_TO_CELL_HASH] = sizeInt; typeMap[PD_PARTICLE_TO_CELL_HASH] = INT32_TYPE; sizeMap[PD_PARTICLE_UNSORTED_INDEX] = sizeInt; typeMap[PD_PARTICLE_UNSORTED_INDEX] = INT32_TYPE; sizeInt = sizeof(int) * simParams.num_cells; sizeMap[PD_CELL_START_INDEX] = sizeInt; typeMap[PD_CELL_START_INDEX] = INT32_TYPE; sizeMap[PD_CELL_END_INDEX] = sizeInt; typeMap[PD_CELL_END_INDEX] = INT32_TYPE; TRUE_OR_DIE(sizeMap.size() == NUM_VARIABLES, "Ohh, you've omitted to initialize some variables...."); TRUE_OR_DIE(typeMap.size() == NUM_VARIABLES, "Ohh, you've omitted to initialize some variables...."); // map variable to string variable2NameMap[SPH_TIMESTEP] = "SPH_TIMESTEP"; variable2NameMap[SPH_VALIDITY] = "SPH_VALIDITY"; variable2NameMap[SPH_ACTIVITY] = "SPH_ACTIVITY"; variable2NameMap[SPH_POSITION] = "SPH_POSITION"; variable2NameMap[SPH_VELOCITY] = "SPH_VELOCITY"; variable2NameMap[SPH_FORCE] = "SPH_FORCE"; variable2NameMap[SPH_SORTED_POSITION] = "SPH_SORTED_POSITION"; variable2NameMap[SPH_SORTED_VELOCITY] = "SPH_SORTED_VELOCITY"; variable2NameMap[SPH_SORTED_DENSITY] = "SPH_SORTED_DENSITY"; variable2NameMap[SPH_SORTED_DENSITY_NORMALIZED] = "SPH_SORTED_NORMALIZED_DENSITY"; variable2NameMap[SPH_SORTED_PRESSURE] = "SPH_SORTED_PRESSURE"; variable2NameMap[PD_ACTIVITY] = "PD_ACTIVITY"; variable2NameMap[PD_POSITION] = "PD_POSITION"; variable2NameMap[PD_VELOCITY] = "PD_VELOCITY"; variable2NameMap[PD_FORCE] = "PD_FORCE"; variable2NameMap[PD_SORTED_POSITION] = "PD_SORTED_POSITION"; variable2NameMap[PD_SORTED_VELOCITY] = "PD_SORTED_VELOCITY"; variable2NameMap[PD_ORIGINAL_POSITION] = "PD_ORIGINAL_POSITION"; variable2NameMap[PD_ORIGINAL_STRETCH] = "PD_ORIGINAL_STRETCH"; variable2NameMap[PD_ORIGINAL_BOND_LIST_TOP] = "PD_ORIGINAL_BOND_COUNT"; variable2NameMap[PD_STRETCH] = "PD_STRETCH"; variable2NameMap[PD_BOND_LIST] = "PD_BOND_LIST"; variable2NameMap[PD_BOND_LIST_TOP] = "PD_BOND_LIST_TOP"; variable2NameMap[PD_BOND_LIST_TOP_BACKUP] = "PD_BOND_LIST_TOP_BACKUP"; variable2NameMap[PD_CLIST] = "PD_NEIGHBOR_LIST"; variable2NameMap[CELL_PARTICLE_TYPE] = "CELL_MIXED_PARTICLE"; // allocation memory allocateHostMemory(); allocateDeviceMemory(); } //------------------------------------------------------------------------------------------ MemoryManager::~MemoryManager() { for(std::map<Variables, void*>::iterator ptr = hostPointerMap.begin(); ptr != hostPointerMap.end(); ++ptr) { delete[] (ptr->second); } for(std::map<Variables, void*>::iterator ptr = devicePointerMap.begin(); ptr != devicePointerMap.end(); ++ptr) { freeDeviceArray(ptr->second); } } 
//------------------------------------------------------------------------------------------ void MemoryManager::uploadToDevice(MemoryManager::Variables _variable, bool _scale) { void* source = hostPointerMap[_variable]; void* dest = devicePointerMap[_variable]; size_t size = sizeMap[_variable]; if(_scale) { if(_variable == SPH_POSITION || _variable == PD_POSITION || _variable == SPH_SORTED_POSITION || _variable == PD_SORTED_POSITION || _variable == PD_ORIGINAL_POSITION || _variable == SPH_BOUNDARY_POSITION) { real4_t* parPos = (real4_t*) source; int numElements = sizeMap[_variable] / (4 * sizeof(real_t)); scaleParticle(parPos, numElements, 1.0 / simParams_.scaleFactor); } } checkCudaErrors(cudaMemcpy(dest, source, size, cudaMemcpyHostToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::uploadAllArrayToDevice(bool _scale) { for(int i = 0; i < NUM_VARIABLES; ++i) { Variables variable = static_cast<Variables>(i); uploadToDevice(variable, _scale); } } //------------------------------------------------------------------------------------------ void MemoryManager::downloadFromDevice(MemoryManager::Variables _variable) { void* source = devicePointerMap[_variable]; void* dest = hostPointerMap[_variable]; size_t size = sizeMap[_variable]; checkCudaErrors(cudaMemcpy(dest, source, size, cudaMemcpyDeviceToHost)); // scale position, if needed // if(_scale) { if(_variable == SPH_POSITION || _variable == PD_POSITION || _variable == SPH_SORTED_POSITION || _variable == PD_SORTED_POSITION || _variable == PD_ORIGINAL_POSITION || _variable == SPH_BOUNDARY_POSITION) { real4_t* parPos = (real4_t*) dest; int numElements = sizeMap[_variable] / (4 * sizeof(real_t)); scaleParticle(parPos, numElements, simParams_.scaleFactor); } } } //------------------------------------------------------------------------------------------ void MemoryManager::downloadAllArrayFromDevice() { for(int i = 0; i < NUM_VARIABLES; ++i) { Variables variable = static_cast<Variables>(i); downloadFromDevice(variable); } } //------------------------------------------------------------------------------------------ void MemoryManager::printArray(MemoryManager::Variables _variable, int _size) { // download first downloadFromDevice(_variable); // then print printHostArray(_variable, _size); } //------------------------------------------------------------------------------------------ void MemoryManager::printHostArray(MemoryManager::Variables _variable, int _size) { VariableTypes type = typeMap[_variable]; int size = _size; std::cout << "==================== " << getVariableName(_variable) << " ====================" << std::endl; switch(type) { case REAL_TYPE: { if(size == 0) { size = sizeMap[_variable] / sizeof(real_t); } real_t* data = (real_t*)hostPointerMap[_variable]; for(int i = 0; i < size; ++i) { std::cout << "[" << i << "] " << std::scientific << data[i] << std::endl; } } break; case REAL4_TYPE: { if(size == 0) { size = sizeMap[_variable] / sizeof(real4_t); } real4_t* data = (real4_t*)hostPointerMap[_variable]; for(int i = 0; i < size; ++i) { std::cout << "[" << i << "] " << std::scientific << data[i].x << ", " << data[i].y << ", " << data[i].z << std::endl; } } break; case INT32_TYPE: { if(size == 0) { size = sizeMap[_variable] / sizeof(int32_t); } int* data = (int*) hostPointerMap[_variable]; for(int i = 0; i < size; ++i) { std::cout << "[" << i << "] " << data[i] << std::endl; } } break; case STRUCT: { if(size == 0) { size = sizeMap[_variable] / sizeof(Clist); } 
Clist* data = (Clist*) hostPointerMap[_variable]; for(int i = 0; i < size; ++i) { std::cout << "[" << i << "] (" << data[i].plist_top + 1 << ") "; for(int j = 0; j < data[i].plist_top + 1; ++j) { std::cout << data[i].plist[j] << ", "; } std::cout << std::endl; } } break; } } //------------------------------------------------------------------------------------------ void MemoryManager::printPositiveIntegerArray(MemoryManager::Variables _variable, int _size) { // then print VariableTypes type = typeMap[_variable]; if(type != INT32_TYPE) { return; } downloadFromDevice(_variable); int size = _size; std::cout << "==================== " << getVariableName(_variable) << " ====================" << std::endl; if(size == 0) { size = sizeMap[_variable] / sizeof(int); } int* data = (int*) hostPointerMap[_variable]; for(int i = 0; i < size; ++i) { if(data[i] > 0) { std::cout << "[" << i << "] " << data[i] << std::endl; } } } //------------------------------------------------------------------------------------------ void* MemoryManager::getHostPointer(MemoryManager::Variables _variable) { return hostPointerMap[_variable]; } //------------------------------------------------------------------------------------------ void* MemoryManager::getDevicePointer(MemoryManager::Variables _variable) { return devicePointerMap[_variable]; } //------------------------------------------------------------------------------------------ size_t MemoryManager::getArraySize(MemoryManager::Variables _variable) { return sizeMap[_variable]; } //------------------------------------------------------------------------------------------ char* MemoryManager::getVariableName(MemoryManager::Variables _variable) { return variable2NameMap[_variable]; } //------------------------------------------------------------------------------------------ void MemoryManager::countMemory() { size_t totalMemory = 0; for(std::map<Variables, size_t>::iterator ptr = sizeMap.begin(); ptr != sizeMap.end(); ++ptr) { size_t size = ptr->second; if(size == 0) { continue; } totalMemory += size; } std::cout << Monitor::PADDING << "Total memory allocation: " << totalMemory / 1048576 << "(MB)" << std::endl; } //------------------------------------------------------------------------------------------ void MemoryManager::backupBondListTopIndex() { checkCudaErrors(cudaMemcpy(devicePointerMap[PD_BOND_LIST_TOP_BACKUP], devicePointerMap[PD_BOND_LIST_TOP], sizeMap[PD_BOND_LIST_TOP], cudaMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::restoreBondListTopIndex() { checkCudaErrors(cudaMemcpy(devicePointerMap[PD_BOND_LIST_TOP], devicePointerMap[PD_BOND_LIST_TOP_BACKUP], sizeMap[PD_BOND_LIST_TOP], cudaMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::backupPDPosition() { checkCudaErrors(cudaMemcpy(devicePointerMap[PD_POSITION_BACKUP], devicePointerMap[PD_POSITION], sizeMap[PD_POSITION], cudaMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::restorePDPosition() { checkCudaErrors(cudaMemcpy(devicePointerMap[PD_POSITION], devicePointerMap[PD_POSITION_BACKUP], sizeMap[PD_POSITION], cudaMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::backupPDVelocity() { checkCudaErrors(cudaMemcpy(devicePointerMap[PD_VELOCITY_BACKUP], 
devicePointerMap[PD_VELOCITY], sizeMap[PD_VELOCITY], cudaMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::restorePDVelocity() { checkCudaErrors(cudaMemcpy(devicePointerMap[PD_VELOCITY], devicePointerMap[PD_VELOCITY_BACKUP], sizeMap[PD_VELOCITY], cudaMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::transferData(MemoryManager::Variables dest_var, MemoryManager::Variables source_var) { void* source = devicePointerMap[source_var]; void* dest = devicePointerMap[dest_var]; size_t size = sizeMap[source_var]; checkCudaErrors(cudaMemcpy(dest, source, size, cudaMemcpyDeviceToDevice)); } //------------------------------------------------------------------------------------------ void MemoryManager::allocateHostMemory() { for(std::map<Variables, size_t>::iterator ptr = sizeMap.begin(); ptr != sizeMap.end(); ++ptr) { Variables variable = ptr->first; size_t size = ptr->second; if(size == 0) { continue; } hostPointerMap[variable] = malloc(size); } } //------------------------------------------------------------------------------------------ void MemoryManager::allocateDeviceMemory() { for(std::map<Variables, size_t>::iterator ptr = sizeMap.begin(); ptr != sizeMap.end(); ++ptr) { Variables variable = ptr->first; size_t size = ptr->second; if(size == 0) { continue; } void* devPtr; allocateDeviceArray((void**)&devPtr, size); devicePointerMap[variable] = devPtr; // std::cout << "alloc dev " << variable << " size " << size << ", p " << devPtr << // std::endl; } } //------------------------------------------------------------------------------------------ void MemoryManager::scaleParticle(real4_t* _parPos, int _numParticles, real_t _scaleFactor) { for(int i = 0; i < _numParticles; ++i) { real4_t pos = _parPos[i]; pos.x *= _scaleFactor; pos.y *= _scaleFactor; pos.z *= _scaleFactor; _parPos[i] = pos; } }
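/*
 * Usage sketch (added, hedged): the driver code below is an illustrative assumption — only the
 * MemoryManager member functions shown actually exist in this file, and "memoryManager" is a
 * hypothetical instance. With _scale == true, uploadToDevice rescales the host position buffer
 * by 1/simParams_.scaleFactor in place before the copy, and downloadFromDevice multiplies by
 * simParams_.scaleFactor after the copy back.
 *
 *   memoryManager->allocateHostMemory();
 *   memoryManager->allocateDeviceMemory();
 *   memoryManager->uploadToDevice(MemoryManager::PD_POSITION, true);   // scale down for the GPU
 *   // ... launch kernels ...
 *   memoryManager->downloadFromDevice(MemoryManager::PD_POSITION);     // scales back up
 *   memoryManager->countMemory();
 */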
56a611aaf1d8a9300a718a6e9af80edd084c61b9.hip
// !!! This is a file automatically generated by hipify!!! // CUDA Device Query #include <stdio.h> #include "hip/hip_runtime.h" // Print device properties void printDevProp(hipDeviceProp_t devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %zu\n", devProp.totalGlobalMem); printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %zu\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %zu\n", devProp.totalConstMem); printf("Texture alignment: %zu\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Shared mem per mp: %zu\n", devProp.sharedMemPerBlock); printf("Registers per mp: %d\n", devProp.regsPerBlock); return; } int main() { // Number of CUDA devices int devCount; hipGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, i); printDevProp(devProp); } printf("\nPress any key to exit..."); char c; scanf("%c", &c); return 0; }
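/*
 * Added sketch (hedged): the HIP calls in main() ignore their hipError_t return codes. A small
 * helper such as the one below could surface failures; the helper name is an illustrative
 * assumption, not part of the original sample.
 */
static void reportHipError(const char *what, hipError_t err)
{
    if (err != hipSuccess)
        fprintf(stderr, "%s failed: %s\n", what, hipGetErrorString(err));
}
/* Example: reportHipError("hipGetDeviceCount", hipGetDeviceCount(&devCount)); */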
56a611aaf1d8a9300a718a6e9af80edd084c61b9.cu
// CUDA Device Query #include <stdio.h> // Print device properties void printDevProp(cudaDeviceProp devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %zu\n", devProp.totalGlobalMem); printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %zu\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %zu\n", devProp.totalConstMem); printf("Texture alignment: %zu\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Shared mem per mp: %zu\n", devProp.sharedMemPerBlock); printf("Registers per mp: %d\n", devProp.regsPerBlock); return; } int main() { // Number of CUDA devices int devCount; cudaGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); printDevProp(devProp); } printf("\nPress any key to exit..."); char c; scanf("%c", &c); return 0; }
b518a148b00b3ec87a0ad34807d164e911e9ff83.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "MaxPoolLayer.cuh" uint32 mycnn::MaxPoolLayer::getKernelSize() { return kernelSize; } cnn::gpu::GpuBuffer* mycnn::MaxPoolLayer::getOutput() { return output; } mycnn::MaxPoolLayer::MaxPoolLayer(uint32 kernelSize, uint32 inputWidht, uint32 inputHeight, uint32 inputSize) { output=new cnn::gpu::GpuBuffer(); errorRates=new cnn::gpu::GpuBuffer(); this->kernelSize=kernelSize; this->inputSize=inputSize; this->inputWidth=inputWidht; this->inputHeight=inputHeight; output->allocate(getOutputSize()*sizeof(float)); errorRates->allocate(inputSize*sizeof(float)); } mycnn::MaxPoolLayer::~MaxPoolLayer() { output->free(); errorRates->free(); delete output; delete errorRates; } uint32 mycnn::MaxPoolLayer::getOutputWidth() { return inputWidth/kernelSize; } uint32 mycnn::MaxPoolLayer::getInputImagesCount() { return inputSize/(inputWidth*inputHeight); } uint32 mycnn::MaxPoolLayer::getOutputHeight() { return inputHeight/kernelSize; } uint32 mycnn::MaxPoolLayer::getOutputSize() { return getOutputWidth()*getOutputHeight()*getInputImagesCount(); } void mycnn::MaxPoolLayer::pooling(cnn::gpu::GpuBuffer* input, uint32 offset) { int blockSize=getOutputHeight()*getOutputWidth(); dim3 block(getOutputWidth(), getOutputHeight());//blok - jeden kernel z jednym obrazem hipLaunchKernelGGL(( cnn::cuda::maxPooling), dim3(getInputImagesCount()), dim3(block), 0, 0, input->getDataPtr<float>(), output->getDataPtr<float>(), kernelSize, inputWidth, inputHeight, getOutputWidth(), getOutputHeight(), errorRates->getDataPtr<float>()); } uint32 mycnn::MaxPoolLayer::getImageSize() { return getOutputHeight()*getOutputWidth(); } void mycnn::MaxPoolLayer::run(cnn::gpu::GpuBuffer* buffer, uint32 offset) { pooling(buffer, offset); } void mycnn::MaxPoolLayer::teach(cnn::gpu::GpuBuffer* errorProp, cnn::gpu::GpuBuffer* input, float learningRate, uint32 offset) { int blockSize=getOutputHeight()*getOutputWidth(); dim3 block(getOutputWidth(), getOutputHeight());//blok - jeden kernel z jednym obrazem hipLaunchKernelGGL(( cnn::cuda::maxError), dim3(getInputImagesCount()), dim3(block), 0, 0, errorRates->getDataPtr<float>(), output->getDataPtr<float>(), kernelSize, inputWidth, inputHeight, getOutputWidth(), getOutputHeight(), errorProp->getDataPtr<float>()); } cnn::gpu::GpuBuffer* mycnn::MaxPoolLayer::getError() { return errorRates; } void mycnn::MaxPoolLayer::updateWeights() {} void mycnn::MaxPoolLayer::resetWeightsUpdates() {}
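/*
 * Added note (hedged): pooling() and teach() launch with a block of
 * getOutputWidth() x getOutputHeight() threads, so the per-image output tile must not exceed
 * the device's per-block thread limit (1024 on current NVIDIA and AMD GPUs). A post-launch
 * check could catch an invalid configuration, for example:
 *
 *   hipError_t err = hipGetLastError();
 *   if (err != hipSuccess) { printf("maxPooling launch failed: %s\n", hipGetErrorString(err)); }
 */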
b518a148b00b3ec87a0ad34807d164e911e9ff83.cu
#include "MaxPoolLayer.cuh" uint32 mycnn::MaxPoolLayer::getKernelSize() { return kernelSize; } cnn::gpu::GpuBuffer* mycnn::MaxPoolLayer::getOutput() { return output; } mycnn::MaxPoolLayer::MaxPoolLayer(uint32 kernelSize, uint32 inputWidht, uint32 inputHeight, uint32 inputSize) { output=new cnn::gpu::GpuBuffer(); errorRates=new cnn::gpu::GpuBuffer(); this->kernelSize=kernelSize; this->inputSize=inputSize; this->inputWidth=inputWidht; this->inputHeight=inputHeight; output->allocate(getOutputSize()*sizeof(float)); errorRates->allocate(inputSize*sizeof(float)); } mycnn::MaxPoolLayer::~MaxPoolLayer() { output->free(); errorRates->free(); delete output; delete errorRates; } uint32 mycnn::MaxPoolLayer::getOutputWidth() { return inputWidth/kernelSize; } uint32 mycnn::MaxPoolLayer::getInputImagesCount() { return inputSize/(inputWidth*inputHeight); } uint32 mycnn::MaxPoolLayer::getOutputHeight() { return inputHeight/kernelSize; } uint32 mycnn::MaxPoolLayer::getOutputSize() { return getOutputWidth()*getOutputHeight()*getInputImagesCount(); } void mycnn::MaxPoolLayer::pooling(cnn::gpu::GpuBuffer* input, uint32 offset) { int blockSize=getOutputHeight()*getOutputWidth(); dim3 block(getOutputWidth(), getOutputHeight());//blok - jeden kernel z jednym obrazem cnn::cuda::maxPooling<<<getInputImagesCount(), block>>>(input->getDataPtr<float>(), output->getDataPtr<float>(), kernelSize, inputWidth, inputHeight, getOutputWidth(), getOutputHeight(), errorRates->getDataPtr<float>()); } uint32 mycnn::MaxPoolLayer::getImageSize() { return getOutputHeight()*getOutputWidth(); } void mycnn::MaxPoolLayer::run(cnn::gpu::GpuBuffer* buffer, uint32 offset) { pooling(buffer, offset); } void mycnn::MaxPoolLayer::teach(cnn::gpu::GpuBuffer* errorProp, cnn::gpu::GpuBuffer* input, float learningRate, uint32 offset) { int blockSize=getOutputHeight()*getOutputWidth(); dim3 block(getOutputWidth(), getOutputHeight());//blok - jeden kernel z jednym obrazem cnn::cuda::maxError<<<getInputImagesCount(), block>>>(errorRates->getDataPtr<float>(), output->getDataPtr<float>(), kernelSize, inputWidth, inputHeight, getOutputWidth(), getOutputHeight(), errorProp->getDataPtr<float>()); } cnn::gpu::GpuBuffer* mycnn::MaxPoolLayer::getError() { return errorRates; } void mycnn::MaxPoolLayer::updateWeights() {} void mycnn::MaxPoolLayer::resetWeightsUpdates() {}
74bd1c1f7f159e28ad9ccf82cd1b561f8157bd6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * Carlos Roman Rivera - A01700820 * * Programming Languages - Cuda Quiz * */ #include <stdio.h> #include <stdlib.h> #define N 9 #define K N/3 #define ThreadsPerBlock K #define NumBlocks K __global__ void compress(float *mat, int n, float *comp, int k){ int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; if (row < k && col < k) { comp[col + row * k] = 0; for (int i_row = 0 ; i_row < k ; i_row++) { for (int j_col = 0 ; j_col < k ; j_col++) { comp[col + row * k] += mat[(col + j_col) + (row + i_row) * n]; } } } } void print_mat(float *mat, int n){ for (int i = 0; i < n; i++){ for (int j = 0; j < n; j++){ printf("%.1f\t", mat[i*n+j]); } printf("\n"); } printf("\n"); } void fill_mat(float *mat, int n){ int c = 0; for (int i = 0; i < n; i++){ for (int j = 0; j < n; j++){ mat[i*n+j] = c++; } } } int main(){ float *h_compress, *h_matrix; float *d_compress, *d_matrix; h_compress = (float *)malloc(sizeof(float) * K * K); h_matrix = (float *)malloc(sizeof(float) * N * N); fill_mat(h_matrix, N); fill_mat(h_compress, K); printf("Input matrix:\n"); print_mat(h_matrix, N); /* allocate the device buffers before the host-to-device copies */ hipMalloc((void **)&d_matrix, sizeof(float) * N * N); hipMalloc((void **)&d_compress, sizeof(float) * K * K); hipMemcpy(d_matrix, h_matrix, sizeof(float) * N * N, hipMemcpyHostToDevice); hipMemcpy(d_compress, h_compress, sizeof(float) * K * K, hipMemcpyHostToDevice); dim3 Blocks(K,K); dim3 Threads(K,K); hipLaunchKernelGGL(( compress), dim3(Blocks), dim3(Threads), 0, 0, d_matrix, N, d_compress, K); hipMemcpy(h_compress, d_compress, sizeof(float) * K * K, hipMemcpyDeviceToHost); printf("Compressed matrix:\n"); print_mat(h_compress, K); free(h_matrix); free(h_compress); hipFree(d_matrix); hipFree(d_compress); }
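/*
 * Added verification sketch (hedged): a host-side reference that mirrors the kernel's
 * summation exactly, so h_compress can be checked after the copy back. The function name is
 * an illustrative assumption and is not called by the quiz code above.
 */
void compress_reference(const float *mat, int n, float *comp, int k)
{
    for (int row = 0; row < k; row++)
        for (int col = 0; col < k; col++)
        {
            float sum = 0.0f;
            for (int i_row = 0; i_row < k; i_row++)
                for (int j_col = 0; j_col < k; j_col++)
                    sum += mat[(col + j_col) + (row + i_row) * n]; /* same indexing as the kernel */
            comp[col + row * k] = sum;
        }
}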
74bd1c1f7f159e28ad9ccf82cd1b561f8157bd6f.cu
/* * * Carlos Roman Rivera - A01700820 * * Programming Languages - Cuda Quiz * */ #include <stdio.h> #include <stdlib.h> #define N 9 #define K N/3 #define ThreadsPerBlock K #define NumBlocks K __global__ void compress(float *mat, int n, float *comp, int k){ int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; if (row < k && col < k) { comp[col + row * k] = 0; for (int i_row = 0 ; i_row < k ; i_row++) { for (int j_col = 0 ; j_col < k ; j_col++) { comp[col + row * k] += mat[(col + j_col) + (row + i_row) * n]; } } } } void print_mat(float *mat, int n){ for (int i = 0; i < n; i++){ for (int j = 0; j < n; j++){ printf("%.1f\t", mat[i*n+j]); } printf("\n"); } printf("\n"); } void fill_mat(float *mat, int n){ int c = 0; for (int i = 0; i < n; i++){ for (int j = 0; j < n; j++){ mat[i*n+j] = c++; } } } int main(){ float *h_compress, *h_matrix; float *d_compress, *d_matrix; h_compress = (float *)malloc(sizeof(float) * K * K); h_matrix = (float *)malloc(sizeof(float) * N * N); fill_mat(h_matrix, N); fill_mat(h_compress, K); printf("Input matrix:\n"); print_mat(h_matrix, N); /* allocate the device buffers before the host-to-device copies */ cudaMalloc((void **)&d_matrix, sizeof(float) * N * N); cudaMalloc((void **)&d_compress, sizeof(float) * K * K); cudaMemcpy(d_matrix, h_matrix, sizeof(float) * N * N, cudaMemcpyHostToDevice); cudaMemcpy(d_compress, h_compress, sizeof(float) * K * K, cudaMemcpyHostToDevice); dim3 Blocks(K,K); dim3 Threads(K,K); compress<<<Blocks, Threads>>>(d_matrix, N, d_compress, K); cudaMemcpy(h_compress, d_compress, sizeof(float) * K * K, cudaMemcpyDeviceToHost); printf("Compressed matrix:\n"); print_mat(h_compress, K); free(h_matrix); free(h_compress); cudaFree(d_matrix); cudaFree(d_compress); }
8b14900e842f9849a920ff8a1ef2a3c9655bcc6d.hip
// !!! This is a file automatically generated by hipify!!! #include"common.h" #include"mmio_highlevel.h" //#include"mmio.h" #include"utils.h" #include "ccsb_spmv.h" typedef struct { MAT_VAL_TYPE *value; int *columnindex; MAT_PTR_TYPE *rowpointer; }SMatrix; void DivideSub(SMatrix A,int start,int over,SMatrix submatrix[],int sub,int nnznum[],int cbnum) { int i,j,k; j=A.rowpointer[start]; int *num; num=(int*)malloc((cbnum)*sizeof(int)); //count=(int*)malloc((SubNum)*sizeof(int)); memset(num,0,(cbnum)*sizeof(int)); int temp=1; for (i=start;i<over;i++) { while(j<A.rowpointer[i+1]){ for (k=0;k<cbnum;k++){ if (nnznum[sub+k]!=0){ if (k==cbnum-1){ num[k]++; submatrix[sub+k].value[num[k]-1]=A.value[j]; submatrix[sub+k].columnindex[num[k]-1]=A.columnindex[j]-k*BLOCK_SIZE; } else if (A.columnindex[j]>=k*BLOCK_SIZE&&A.columnindex[j]<(k+1)*BLOCK_SIZE){ num[k]++; submatrix[sub+k].value[num[k]-1]=A.value[j]; submatrix[sub+k].columnindex[num[k]-1]=A.columnindex[j]-k*BLOCK_SIZE; break; } } } j++; } for (int p=0;p<cbnum;p++){ if (nnznum[sub+p]!=0) submatrix[sub+p].rowpointer[temp]=num[p]; } temp++; //printf("%d\n",num[p]); } free(num); } void nnzNum(SMatrix A,int nnz[],int start,int over,int i,int cbnum) { //printf("start=%d,over=%d\n",start,over); //printf("Subcol=%d\n",SubCol); int j; for (j=A.rowpointer[start];j<A.rowpointer[over];j++){ for (int k=0;k<cbnum-1;k++){ if (A.columnindex[j]>=k*BLOCK_SIZE&&A.columnindex[j]<(k+1)*BLOCK_SIZE){ nnz[i+k]++; break; } } } int m=0; for (int p=i;p<i+cbnum-1;p++) m+= nnz[p]; nnz[i+cbnum-1]=A.rowpointer[over]-A.rowpointer[start]-m; } int main(int argc, char ** argv) { if (argc < 2) { printf("Run the code by './test matrix.mtx'.\n"); return 0; } printf("--------------------------------!!!!!!!!------------------------------------\n"); struct timeval t1, t2; int rowA; int colA; MAT_PTR_TYPE nnzA; int isSymmetricA; SMatrix matrixA; char *filename; filename = argv[1]; printf("MAT: -------------- %s --------------\n", filename); // load mtx A data to the csr format gettimeofday(&t1, NULL); mmio_allinone(&rowA, &colA, &nnzA, &isSymmetricA, &matrixA.rowpointer, &matrixA.columnindex, &matrixA.value, filename); gettimeofday(&t2, NULL); double time_loadmat = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; printf("input matrix A: ( %i, %i ) nnz = %i\n loadfile time = %4.5f sec\n", rowA, colA, nnzA, time_loadmat/1000.0); for (int i = 0; i < nnzA; i++) matrixA.value[i] = i % 10; if (rowA != colA) { printf("This code only computes square matrices.\n Exit.\n"); return 0; } rowA=(rowA/BLOCK_SIZE) * BLOCK_SIZE; nnzA=matrixA.rowpointer[rowA]; // MAT_PTR_TYPE *cscColPtrA = (MAT_PTR_TYPE *)malloc((colA+1) * sizeof(MAT_PTR_TYPE)); // int *cscRowIdxA = (int *)malloc(nnzA * sizeof(int)); // MAT_VAL_TYPE *cscValA = (MAT_VAL_TYPE *)malloc(nnzA * sizeof(MAT_VAL_TYPE)); // transpose A from csr to csc // matrix_transposition(rowA, colA, nnzA, matrixA.rowpointer, matrixA.columnindex, matrixA.value,cscRowIdxA, cscColPtrA, cscValA); /* SMatrix matrixB; int rowB=colA; int colB=rowA; matrixB.rowpointer = cscColPtrA; matrixB.columnindex = cscRowIdxA; matrixB.value = cscValA; */ if (BLOCK_SIZE>rowA){ printf("Error!\n"); return 0; } int rbnum=0; int cbnum=0; if (rowA%BLOCK_SIZE==0) rbnum=rowA/BLOCK_SIZE; else rbnum=(rowA/BLOCK_SIZE)+1; if (colA%BLOCK_SIZE==0) cbnum=colA/BLOCK_SIZE; else cbnum=(colA/BLOCK_SIZE)+1; SMatrix *submatrixA; // SMatrix *submatrixB; // SMatrix *submatrixC; submatrixA=(SMatrix*)malloc((rbnum*cbnum)*sizeof(SMatrix)); // 
submatrixB=(SMatrix*)malloc((rbnum*cbnum)*sizeof(SMatrix)); // submatrixC=(SMatrix*)malloc((rbnum*cbnum)*sizeof(SMatrix)); int *nnzAnum,*nnzBnum; nnzAnum=(int*)malloc((rbnum*cbnum)*sizeof(int)); // nnzBnum=(int*)malloc((rbnum*cbnum)*sizeof(int)); //nnzCnum=(int*)malloc((SubNum*SubNum)*sizeof(int)); memset(nnzAnum,0,(rbnum*cbnum)*sizeof(int)); // memset(nnzBnum,0,(rbnum*cbnum)*sizeof(int)); //calculate nnz in each block #pragma omp parallel for for (int i=0;i<rbnum-1;i++) { nnzNum(matrixA,nnzAnum,i*BLOCK_SIZE,(i+1)*BLOCK_SIZE,i*cbnum,cbnum); } nnzNum(matrixA,nnzAnum,(rbnum-1)*BLOCK_SIZE,rowA,(rbnum-1)*cbnum,cbnum); /* for (int i=0;i<cbnum-1;i++) { nnzNum(matrixB,nnzBnum,i*BLOCK_SIZE,(i+1)*BLOCK_SIZE,i*cbnum,rbnum); } nnzNum(matrixB,nnzBnum,(cbnum-1)*BLOCK_SIZE,rowB,(cbnum-1)*cbnum,rbnum); */ int nnzbl=0; for (int i=0;i<rbnum*cbnum;i++) //calculate number of non-zero blocks { if (nnzAnum[i]!=0) { nnzbl++; } } /* for (int i=0;i<rbnum;i++) { for(int j=0;j<cbnum;j++) { if (nnzAnum[i*cbnum+j]!=0) printf("%d ",nnzAnum[i*cbnum+j]); } printf("!!!!!!!!!!!!!\n"); } */ /* for (int i=0;i<SubNum*SubNum;i++) { writeresults("blocksize.csv", filename, rowA, colA,nnzA,i,nnzAnum[i]); } */ for (int i=0;i<rbnum*cbnum;i++) { int rowlength; if (nnzAnum[i]!=0) { submatrixA[i].value=(MAT_VAL_TYPE*)malloc((nnzAnum[i])*sizeof(MAT_VAL_TYPE)); submatrixA[i].columnindex=(int*)malloc((nnzAnum[i])*sizeof(int)); if (i<cbnum*(rbnum-1)){ rowlength=BLOCK_SIZE; } else{ rowlength=rowA-(rbnum-1)*BLOCK_SIZE; } submatrixA[i].rowpointer=(MAT_PTR_TYPE *)malloc((rowlength+1)*sizeof(MAT_PTR_TYPE)); memset(submatrixA[i].rowpointer,0,(rowlength+1)*sizeof(MAT_PTR_TYPE)); } /* if (nnzBnum[i]!=0) { submatrixB[i].value=(MAT_VAL_TYPE*)malloc((nnzBnum[i])*sizeof(MAT_VAL_TYPE)); submatrixB[i].columnindex=(int*)malloc((nnzBnum[i])*sizeof(int)); if (i<rbnum*(cbnum-1)){ rowlength=BLOCK_SIZE; } else{ rowlength=rowB-(cbnum-1)*BLOCK_SIZE; } submatrixB[i].rowpointer=(MAT_PTR_TYPE*)malloc((rowlength+1)*sizeof(MAT_PTR_TYPE)); memset(submatrixB[i].rowpointer,0,(rowlength+1)*sizeof(MAT_PTR_TYPE)); } */ } MAT_PTR_TYPE *rowblock_ptr; //block rowpointer of A int *columnid; // block columnindex of A int *nnzb_A; int colid=0; rowblock_ptr=(MAT_PTR_TYPE *)malloc((rbnum+1)*sizeof(MAT_PTR_TYPE)); columnid=(int *)malloc(nnzbl*sizeof(int)); nnzb_A=(int *)malloc((nnzbl+1)*sizeof(int)); memset(rowblock_ptr,0,(rbnum+1)*sizeof(MAT_PTR_TYPE)); for (int i=0;i<rbnum;i++) { for (int j=0;j<cbnum;j++) { if (nnzAnum[i*cbnum+j]!=0) { columnid[colid]=j; nnzb_A[colid]=nnzAnum[i*cbnum+j]; rowblock_ptr[i+1]++; colid++; } } } for (int i=1;i<rbnum+1;i++) { rowblock_ptr[i]+=rowblock_ptr[i-1]; } /* for (int i=0;i<nnzbl;i++) { printf("%d ",nnzb[i]); } */ exclusive_scan(nnzb_A,nnzbl+1); /* MAT_PTR_TYPE *colblock_ptr; //block columnpointer of A int *rowid; // block rowindex of B int *nnzb_B; int rid=0; colblock_ptr=(MAT_PTR_TYPE *)malloc((rbnum+1)*sizeof(MAT_PTR_TYPE)); rowid=(int *)malloc(nnzbl*sizeof(int)); nnzb_B=(int *)malloc(nnzbl*sizeof(int)); memset(colblock_ptr,0,(rbnum+1)*sizeof(MAT_PTR_TYPE)); for (int i=0;i<rbnum;i++) { for (int j=0;j<cbnum;j++) { if (nnzBnum[i%rbnum+j*rbnum]!=0) { rowid[rid]=j; nnzb_B[rid]=nnzBnum[i%rbnum+j*rbnum]; colblock_ptr[i+1]++; rid++; } } } for (int i=1;i<rbnum+1;i++) { colblock_ptr[i]+=colblock_ptr[i-1]; } */ /* for (int i=0;i<cbnum+1;i++) { printf("%d ",colblock_ptr[i]); } printf("\n"); */ for (int i=0;i<rbnum-1;i++) { DivideSub(matrixA,i*BLOCK_SIZE,(i+1)*BLOCK_SIZE,submatrixA,i*cbnum,nnzAnum,cbnum); } 
DivideSub(matrixA,(rbnum-1)*BLOCK_SIZE,rowA,submatrixA,(rbnum-1)*cbnum,nnzAnum,cbnum); /* for (int i=0;i<cbnum-1;i++) { DivideSub(matrixB,i*BLOCK_SIZE,(i+1)*BLOCK_SIZE,submatrixB,i*cbnum,nnzBnum,rbnum); } DivideSub(matrixB,(cbnum-1)*BLOCK_SIZE,rowB,submatrixB,(cbnum-1)*cbnum,nnzBnum,rbnum); */ /* for (int i=0;i<rbnum*cbnum;i++) { for (int j=0;j<nnzBnum[i];j++) { printf("%d ",submatrixB[i].columnindex[j]); } printf("\n"); } */ MAT_VAL_TYPE *BlockA_Val=(MAT_VAL_TYPE*)malloc((nnzA)*sizeof(MAT_VAL_TYPE)); char *BlockA_Col=(char*)malloc((nnzA)*sizeof(char)); char *BlockA_Ptr; int vAid=0; int pAid=0; int ptrA_length=0; //calculate BlockA_Ptr length for (int i=0;i<rbnum;i++) { int rowlength; for (int j=rowblock_ptr[i];j<rowblock_ptr[i+1];j++) { int block=i*cbnum+columnid[j]; if (block<(rbnum-1)*cbnum) { rowlength=BLOCK_SIZE; } else{ rowlength=rowA-(rbnum-1)*BLOCK_SIZE; } ptrA_length+=(rowlength); } } // printf("ptrA length=%d\n",ptrA_length); BlockA_Ptr=(char*)malloc((ptrA_length)*sizeof(char)); for (int i=0;i<rbnum;i++) { int rowlength; for (int j=rowblock_ptr[i];j<rowblock_ptr[i+1];j++) { int block=i*cbnum+columnid[j]; if (block<(rbnum-1)*cbnum) { rowlength=BLOCK_SIZE; } else{ rowlength=rowA-(rbnum-1)*BLOCK_SIZE; } for (int k=0;k<submatrixA[block].rowpointer[rowlength];k++) { BlockA_Val[vAid]=submatrixA[block].value[k]; BlockA_Col[vAid]=submatrixA[block].columnindex[k]; vAid++; } for (int jid=0;jid<rowlength;jid++) { BlockA_Ptr[pAid]=submatrixA[block].rowpointer[jid]; pAid++; } } } /* for (int i=0;i<nnzA;i++) { printf("%f ",BlockA_Val[i]); } printf("\n"); for (int i=0;i<nnzA;i++) { printf("%d ",BlockA_Col[i]); } printf("\n"); for (int i=0;i<ptrA_length;i++) { printf("%d ",BlockA_Ptr[i]); } printf("\n"); */ /* MAT_VAL_TYPE *BlockB_Val=(MAT_VAL_TYPE*)malloc((nnzA)*sizeof(MAT_VAL_TYPE)); char *BlockB_Col=(char*)malloc((nnzA)*sizeof(char)); char *BlockB_Ptr; int vBid=0; int pBid=0; int ptrB_length=0; for (int i=0;i<rbnum;i++) { int rowlength; for (int j=colblock_ptr[i];j<colblock_ptr[i+1];j++) { int block=i%rbnum+rowid[j]*rbnum; if (block<(cbnum-1)*rbnum) { rowlength=BLOCK_SIZE; } else{ rowlength=rowB-(cbnum-1)*BLOCK_SIZE; } ptrB_length+=(rowlength); } } printf("ptrB length=%d\n",ptrB_length); BlockB_Ptr=(char*)malloc((ptrB_length)*sizeof(char)); for (int i=0;i<rbnum;i++) { int rowlength; for (int j=colblock_ptr[i];j<colblock_ptr[i+1];j++) { int block=i%rbnum+rowid[j]*rbnum; if (block<(cbnum-1)*rbnum) { rowlength=BLOCK_SIZE; } else{ rowlength=rowB-(cbnum-1)*BLOCK_SIZE; } for (int k=0;k<submatrixB[block].rowpointer[rowlength];k++) { BlockB_Val[vBid]=submatrixB[block].value[k]; BlockB_Col[vBid]=submatrixB[block].columnindex[k]; vBid++; } for (int jid=0;jid<rowlength;jid++) { BlockB_Ptr[pBid]=submatrixB[block].rowpointer[jid]; pBid++; } } } */ /* for (int i=0;i<nnzA;i++) { printf("%f ",BlockB_Val[i]); } printf("\n"); for (int i=0;i<nnzA;i++) { printf("%d ",BlockB_Col[i]); } printf("\n"); for (int i=0;i<ptrB_length;i++) { printf("%d ",BlockB_Ptr[i]); } printf("\n"); */ for (int i=0;i<rbnum*cbnum;i++){ if (nnzAnum[i]!=0){ free(submatrixA[i].value); free(submatrixA[i].columnindex); free(submatrixA[i].rowpointer); } /* if (nnzBnum[i]!=0){ free(submatrixB[i].value); free(submatrixB[i].columnindex); free(submatrixB[i].rowpointer); } */ } // spmv using csr MAT_VAL_TYPE *x = (MAT_VAL_TYPE *)malloc(sizeof(MAT_VAL_TYPE) * colA); for (int i = 0; i < colA; i++) { x[i] = i % 10; } MAT_VAL_TYPE *y_golden = (MAT_VAL_TYPE *)malloc(sizeof(MAT_VAL_TYPE) * rowA); gettimeofday(&t1, NULL); for (int i = 0; i < 
BENCH_REPEAT; i++) { for (int i = 0; i < rowA; i++) { MAT_VAL_TYPE sum = 0; for (int j = matrixA.rowpointer[i]; j < matrixA.rowpointer[i+1]; j++) { sum += matrixA.value[j] * x[matrixA.columnindex[j]]; } y_golden[i] = sum; } } gettimeofday(&t2, NULL); double time_csr_spmv = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; time_csr_spmv /= BENCH_REPEAT; printf(" CPU CSR SpMV %4.2f GFlops\n", 2 * (double)nnzA * 1.0e-6 / time_csr_spmv); // spmv using block csr MAT_VAL_TYPE *y = (MAT_VAL_TYPE *)malloc(sizeof(MAT_VAL_TYPE) * rowA); // for each block row // parallel for // printf("rbnum=%d\n",rbnum); /* for(int i=0;i<rbnum+1;i++) { printf("%d ",rowblock_ptr[i]); } printf("\n"); */ for (int blki = 0; blki < rbnum; blki++) { printf("rowblockid = %i, #blocks = %i\n", blki, rowblock_ptr[blki+1]-rowblock_ptr[blki]); for (int blkj = rowblock_ptr[blki]; blkj < rowblock_ptr[blki+1]; blkj++) { printf("%i, ", nnzb_A[blkj+1]-nnzb_A[blkj]); } printf("\n"); } gettimeofday(&t1, NULL); for (int i = 0; i < BENCH_REPEAT; i++) { for (int blki = 0; blki < rbnum; blki++) { // clear y covered by the block row // int blocksize; // blocksize= blki == (rbnum-1) ? for (int ri = 0; ri < BLOCK_SIZE; ri++) { y[blki * BLOCK_SIZE + ri] = 0; } // for each block in the block row for (int blkj = rowblock_ptr[blki]; blkj < rowblock_ptr[blki+1]; blkj++) { int x_offset = columnid[blkj] * BLOCK_SIZE; // for each row in the block for (int ri = 0; ri < BLOCK_SIZE; ri++) { MAT_VAL_TYPE sum = 0; // for each nonzero in the row of the block // the last row uses nnzlocal int stop = ri == BLOCK_SIZE - 1 ? (nnzb_A[blkj+1]-nnzb_A[blkj]) : BlockA_Ptr[ri+1+blkj*BLOCK_SIZE]; for (int rj = BlockA_Ptr[blkj*BLOCK_SIZE+ri]; rj < stop; rj++) { sum += x[x_offset + BlockA_Col[nnzb_A[blkj]+rj]] * BlockA_Val[nnzb_A[blkj]+rj]; } y[blki * BLOCK_SIZE + ri] += sum; } } } } gettimeofday(&t2, NULL); double time_ccsb_spmv = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; time_ccsb_spmv /= BENCH_REPEAT; printf(" CPU CCSB SpMV %4.2f GFlops\n", 2 * (double)nnzA * 1.0e-6 / time_ccsb_spmv); // check results int errcount = 0; for (int i = 0; i < rowA; i++) { if (y[i] != y_golden[i]) { errcount++; //printf("%f %f,%d\n",y[i],y_golden[i],i); } } printf("spmv errcount = %i\n", errcount); // run gpu // set device int device_id = 0; hipSetDevice(device_id); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device_id); printf("---------------------------------------------------------------\n"); printf("Device [ %i ] %s @ %4.2f MHz\n", device_id, deviceProp.name, deviceProp.clockRate * 1e-3f); ccsb_spmv_cuda(rowblock_ptr, columnid, nnzb_A, BlockA_Ptr, BlockA_Col, BlockA_Val, x, y_golden, rowA, colA, nnzA, rbnum, nnzbl, ptrA_length); }
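/*
 * Added layout note (hedged, derived from the code above): for block row blki, the non-empty
 * BLOCK_SIZE x BLOCK_SIZE blocks are columnid[rowblock_ptr[blki] .. rowblock_ptr[blki+1]-1];
 * nnzb_A holds the exclusive scan of per-block nonzero counts; BlockA_Ptr stores one row-start
 * offset per row of each block, and BlockA_Col stores column indices local to the block.
 * Because both BlockA_Ptr and BlockA_Col are declared as char, the format implicitly caps the
 * per-block nonzero count and BLOCK_SIZE at the range of a char (127 when char is signed).
 */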
8b14900e842f9849a920ff8a1ef2a3c9655bcc6d.cu
#include"common.h" #include"mmio_highlevel.h" //#include"mmio.h" #include"utils.h" #include "ccsb_spmv.h" typedef struct { MAT_VAL_TYPE *value; int *columnindex; MAT_PTR_TYPE *rowpointer; }SMatrix; void DivideSub(SMatrix A,int start,int over,SMatrix submatrix[],int sub,int nnznum[],int cbnum) { int i,j,k; j=A.rowpointer[start]; int *num; num=(int*)malloc((cbnum)*sizeof(int)); //count=(int*)malloc((SubNum)*sizeof(int)); memset(num,0,(cbnum)*sizeof(int)); int temp=1; for (i=start;i<over;i++) { while(j<A.rowpointer[i+1]){ for (k=0;k<cbnum;k++){ if (nnznum[sub+k]!=0){ if (k==cbnum-1){ num[k]++; submatrix[sub+k].value[num[k]-1]=A.value[j]; submatrix[sub+k].columnindex[num[k]-1]=A.columnindex[j]-k*BLOCK_SIZE; } else if (A.columnindex[j]>=k*BLOCK_SIZE&&A.columnindex[j]<(k+1)*BLOCK_SIZE){ num[k]++; submatrix[sub+k].value[num[k]-1]=A.value[j]; submatrix[sub+k].columnindex[num[k]-1]=A.columnindex[j]-k*BLOCK_SIZE; break; } } } j++; } for (int p=0;p<cbnum;p++){ if (nnznum[sub+p]!=0) submatrix[sub+p].rowpointer[temp]=num[p]; } temp++; //printf("%d\n",num[p]); } free(num); } void nnzNum(SMatrix A,int nnz[],int start,int over,int i,int cbnum) { //printf("start=%d,over=%d\n",start,over); //printf("Subcol=%d\n",SubCol); int j; for (j=A.rowpointer[start];j<A.rowpointer[over];j++){ for (int k=0;k<cbnum-1;k++){ if (A.columnindex[j]>=k*BLOCK_SIZE&&A.columnindex[j]<(k+1)*BLOCK_SIZE){ nnz[i+k]++; break; } } } int m=0; for (int p=i;p<i+cbnum-1;p++) m+= nnz[p]; nnz[i+cbnum-1]=A.rowpointer[over]-A.rowpointer[start]-m; } int main(int argc, char ** argv) { if (argc < 2) { printf("Run the code by './test matrix.mtx'.\n"); return 0; } printf("--------------------------------!!!!!!!!------------------------------------\n"); struct timeval t1, t2; int rowA; int colA; MAT_PTR_TYPE nnzA; int isSymmetricA; SMatrix matrixA; char *filename; filename = argv[1]; printf("MAT: -------------- %s --------------\n", filename); // load mtx A data to the csr format gettimeofday(&t1, NULL); mmio_allinone(&rowA, &colA, &nnzA, &isSymmetricA, &matrixA.rowpointer, &matrixA.columnindex, &matrixA.value, filename); gettimeofday(&t2, NULL); double time_loadmat = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; printf("input matrix A: ( %i, %i ) nnz = %i\n loadfile time = %4.5f sec\n", rowA, colA, nnzA, time_loadmat/1000.0); for (int i = 0; i < nnzA; i++) matrixA.value[i] = i % 10; if (rowA != colA) { printf("This code only computes square matrices.\n Exit.\n"); return 0; } rowA=(rowA/BLOCK_SIZE) * BLOCK_SIZE; nnzA=matrixA.rowpointer[rowA]; // MAT_PTR_TYPE *cscColPtrA = (MAT_PTR_TYPE *)malloc((colA+1) * sizeof(MAT_PTR_TYPE)); // int *cscRowIdxA = (int *)malloc(nnzA * sizeof(int)); // MAT_VAL_TYPE *cscValA = (MAT_VAL_TYPE *)malloc(nnzA * sizeof(MAT_VAL_TYPE)); // transpose A from csr to csc // matrix_transposition(rowA, colA, nnzA, matrixA.rowpointer, matrixA.columnindex, matrixA.value,cscRowIdxA, cscColPtrA, cscValA); /* SMatrix matrixB; int rowB=colA; int colB=rowA; matrixB.rowpointer = cscColPtrA; matrixB.columnindex = cscRowIdxA; matrixB.value = cscValA; */ if (BLOCK_SIZE>rowA){ printf("Error!\n"); return 0; } int rbnum=0; int cbnum=0; if (rowA%BLOCK_SIZE==0) rbnum=rowA/BLOCK_SIZE; else rbnum=(rowA/BLOCK_SIZE)+1; if (colA%BLOCK_SIZE==0) cbnum=colA/BLOCK_SIZE; else cbnum=(colA/BLOCK_SIZE)+1; SMatrix *submatrixA; // SMatrix *submatrixB; // SMatrix *submatrixC; submatrixA=(SMatrix*)malloc((rbnum*cbnum)*sizeof(SMatrix)); // submatrixB=(SMatrix*)malloc((rbnum*cbnum)*sizeof(SMatrix)); // 
submatrixC=(SMatrix*)malloc((rbnum*cbnum)*sizeof(SMatrix)); int *nnzAnum,*nnzBnum; nnzAnum=(int*)malloc((rbnum*cbnum)*sizeof(int)); // nnzBnum=(int*)malloc((rbnum*cbnum)*sizeof(int)); //nnzCnum=(int*)malloc((SubNum*SubNum)*sizeof(int)); memset(nnzAnum,0,(rbnum*cbnum)*sizeof(int)); // memset(nnzBnum,0,(rbnum*cbnum)*sizeof(int)); //calculate nnz in each block #pragma omp parallel for for (int i=0;i<rbnum-1;i++) { nnzNum(matrixA,nnzAnum,i*BLOCK_SIZE,(i+1)*BLOCK_SIZE,i*cbnum,cbnum); } nnzNum(matrixA,nnzAnum,(rbnum-1)*BLOCK_SIZE,rowA,(rbnum-1)*cbnum,cbnum); /* for (int i=0;i<cbnum-1;i++) { nnzNum(matrixB,nnzBnum,i*BLOCK_SIZE,(i+1)*BLOCK_SIZE,i*cbnum,rbnum); } nnzNum(matrixB,nnzBnum,(cbnum-1)*BLOCK_SIZE,rowB,(cbnum-1)*cbnum,rbnum); */ int nnzbl=0; for (int i=0;i<rbnum*cbnum;i++) //calculate number of non-zero blocks { if (nnzAnum[i]!=0) { nnzbl++; } } /* for (int i=0;i<rbnum;i++) { for(int j=0;j<cbnum;j++) { if (nnzAnum[i*cbnum+j]!=0) printf("%d ",nnzAnum[i*cbnum+j]); } printf("!!!!!!!!!!!!!\n"); } */ /* for (int i=0;i<SubNum*SubNum;i++) { writeresults("blocksize.csv", filename, rowA, colA,nnzA,i,nnzAnum[i]); } */ for (int i=0;i<rbnum*cbnum;i++) { int rowlength; if (nnzAnum[i]!=0) { submatrixA[i].value=(MAT_VAL_TYPE*)malloc((nnzAnum[i])*sizeof(MAT_VAL_TYPE)); submatrixA[i].columnindex=(int*)malloc((nnzAnum[i])*sizeof(int)); if (i<cbnum*(rbnum-1)){ rowlength=BLOCK_SIZE; } else{ rowlength=rowA-(rbnum-1)*BLOCK_SIZE; } submatrixA[i].rowpointer=(MAT_PTR_TYPE *)malloc((rowlength+1)*sizeof(MAT_PTR_TYPE)); memset(submatrixA[i].rowpointer,0,(rowlength+1)*sizeof(MAT_PTR_TYPE)); } /* if (nnzBnum[i]!=0) { submatrixB[i].value=(MAT_VAL_TYPE*)malloc((nnzBnum[i])*sizeof(MAT_VAL_TYPE)); submatrixB[i].columnindex=(int*)malloc((nnzBnum[i])*sizeof(int)); if (i<rbnum*(cbnum-1)){ rowlength=BLOCK_SIZE; } else{ rowlength=rowB-(cbnum-1)*BLOCK_SIZE; } submatrixB[i].rowpointer=(MAT_PTR_TYPE*)malloc((rowlength+1)*sizeof(MAT_PTR_TYPE)); memset(submatrixB[i].rowpointer,0,(rowlength+1)*sizeof(MAT_PTR_TYPE)); } */ } MAT_PTR_TYPE *rowblock_ptr; //block rowpointer of A int *columnid; // block columnindex of A int *nnzb_A; int colid=0; rowblock_ptr=(MAT_PTR_TYPE *)malloc((rbnum+1)*sizeof(MAT_PTR_TYPE)); columnid=(int *)malloc(nnzbl*sizeof(int)); nnzb_A=(int *)malloc((nnzbl+1)*sizeof(int)); memset(rowblock_ptr,0,(rbnum+1)*sizeof(MAT_PTR_TYPE)); for (int i=0;i<rbnum;i++) { for (int j=0;j<cbnum;j++) { if (nnzAnum[i*cbnum+j]!=0) { columnid[colid]=j; nnzb_A[colid]=nnzAnum[i*cbnum+j]; rowblock_ptr[i+1]++; colid++; } } } for (int i=1;i<rbnum+1;i++) { rowblock_ptr[i]+=rowblock_ptr[i-1]; } /* for (int i=0;i<nnzbl;i++) { printf("%d ",nnzb[i]); } */ exclusive_scan(nnzb_A,nnzbl+1); /* MAT_PTR_TYPE *colblock_ptr; //block columnpointer of A int *rowid; // block rowindex of B int *nnzb_B; int rid=0; colblock_ptr=(MAT_PTR_TYPE *)malloc((rbnum+1)*sizeof(MAT_PTR_TYPE)); rowid=(int *)malloc(nnzbl*sizeof(int)); nnzb_B=(int *)malloc(nnzbl*sizeof(int)); memset(colblock_ptr,0,(rbnum+1)*sizeof(MAT_PTR_TYPE)); for (int i=0;i<rbnum;i++) { for (int j=0;j<cbnum;j++) { if (nnzBnum[i%rbnum+j*rbnum]!=0) { rowid[rid]=j; nnzb_B[rid]=nnzBnum[i%rbnum+j*rbnum]; colblock_ptr[i+1]++; rid++; } } } for (int i=1;i<rbnum+1;i++) { colblock_ptr[i]+=colblock_ptr[i-1]; } */ /* for (int i=0;i<cbnum+1;i++) { printf("%d ",colblock_ptr[i]); } printf("\n"); */ for (int i=0;i<rbnum-1;i++) { DivideSub(matrixA,i*BLOCK_SIZE,(i+1)*BLOCK_SIZE,submatrixA,i*cbnum,nnzAnum,cbnum); } DivideSub(matrixA,(rbnum-1)*BLOCK_SIZE,rowA,submatrixA,(rbnum-1)*cbnum,nnzAnum,cbnum); /* for (int 
i=0;i<cbnum-1;i++) { DivideSub(matrixB,i*BLOCK_SIZE,(i+1)*BLOCK_SIZE,submatrixB,i*cbnum,nnzBnum,rbnum); } DivideSub(matrixB,(cbnum-1)*BLOCK_SIZE,rowB,submatrixB,(cbnum-1)*cbnum,nnzBnum,rbnum); */ /* for (int i=0;i<rbnum*cbnum;i++) { for (int j=0;j<nnzBnum[i];j++) { printf("%d ",submatrixB[i].columnindex[j]); } printf("\n"); } */ MAT_VAL_TYPE *BlockA_Val=(MAT_VAL_TYPE*)malloc((nnzA)*sizeof(MAT_VAL_TYPE)); char *BlockA_Col=(char*)malloc((nnzA)*sizeof(char)); char *BlockA_Ptr; int vAid=0; int pAid=0; int ptrA_length=0; //calculate BlockA_Ptr length for (int i=0;i<rbnum;i++) { int rowlength; for (int j=rowblock_ptr[i];j<rowblock_ptr[i+1];j++) { int block=i*cbnum+columnid[j]; if (block<(rbnum-1)*cbnum) { rowlength=BLOCK_SIZE; } else{ rowlength=rowA-(rbnum-1)*BLOCK_SIZE; } ptrA_length+=(rowlength); } } // printf("ptrA length=%d\n",ptrA_length); BlockA_Ptr=(char*)malloc((ptrA_length)*sizeof(char)); for (int i=0;i<rbnum;i++) { int rowlength; for (int j=rowblock_ptr[i];j<rowblock_ptr[i+1];j++) { int block=i*cbnum+columnid[j]; if (block<(rbnum-1)*cbnum) { rowlength=BLOCK_SIZE; } else{ rowlength=rowA-(rbnum-1)*BLOCK_SIZE; } for (int k=0;k<submatrixA[block].rowpointer[rowlength];k++) { BlockA_Val[vAid]=submatrixA[block].value[k]; BlockA_Col[vAid]=submatrixA[block].columnindex[k]; vAid++; } for (int jid=0;jid<rowlength;jid++) { BlockA_Ptr[pAid]=submatrixA[block].rowpointer[jid]; pAid++; } } } /* for (int i=0;i<nnzA;i++) { printf("%f ",BlockA_Val[i]); } printf("\n"); for (int i=0;i<nnzA;i++) { printf("%d ",BlockA_Col[i]); } printf("\n"); for (int i=0;i<ptrA_length;i++) { printf("%d ",BlockA_Ptr[i]); } printf("\n"); */ /* MAT_VAL_TYPE *BlockB_Val=(MAT_VAL_TYPE*)malloc((nnzA)*sizeof(MAT_VAL_TYPE)); char *BlockB_Col=(char*)malloc((nnzA)*sizeof(char)); char *BlockB_Ptr; int vBid=0; int pBid=0; int ptrB_length=0; for (int i=0;i<rbnum;i++) { int rowlength; for (int j=colblock_ptr[i];j<colblock_ptr[i+1];j++) { int block=i%rbnum+rowid[j]*rbnum; if (block<(cbnum-1)*rbnum) { rowlength=BLOCK_SIZE; } else{ rowlength=rowB-(cbnum-1)*BLOCK_SIZE; } ptrB_length+=(rowlength); } } printf("ptrB length=%d\n",ptrB_length); BlockB_Ptr=(char*)malloc((ptrB_length)*sizeof(char)); for (int i=0;i<rbnum;i++) { int rowlength; for (int j=colblock_ptr[i];j<colblock_ptr[i+1];j++) { int block=i%rbnum+rowid[j]*rbnum; if (block<(cbnum-1)*rbnum) { rowlength=BLOCK_SIZE; } else{ rowlength=rowB-(cbnum-1)*BLOCK_SIZE; } for (int k=0;k<submatrixB[block].rowpointer[rowlength];k++) { BlockB_Val[vBid]=submatrixB[block].value[k]; BlockB_Col[vBid]=submatrixB[block].columnindex[k]; vBid++; } for (int jid=0;jid<rowlength;jid++) { BlockB_Ptr[pBid]=submatrixB[block].rowpointer[jid]; pBid++; } } } */ /* for (int i=0;i<nnzA;i++) { printf("%f ",BlockB_Val[i]); } printf("\n"); for (int i=0;i<nnzA;i++) { printf("%d ",BlockB_Col[i]); } printf("\n"); for (int i=0;i<ptrB_length;i++) { printf("%d ",BlockB_Ptr[i]); } printf("\n"); */ for (int i=0;i<rbnum*cbnum;i++){ if (nnzAnum[i]!=0){ free(submatrixA[i].value); free(submatrixA[i].columnindex); free(submatrixA[i].rowpointer); } /* if (nnzBnum[i]!=0){ free(submatrixB[i].value); free(submatrixB[i].columnindex); free(submatrixB[i].rowpointer); } */ } // spmv using csr MAT_VAL_TYPE *x = (MAT_VAL_TYPE *)malloc(sizeof(MAT_VAL_TYPE) * colA); for (int i = 0; i < colA; i++) { x[i] = i % 10; } MAT_VAL_TYPE *y_golden = (MAT_VAL_TYPE *)malloc(sizeof(MAT_VAL_TYPE) * rowA); gettimeofday(&t1, NULL); for (int i = 0; i < BENCH_REPEAT; i++) { for (int i = 0; i < rowA; i++) { MAT_VAL_TYPE sum = 0; for (int j = matrixA.rowpointer[i]; 
j < matrixA.rowpointer[i+1]; j++) { sum += matrixA.value[j] * x[matrixA.columnindex[j]]; } y_golden[i] = sum; } } gettimeofday(&t2, NULL); double time_csr_spmv = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; time_csr_spmv /= BENCH_REPEAT; printf(" CPU CSR SpMV %4.2f GFlops\n", 2 * (double)nnzA * 1.0e-6 / time_csr_spmv); // spmv using block csr MAT_VAL_TYPE *y = (MAT_VAL_TYPE *)malloc(sizeof(MAT_VAL_TYPE) * rowA); // for each block row // parallel for // printf("rbnum=%d\n",rbnum); /* for(int i=0;i<rbnum+1;i++) { printf("%d ",rowblock_ptr[i]); } printf("\n"); */ for (int blki = 0; blki < rbnum; blki++) { printf("rowblockid = %i, #blocks = %i\n", blki, rowblock_ptr[blki+1]-rowblock_ptr[blki]); for (int blkj = rowblock_ptr[blki]; blkj < rowblock_ptr[blki+1]; blkj++) { printf("%i, ", nnzb_A[blkj+1]-nnzb_A[blkj]); } printf("\n"); } gettimeofday(&t1, NULL); for (int i = 0; i < BENCH_REPEAT; i++) { for (int blki = 0; blki < rbnum; blki++) { // clear y covered by the block row // int blocksize; // blocksize= blki == (rbnum-1) ? for (int ri = 0; ri < BLOCK_SIZE; ri++) { y[blki * BLOCK_SIZE + ri] = 0; } // for each block in the block row for (int blkj = rowblock_ptr[blki]; blkj < rowblock_ptr[blki+1]; blkj++) { int x_offset = columnid[blkj] * BLOCK_SIZE; // for each row in the block for (int ri = 0; ri < BLOCK_SIZE; ri++) { MAT_VAL_TYPE sum = 0; // for each nonzero in the row of the block // the last row uses nnzlocal int stop = ri == BLOCK_SIZE - 1 ? (nnzb_A[blkj+1]-nnzb_A[blkj]) : BlockA_Ptr[ri+1+blkj*BLOCK_SIZE]; for (int rj = BlockA_Ptr[blkj*BLOCK_SIZE+ri]; rj < stop; rj++) { sum += x[x_offset + BlockA_Col[nnzb_A[blkj]+rj]] * BlockA_Val[nnzb_A[blkj]+rj]; } y[blki * BLOCK_SIZE + ri] += sum; } } } } gettimeofday(&t2, NULL); double time_ccsb_spmv = (t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0; time_ccsb_spmv /= BENCH_REPEAT; printf(" CPU CCSB SpMV %4.2f GFlops\n", 2 * (double)nnzA * 1.0e-6 / time_ccsb_spmv); // check results int errcount = 0; for (int i = 0; i < rowA; i++) { if (y[i] != y_golden[i]) { errcount++; //printf("%f %f,%d\n",y[i],y_golden[i],i); } } printf("spmv errcount = %i\n", errcount); // run gpu // set device int device_id = 0; cudaSetDevice(device_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device_id); printf("---------------------------------------------------------------\n"); printf("Device [ %i ] %s @ %4.2f MHz\n", device_id, deviceProp.name, deviceProp.clockRate * 1e-3f); ccsb_spmv_cuda(rowblock_ptr, columnid, nnzb_A, BlockA_Ptr, BlockA_Col, BlockA_Val, x, y_golden, rowA, colA, nnzA, rbnum, nnzbl, ptrA_length); }
6fb1ab980680ca7ab0d7122c196044c81ccca942.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/device_functions.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <iostream> #include <iomanip> #define _BLOCK_SIZE 64 #define checkCudaErrorsC(val) check( (val), #val, __FILE__, __LINE__) template<typename T> void check(T err, const char* const func, const char* const file, const int line) { if (err != hipSuccess) { std::cerr << "CUDA error at: " << file << ":" << line << std::endl; std::cerr << hipGetErrorString(err) << " " << func << std::endl; exit(1); } } // ========================================================================================================= // ========================================================================================================= /* * Extracts the highest value and the lowest value from the max_input and min_input. This kernel is prepared to be * executed iteratively, as it might process large amount of data that cannot be stored in a single block's shared memory * Each iteration will use the output of the previous one as input * @param min_input elements from which to find the lowest value * @param max_input elements from which to find the highest value * @param min_output output array of size len / blockSize which will contain the lowest elements from min_input * @param max_output output array of size len / blockSize which will contain the highest elements from max_output * @param len number of elments to process from both inputs, starting from the begining (allows iterative processing without resizing the input/output) */ __global__ void findMinMax(const float * min_input, const float * max_input, float * min_output, float * max_output, unsigned int len) { unsigned int ti = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + ti; __shared__ float min_cache[_BLOCK_SIZE * 2]; __shared__ float max_cache[_BLOCK_SIZE * 2]; // Fill the cache of out of bounds elements with values that will be discarded if (i >= len) { min_cache[ti] = 99999.f; min_cache[ti + blockDim.x] = 99999.f; max_cache[ti] = -99999.f; max_cache[ti + blockDim.x] = -99999.f; } // Fill in bound ids with real iput values else { min_cache[ti] = min_input[i]; max_cache[ti] = max_input[i]; // If possible, load the next _BLOCK_SIZE elements into shared cache // to be able to perform the process using the whole block unsigned int secondIndex = blockIdx.x * (blockDim.x * 2) + ti; if (secondIndex < len) { min_cache[ti + blockDim.x] = min_input[secondIndex]; max_cache[ti + blockDim.x] = max_input[secondIndex]; } else { min_cache[ti + blockDim.x] = 99999.f; max_cache[ti + blockDim.x] = -99999.f; } } // Allow the whole block to have written their values __syncthreads(); // Recursively divide the block piece of input by 2 until everything is reduced to the first element for (unsigned int s = _BLOCK_SIZE; s > 0; s /= 2) { // min float current = min_cache[ti]; float test = min_cache[ti + s]; min_cache[ti] = current < test ? current : test; // max current = max_cache[ti]; test = max_cache[ti + s]; max_cache[ti] = current > test ? 
current : test; __syncthreads(); } // Thread 0 will use the blockid to write the output in the appropiate position if (ti == 0) { min_output[blockIdx.x] = min_cache[0]; max_output[blockIdx.x] = max_cache[0]; } } // ========================================================================================================= // ========================================================================================================= /* * Builds an instogram by analyzing the input lighting. Transform each value to the instogram bin to which it belongs * based on the range from the min value to the max value. Extracted from class slides * @param buffer input lighting channel * @param size input lighting channel number of elements * @param histo output array where the histogram will be stored * @param minLum minimun value of lighting in the input lighting array * @param lumRange difference between the higeset value and the lowest value in the input lighting array * @param numBins number of bins of which the histogram will be composed */ __global__ void histo(const float *buffer, size_t size, unsigned int *histo, float minLum, float lumRange, size_t numBins) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // All threads handle blockDim.x * gridDim.x consecutive elements if (i < size) { // Compute the bin to which the being-processed lighting value belongs unsigned int bin = ((buffer[i] - minLum) / lumRange) * numBins; atomicAdd(&(histo[bin]), 1); //Varios threads podran intentar incrementar el mismo valor a la vez } } // ========================================================================================================= // ========================================================================================================= /* * Performs exclusive scan over a set of numbers. Extracted from class slides. 
Is designed to work with a single * block thread (it requires to store all the input data into shared memory for later use, so it will not work by * splitting the kernel execution into different blocks which wont share the cache) * @param numberArray elements in which to perform the scan & reduction * @param texSize number of elements in the input array */ __global__ void exclusive_scan(unsigned int * numberArray, unsigned int texSize) { // Shared memory size is passed throught kernel template call, based on the number // of thread which will perform the exclusive scan __shared__ unsigned int tempArray[1024]; int id = blockIdx.x * blockDim.x + threadIdx.x; int threadId = threadIdx.x; int offset = 1; unsigned int temp; int ai = threadId; int bi = threadId + texSize / 2; int i; //assign the shared memory tempArray[ai] = numberArray[id]; tempArray[bi] = numberArray[id + texSize / 2]; //up tree for (i = texSize >> 1; i > 0; i >>= 1) { __syncthreads(); if (threadId < i) { ai = offset * (2 * threadId + 1) - 1; bi = offset * (2 * threadId + 2) - 1; tempArray[bi] += tempArray[ai]; } offset <<= 1; } //put the last one 0 if (threadId == 0) tempArray[texSize - 1] = 0; //down tree for (i = 1; i < texSize; i <<= 1) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (threadId < i) { ai = offset * (2 * threadId + 1) - 1; bi = offset * (2 * threadId + 2) - 1; temp = tempArray[ai]; tempArray[ai] = tempArray[bi]; tempArray[bi] += temp; } } __syncthreads(); numberArray[id] = tempArray[threadId]; numberArray[id + texSize / 2] = tempArray[threadId + texSize / 2]; } // ========================================================================================================= // ========================================================================================================= /* * Returns the needed amount of grids to have 1 thread per element, given the defined block size * @param lenToProcess number of elements to process * @param blockSizeX number of threads per block which will be used * @returns dim3 size of the grid to use */ inline dim3 getGridSize(unsigned int lenToProcess, unsigned int blockSizeX) { unsigned int init = lenToProcess / blockSizeX; // Make sure to launch more threads than elements, not otherwise if (lenToProcess % blockSizeX > 0) init++; init = init == 0 ? 
1 : init; return dim3(init, 1, 1); } // ========================================================================================================= // ========================================================================================================= void calculate_cdf(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { // TODO // 1) Encontrar el valor mximo y mnimo de luminancia en min_logLum and max_logLum a partir del canal logLuminance // ------------------------------------------------------------------------------------------------------------------- const unsigned int len = unsigned int(numRows) * unsigned int(numCols); const dim3 blockSize(_BLOCK_SIZE, 1, 1); float * d_max_output, * d_min_output, * d_min_input, * d_max_input; // Initialize buffers over which we will perform the reduction iteratively checkCudaErrorsC(hipMalloc(&d_min_input, len * sizeof(float))); checkCudaErrorsC(hipMemcpy(d_min_input, d_logLuminance, len * sizeof(float), hipMemcpyDeviceToDevice)); checkCudaErrorsC(hipMalloc(&d_max_input, len * sizeof(float))); checkCudaErrorsC(hipMemcpy(d_max_input, d_logLuminance, len * sizeof(float), hipMemcpyDeviceToDevice)); // Iterator which delimites the range of the data we are processing // is updated according to the reduction performed in the kernel unsigned int iterator = len; // Compute grid size based on half of the the elements, since we are using 1 thread to process 2 elements on initialisation in the kernel dim3 gridSize = getGridSize(len / 2, _BLOCK_SIZE); // We initialize the output buffers the the size required for the first iteration checkCudaErrorsC(hipMalloc(&d_max_output, (len / _BLOCK_SIZE) * sizeof(float))); checkCudaErrorsC(hipMalloc(&d_min_output, (len / _BLOCK_SIZE) * sizeof(float))); do { // Reduce the current input findMinMax << <gridSize, blockSize >> > (d_min_input, d_max_input, d_min_output, d_max_output, iterator); hipDeviceSynchronize(); checkCudaErrorsC(hipGetLastError()); // Reduce the iterator according to the reduction performed iterator /= _BLOCK_SIZE; gridSize = getGridSize(iterator / 2, _BLOCK_SIZE); // Update next kernel call input with previous kernel call output if (iterator > 0) { // Copy only the necessary data checkCudaErrorsC(hipMemcpy(d_min_input, d_min_output, iterator * sizeof(float), hipMemcpyDeviceToDevice)); checkCudaErrorsC(hipMemcpy(d_max_input, d_max_output, iterator * sizeof(float), hipMemcpyDeviceToDevice)); } } while (iterator > 0); checkCudaErrorsC(hipMemcpy(&min_logLum, d_min_output, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrorsC(hipMemcpy(&max_logLum, d_max_output, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrorsC(hipFree(d_min_input)); checkCudaErrorsC(hipFree(d_max_input)); checkCudaErrorsC(hipFree(d_min_output)); checkCudaErrorsC(hipFree(d_max_output)); // ------------------------------------------------------------------------------------------------------------------- // 2) Obtener el rango a representar // ------------------------------------------------------------------------------------------------------------------- const float range = max_logLum - min_logLum; // ------------------------------------------------------------------------------------------------------------------- // 3) Generar un histograma de todos los valores del canal logLuminance usando la formula: bin = (Lum [i] - lumMin) / lumRange * numBins // 
------------------------------------------------------------------------------------------------------------------- // Use the same array where the output accumulate distribution will be computed, saving allocating and trasfering data // from an auxiliar buffer gridSize = getGridSize(len, _BLOCK_SIZE); histo << <gridSize, blockSize >> > (d_logLuminance, len, d_cdf, min_logLum, range, numBins); hipDeviceSynchronize(); checkCudaErrorsC(hipGetLastError()); unsigned int h_histo[1024]; memset(h_histo, 0, 1024 * sizeof(unsigned int)); checkCudaErrorsC(hipMemcpy(h_histo, d_cdf, 1024 * sizeof(unsigned int), hipMemcpyDeviceToHost)); unsigned int nonZeroes = 0; for (unsigned int i = 0; i < 1024; i++) { if (h_histo[i] != 0) nonZeroes++; } std::cout << nonZeroes << std::endl; // ------------------------------------------------------------------------------------------------------------------- // 4) Realizar un exclusive scan en el histograma para obtener la distribucin acumulada (cdf) // de los valores de luminancia. Se debe almacenar en el puntero c_cdf // ------------------------------------------------------------------------------------------------------------------- gridSize = dim3(1, 1, 1); dim3 bs(unsigned int(numBins) / 2, 1, 1); bs.x = bs.x > 512u ? 512u : bs.x; // max threads per block is 512 exclusive_scan << <gridSize, bs >> > (d_cdf, unsigned int(numBins)); hipDeviceSynchronize(); checkCudaErrorsC(hipGetLastError()); // ------------------------------------------------------------------------------------------------------------------- }
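/*
 * Added note (hedged, based on the kernels above): exclusive_scan is a single-block Blelloch
 * scan, so it assumes numBins is a power of two no larger than the 1024-entry shared array and
 * is handled by numBins/2 threads of one block. In histo, bin = ((lum - minLum) / lumRange) *
 * numBins evaluates to numBins for the maximum luminance value, i.e. one past the end of the
 * histogram, so a clamp to numBins - 1 is commonly added. Finally, findMinMax writes one
 * partial result per block, so the host-side update "iterator /= _BLOCK_SIZE" should be kept
 * consistent with the launch geometry chosen in getGridSize().
 */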
6fb1ab980680ca7ab0d7122c196044c81ccca942.cu
#include <device_functions.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <iomanip>
#include <cfloat>
#include <cstring>

#define _BLOCK_SIZE 64

#define checkCudaErrorsC(val) check( (val), #val, __FILE__, __LINE__)

template<typename T>
void check(T err, const char* const func, const char* const file, const int line)
{
	if (err != cudaSuccess)
	{
		std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
		std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
		exit(1);
	}
}

// =========================================================================================================
// =========================================================================================================
/*
 * Extracts the highest and the lowest value from max_input and min_input. This kernel is prepared to be
 * executed iteratively, as it might process a larger amount of data than fits in a single block's shared memory.
 * Each iteration will use the output of the previous one as input.
 * @param min_input elements from which to find the lowest value
 * @param max_input elements from which to find the highest value
 * @param min_output output array (one element per block) which will contain the lowest elements from min_input
 * @param max_output output array (one element per block) which will contain the highest elements from max_input
 * @param len number of elements to process from both inputs, starting from the beginning (allows iterative processing without resizing the input/output)
 */
__global__ void findMinMax(const float * min_input, const float * max_input, float * min_output, float * max_output, unsigned int len)
{
	unsigned int ti = threadIdx.x;
	// Each block reduces 2 * blockDim.x consecutive elements into a single output value
	unsigned int i = blockIdx.x * (blockDim.x * 2) + ti;

	__shared__ float min_cache[_BLOCK_SIZE * 2];
	__shared__ float max_cache[_BLOCK_SIZE * 2];

	// Fill the cache slots of out-of-bounds elements with values that will be discarded
	if (i >= len)
	{
		min_cache[ti] = FLT_MAX;
		min_cache[ti + blockDim.x] = FLT_MAX;
		max_cache[ti] = -FLT_MAX;
		max_cache[ti + blockDim.x] = -FLT_MAX;
	}
	// Fill in-bounds slots with real input values
	else
	{
		min_cache[ti] = min_input[i];
		max_cache[ti] = max_input[i];

		// If possible, load the next blockDim.x elements into the shared cache
		// so that the whole block takes part in the reduction
		unsigned int secondIndex = i + blockDim.x;
		if (secondIndex < len)
		{
			min_cache[ti + blockDim.x] = min_input[secondIndex];
			max_cache[ti + blockDim.x] = max_input[secondIndex];
		}
		else
		{
			min_cache[ti + blockDim.x] = FLT_MAX;
			max_cache[ti + blockDim.x] = -FLT_MAX;
		}
	}

	// Wait until the whole block has written its values
	__syncthreads();

	// Halve the active range on every step until everything is reduced to the first element
	for (unsigned int s = _BLOCK_SIZE; s > 0; s /= 2)
	{
		if (ti < s)
		{
			// min
			float current = min_cache[ti];
			float test = min_cache[ti + s];
			min_cache[ti] = current < test ? current : test;
			// max
			current = max_cache[ti];
			test = max_cache[ti + s];
			max_cache[ti] = current > test ? current : test;
		}
		__syncthreads();
	}

	// Thread 0 uses the block id to write the output in the appropriate position
	if (ti == 0)
	{
		min_output[blockIdx.x] = min_cache[0];
		max_output[blockIdx.x] = max_cache[0];
	}
}

// =========================================================================================================
// =========================================================================================================
/*
 * Builds a histogram by analyzing the input lighting. Transforms each value into the histogram bin to which it belongs,
 * based on the range from the min value to the max value. Extracted from class slides.
 * @param buffer input lighting channel
 * @param size input lighting channel number of elements
 * @param histo output array where the histogram will be stored
 * @param minLum minimum value of lighting in the input lighting array
 * @param lumRange difference between the highest value and the lowest value in the input lighting array
 * @param numBins number of bins of which the histogram will be composed
 */
__global__ void histo(const float *buffer, size_t size, unsigned int *histo, float minLum, float lumRange, size_t numBins)
{
	unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;

	// Each thread handles one element
	if (i < size)
	{
		// Compute the bin to which the current lighting value belongs
		unsigned int bin = ((buffer[i] - minLum) / lumRange) * numBins;
		// The maximum luminance value falls exactly on numBins; clamp it to the last bin
		if (bin >= numBins)
			bin = numBins - 1;
		atomicAdd(&(histo[bin]), 1); // Several threads may try to increment the same bin at the same time
	}
}

// =========================================================================================================
// =========================================================================================================
/*
 * Performs an exclusive scan over a set of numbers. Extracted from class slides. It is designed to work with a single
 * thread block (it needs to keep all the input data in shared memory for later use, so it will not work by
 * splitting the kernel execution into different blocks, which won't share the cache).
 * @param numberArray elements on which to perform the scan & reduction
 * @param texSize number of elements in the input array
 */
__global__ void exclusive_scan(unsigned int * numberArray, unsigned int texSize)
{
	// Statically allocated shared memory, large enough for up to 1024 bins
	__shared__ unsigned int tempArray[1024];

	int id = blockIdx.x * blockDim.x + threadIdx.x;
	int threadId = threadIdx.x;
	int offset = 1;
	unsigned int temp;
	int ai = threadId;
	int bi = threadId + texSize / 2;
	int i;

	// Assign the shared memory: each thread loads two elements
	tempArray[ai] = numberArray[id];
	tempArray[bi] = numberArray[id + texSize / 2];

	// Up-sweep (reduce) phase
	for (i = texSize >> 1; i > 0; i >>= 1)
	{
		__syncthreads();
		if (threadId < i)
		{
			ai = offset * (2 * threadId + 1) - 1;
			bi = offset * (2 * threadId + 2) - 1;
			tempArray[bi] += tempArray[ai];
		}
		offset <<= 1;
	}

	// Set the last element to 0
	if (threadId == 0)
		tempArray[texSize - 1] = 0;

	// Down-sweep phase: traverse down the tree and build the scan
	for (i = 1; i < texSize; i <<= 1)
	{
		offset >>= 1;
		__syncthreads();
		if (threadId < i)
		{
			ai = offset * (2 * threadId + 1) - 1;
			bi = offset * (2 * threadId + 2) - 1;
			temp = tempArray[ai];
			tempArray[ai] = tempArray[bi];
			tempArray[bi] += temp;
		}
	}
	__syncthreads();

	numberArray[id] = tempArray[threadId];
	numberArray[id + texSize / 2] = tempArray[threadId + texSize / 2];
}

// =========================================================================================================
// =========================================================================================================
/*
 * Returns the number of blocks needed to have 1 thread per element, given the defined block size
 * @param lenToProcess number of elements to process
 * @param blockSizeX number of threads per block which will be used
 * @returns dim3 size of the grid to use
 */
inline dim3 getGridSize(unsigned int lenToProcess, unsigned int blockSizeX)
{
	unsigned int init = lenToProcess / blockSizeX;
	// Make sure we launch at least as many threads as there are elements, never fewer
	if (lenToProcess % blockSizeX > 0)
		init++;
	init = init == 0 ? 1 : init;
	return dim3(init, 1, 1);
}

// =========================================================================================================
// =========================================================================================================
void calculate_cdf(const float* const d_logLuminance, unsigned int* const d_cdf,
	float &min_logLum, float &max_logLum,
	const size_t numRows, const size_t numCols, const size_t numBins)
{
	// TODO
	// 1) Find the maximum and minimum luminance values, max_logLum and min_logLum, from the logLuminance channel
	// -------------------------------------------------------------------------------------------------------------------
	const unsigned int len = static_cast<unsigned int>(numRows) * static_cast<unsigned int>(numCols);
	const dim3 blockSize(_BLOCK_SIZE, 1, 1);

	float * d_max_output, * d_min_output, * d_min_input, * d_max_input;

	// Initialize the buffers over which we will perform the reduction iteratively
	checkCudaErrorsC(cudaMalloc(&d_min_input, len * sizeof(float)));
	checkCudaErrorsC(cudaMemcpy(d_min_input, d_logLuminance, len * sizeof(float), cudaMemcpyDeviceToDevice));
	checkCudaErrorsC(cudaMalloc(&d_max_input, len * sizeof(float)));
	checkCudaErrorsC(cudaMemcpy(d_max_input, d_logLuminance, len * sizeof(float), cudaMemcpyDeviceToDevice));

	// Iterator which delimits the range of the data we are processing;
	// it is updated according to the reduction performed by the kernel
	unsigned int iterator = len;
	// Compute the grid size based on half of the elements (rounded up), since each thread loads 2 elements in the kernel
	dim3 gridSize = getGridSize((len + 1) / 2, _BLOCK_SIZE);

	// Initialize the output buffers to the size required for the first iteration (one element per block)
	checkCudaErrorsC(cudaMalloc(&d_max_output, gridSize.x * sizeof(float)));
	checkCudaErrorsC(cudaMalloc(&d_min_output, gridSize.x * sizeof(float)));

	do
	{
		// Reduce the current input
		findMinMax<<<gridSize, blockSize>>>(d_min_input, d_max_input, d_min_output, d_max_output, iterator);
		cudaDeviceSynchronize();
		checkCudaErrorsC(cudaGetLastError());

		// Each block produced one output element, so the next pass processes gridSize.x elements
		iterator = gridSize.x;
		gridSize = getGridSize((iterator + 1) / 2, _BLOCK_SIZE);

		// Update the next kernel call's input with the previous kernel call's output
		if (iterator > 1)
		{
			// Copy only the necessary data
			checkCudaErrorsC(cudaMemcpy(d_min_input, d_min_output, iterator * sizeof(float), cudaMemcpyDeviceToDevice));
			checkCudaErrorsC(cudaMemcpy(d_max_input, d_max_output, iterator * sizeof(float), cudaMemcpyDeviceToDevice));
		}
	} while (iterator > 1);

	checkCudaErrorsC(cudaMemcpy(&min_logLum, d_min_output, sizeof(float), cudaMemcpyDeviceToHost));
	checkCudaErrorsC(cudaMemcpy(&max_logLum, d_max_output, sizeof(float), cudaMemcpyDeviceToHost));

	checkCudaErrorsC(cudaFree(d_min_input));
	checkCudaErrorsC(cudaFree(d_max_input));
	checkCudaErrorsC(cudaFree(d_min_output));
	checkCudaErrorsC(cudaFree(d_max_output));
	// -------------------------------------------------------------------------------------------------------------------

	// 2) Obtain the range to represent
	// -------------------------------------------------------------------------------------------------------------------
	const float range = max_logLum - min_logLum;
	// -------------------------------------------------------------------------------------------------------------------

	// 3) Generate a histogram of all the values in the logLuminance channel using the formula:
	//    bin = (Lum[i] - lumMin) / lumRange * numBins
	// -------------------------------------------------------------------------------------------------------------------
	// Build the histogram directly in the array where the cumulative distribution will be computed,
	// which saves allocating and transferring data from an auxiliary buffer
	checkCudaErrorsC(cudaMemset(d_cdf, 0, numBins * sizeof(unsigned int))); // make sure the histogram starts from zero
	gridSize = getGridSize(len, _BLOCK_SIZE);
	histo<<<gridSize, blockSize>>>(d_logLuminance, len, d_cdf, min_logLum, range, numBins);
	cudaDeviceSynchronize();
	checkCudaErrorsC(cudaGetLastError());

	// Debug check: count how many bins received at least one sample
	const unsigned int nCheck = numBins < 1024 ? (unsigned int)numBins : 1024u;
	unsigned int h_histo[1024];
	memset(h_histo, 0, 1024 * sizeof(unsigned int));
	checkCudaErrorsC(cudaMemcpy(h_histo, d_cdf, nCheck * sizeof(unsigned int), cudaMemcpyDeviceToHost));
	unsigned int nonZeroes = 0;
	for (unsigned int i = 0; i < nCheck; i++)
	{
		if (h_histo[i] != 0)
			nonZeroes++;
	}
	std::cout << nonZeroes << " non-empty bins" << std::endl;
	// -------------------------------------------------------------------------------------------------------------------

	// 4) Perform an exclusive scan on the histogram to obtain the cumulative distribution (cdf)
	//    of the luminance values. It must be stored in the d_cdf pointer
	// -------------------------------------------------------------------------------------------------------------------
	gridSize = dim3(1, 1, 1);
	dim3 bs(static_cast<unsigned int>(numBins) / 2, 1, 1);
	bs.x = bs.x > 512u ? 512u : bs.x; // max threads per block is 512 on the target hardware
	exclusive_scan<<<gridSize, bs>>>(d_cdf, static_cast<unsigned int>(numBins));
	cudaDeviceSynchronize();
	checkCudaErrorsC(cudaGetLastError());
	// -------------------------------------------------------------------------------------------------------------------
}
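// =========================================================================================================
// Minimal host-side driver sketch (not part of the original assignment code). It shows one way calculate_cdf
// could be exercised on a small synthetic luminance buffer; the function name test_calculate_cdf, the buffer
// sizes and the synthetic data are illustrative assumptions, not the course framework's harness.
// =========================================================================================================
#include <vector>

void test_calculate_cdf()
{
	const size_t numRows = 256, numCols = 256, numBins = 1024;
	const size_t len = numRows * numCols;

	// Synthetic log-luminance values roughly in [-4, 2)
	std::vector<float> h_logLum(len);
	for (size_t i = 0; i < len; ++i)
		h_logLum[i] = -4.0f + 6.0f * (float)(i % 97) / 97.0f;

	float *d_logLum = 0;
	unsigned int *d_cdf = 0;
	checkCudaErrorsC(cudaMalloc(&d_logLum, len * sizeof(float)));
	checkCudaErrorsC(cudaMemcpy(d_logLum, h_logLum.data(), len * sizeof(float), cudaMemcpyHostToDevice));
	checkCudaErrorsC(cudaMalloc(&d_cdf, numBins * sizeof(unsigned int)));
	checkCudaErrorsC(cudaMemset(d_cdf, 0, numBins * sizeof(unsigned int)));

	float minLum = 0.f, maxLum = 0.f;
	calculate_cdf(d_logLum, d_cdf, minLum, maxLum, numRows, numCols, numBins);
	std::cout << "min=" << minLum << " max=" << maxLum << std::endl;

	checkCudaErrorsC(cudaFree(d_logLum));
	checkCudaErrorsC(cudaFree(d_cdf));
}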
957aab785384a504e1bf7d2f16c6d32447745b80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layers/combine_mask_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void CombineMask(const int n, const Dtype* in1, const Dtype* in2, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = (in1[index]==Dtype(1) & in2[index]==Dtype(1)) ? Dtype(1) : Dtype(0); } } template <typename Dtype> void CombineMaskLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Blob<Dtype> *mask = top[0]; hipLaunchKernelGGL(( CombineMask<Dtype>), dim3(CAFFE_GET_BLOCKS(mask->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, mask->count(), bottom[0]->gpu_data(), bottom[1]->gpu_data(), mask->mutable_gpu_data()); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void CombineMaskLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //std::cout<<"backward gpu"<<std::endl; return; } INSTANTIATE_LAYER_GPU_FUNCS(CombineMaskLayer); } // namespace caffe
957aab785384a504e1bf7d2f16c6d32447745b80.cu
#include <cfloat> #include <vector> #include "caffe/layers/combine_mask_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void CombineMask(const int n, const Dtype* in1, const Dtype* in2, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = (in1[index]==Dtype(1) & in2[index]==Dtype(1)) ? Dtype(1) : Dtype(0); } } template <typename Dtype> void CombineMaskLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Blob<Dtype> *mask = top[0]; CombineMask<Dtype><<<CAFFE_GET_BLOCKS(mask->count()), CAFFE_CUDA_NUM_THREADS>>>( mask->count(), bottom[0]->gpu_data(), bottom[1]->gpu_data(), mask->mutable_gpu_data()); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void CombineMaskLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //std::cout<<"backward gpu"<<std::endl; return; } INSTANTIATE_LAYER_GPU_FUNCS(CombineMaskLayer); } // namespace caffe
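// Standalone sketch (not part of the Caffe layer above): the same element-wise "both masks equal 1" combination
// written without the Caffe scaffolding, as a plain grid-stride CUDA kernel over float buffers. The names
// combineMaskPlain and runCombineMask, and the block size of 256, are illustrative assumptions only.
#include <cuda_runtime.h>

__global__ void combineMaskPlain(const int n, const float* in1, const float* in2, float* out)
{
	// Grid-stride loop over the n mask elements
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
		out[i] = (in1[i] == 1.0f && in2[i] == 1.0f) ? 1.0f : 0.0f;
}

inline void runCombineMask(const float* d_in1, const float* d_in2, float* d_out, int n)
{
	const int threads = 256;                      // a common default block size
	const int blocks = (n + threads - 1) / threads;
	combineMaskPlain<<<blocks, threads>>>(n, d_in1, d_in2, d_out);
	cudaDeviceSynchronize();                      // synchronous here only to mirror the layer's behaviour
}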
a6e39e1da5364fa819252eb227c2d2becff22a37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "TomoSynth.h" #include "TomoSynth.h" #include "WeightTables.h" #include "trt_utils.h" #include "EdgeGainCorrect.h" #include <stddef.h> #include "Reconstruct.h" #include "SensorInfo.h" #define TILE_SIZE 8 #define CUDACheck(stmt) do {\ hipError_t err = stmt;\ if (err != hipSuccess) {\ printf("ERROR: Failed to run %s on line %d in function %s.\n", #stmt, __LINE__, __func__); \ exit(-1); \ }\ } while(0) __global__ void reconKernel( uInt8 *llhData, float * weight, float *reconim, float *nor, uint32_t detsizex, uint32_t detsizey, uint32_t row, uint32_t col, float *normim) { int rpos, cpos; int i, j; int idx; int y = blockIdx.x * blockDim.x + threadIdx.x; int x = blockIdx.y * blockDim.y + threadIdx.y; int w = blockIdx.z * blockDim.z + threadIdx.z; if (x < row && y < col) { if( nor[x*col+y] > 0.0 ) { for (i = 0; i < detsizex; i++) { for (j = 0; j < detsizey; j++) { // for (w = 0; w < 4; w++) // { rpos = (weight[i*detsizey*4*3+j*4*3+w*3+0]) + (x * M) + M_HALF; if ((rpos > 0) && (rpos <= M * row)) { cpos = (weight[i*detsizey*4*3+j*4*3+w*3+1]) + (y * M)+ M_HALF; if ((cpos > 0 && (cpos <= M * col))) { rpos = rpos - 1; cpos = cpos - 1; idx = rpos * M * col + cpos; atomicAdd(&reconim[col*M*rpos+cpos], llhData[i*detsizey*row*col+j*row*col+x*col+y] * weight[i*detsizey*4*3+j*4*3+w*3+2]); atomicAdd(&normim[idx], weight[i*detsizey*4*3+j*4*3+w*3+2]); } } // } } } } } } __global__ void edgeGainKernel( uint32_t Mrow, uint32_t Mcol, float *reconim, float *normim ){ int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; int idx; idx = r * Mcol + c; if (r < Mrow && c < Mcol && normim[idx] > 0.0) reconim[ r*Mcol+c] = reconim[r*Mcol+c] / normim[idx]; } //cpu reconstruct float** reconstruct(nova_str *nova, uInt8 ****llhData, float **** weight, float ** nor) { float **reconim; int rpos, cpos; int r, c, i, j, w; uint32_t Mr, Mc; Mr = M * nova->row; Mc = M * nova->col; reconim = (float **) create_2D_float(Mr, Mc); printf(" M=%i\n", M); // Set image to zero for (r = 0; r < Mr; r++) { for (c = 0; c < Mc; c++) { reconim[r][c] = 0.0; } } int idx; // Begin reconstruction for (r = 0; r < nova->row; r++) { for (c = 0; c < nova->col; c++) { if (nor[r][c] > 0.0) { for (i = 0; i < nova->detsizex; i++) { for (j = 0; j < nova->detsizey; j++) { for (w = 0; w < 4; w++) { rpos = (weight[i][j][w][0]) + (r) * (M) + M_HALF; if ((rpos > 0) && (rpos <= M * nova->row)) { cpos = (weight[i][j][w][1]) + (c) * (M)+ M_HALF; if ((cpos > 0 && (cpos <= M * nova->col))) { rpos = rpos - 1; cpos = cpos - 1; #ifdef VERBOSE printf("%i %i %i %i 0 %2.4e %i %i\n",rpos, i, j, w, weight[i][j][w][0],r, M*r+M_HALF); printf("%i %i %i %i 1 %2.4e %i %i\n",cpos, i, j, w, weight[i][j][w][1],c, M*c+M_HALF); printf("%i \n\n",llhData[i][j][c][r]); #endif //if (llhData[i][j][r][c] >0 ){ reconim[rpos][cpos] = reconim[rpos][cpos] + llhData[i][j][r][c] * weight[i][j][w][2]; //} } } } } } } } } #if EDGE_GAIN_CORRECTION float* normim; normim = initEdgeGain(nova, weight, nor); for (r = 0; r < Mr; r++) { for (c = 0; c < Mc; c++) { idx = r * Mc + c; //idx=r+c*Mc; if (normim[idx] > 0.0) { reconim[r][c] = reconim[r][c] / normim[idx]; } } } free(normim); #endif return reconim; }
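// Launch sketch (not present in the original file): one plausible way to normalise the up-sampled image with
// edgeGainKernel, assuming reconim and normim are flattened device buffers of size (M*row)*(M*col). The helper
// name launchEdgeGain and the choice of TILE_SIZE x TILE_SIZE blocks are illustrative assumptions.
static void launchEdgeGain(float *d_reconim, float *d_normim, uint32_t Mrow, uint32_t Mcol)
{
	dim3 block(TILE_SIZE, TILE_SIZE, 1);
	dim3 grid((Mcol + TILE_SIZE - 1) / TILE_SIZE,   // x indexes columns in the kernel
	          (Mrow + TILE_SIZE - 1) / TILE_SIZE,   // y indexes rows
	          1);
	hipLaunchKernelGGL(edgeGainKernel, grid, block, 0, 0, Mrow, Mcol, d_reconim, d_normim);
	CUDACheck(hipDeviceSynchronize());
}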
a6e39e1da5364fa819252eb227c2d2becff22a37.cu
#include "TomoSynth.h" #include "TomoSynth.h" #include "WeightTables.h" #include "trt_utils.h" #include "EdgeGainCorrect.h" #include <stddef.h> #include "Reconstruct.h" #include "SensorInfo.h" #define TILE_SIZE 8 #define CUDACheck(stmt) do {\ cudaError_t err = stmt;\ if (err != cudaSuccess) {\ printf("ERROR: Failed to run %s on line %d in function %s.\n", #stmt, __LINE__, __func__); \ exit(-1); \ }\ } while(0) __global__ void reconKernel( uInt8 *llhData, float * weight, float *reconim, float *nor, uint32_t detsizex, uint32_t detsizey, uint32_t row, uint32_t col, float *normim) { int rpos, cpos; int i, j; int idx; int y = blockIdx.x * blockDim.x + threadIdx.x; int x = blockIdx.y * blockDim.y + threadIdx.y; int w = blockIdx.z * blockDim.z + threadIdx.z; if (x < row && y < col) { if( nor[x*col+y] > 0.0 ) { for (i = 0; i < detsizex; i++) { for (j = 0; j < detsizey; j++) { // for (w = 0; w < 4; w++) // { rpos = (weight[i*detsizey*4*3+j*4*3+w*3+0]) + (x * M) + M_HALF; if ((rpos > 0) && (rpos <= M * row)) { cpos = (weight[i*detsizey*4*3+j*4*3+w*3+1]) + (y * M)+ M_HALF; if ((cpos > 0 && (cpos <= M * col))) { rpos = rpos - 1; cpos = cpos - 1; idx = rpos * M * col + cpos; atomicAdd(&reconim[col*M*rpos+cpos], llhData[i*detsizey*row*col+j*row*col+x*col+y] * weight[i*detsizey*4*3+j*4*3+w*3+2]); atomicAdd(&normim[idx], weight[i*detsizey*4*3+j*4*3+w*3+2]); } } // } } } } } } __global__ void edgeGainKernel( uint32_t Mrow, uint32_t Mcol, float *reconim, float *normim ){ int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; int idx; idx = r * Mcol + c; if (r < Mrow && c < Mcol && normim[idx] > 0.0) reconim[ r*Mcol+c] = reconim[r*Mcol+c] / normim[idx]; } //cpu reconstruct float** reconstruct(nova_str *nova, uInt8 ****llhData, float **** weight, float ** nor) { float **reconim; int rpos, cpos; int r, c, i, j, w; uint32_t Mr, Mc; Mr = M * nova->row; Mc = M * nova->col; reconim = (float **) create_2D_float(Mr, Mc); printf(" M=%i\n", M); // Set image to zero for (r = 0; r < Mr; r++) { for (c = 0; c < Mc; c++) { reconim[r][c] = 0.0; } } int idx; // Begin reconstruction for (r = 0; r < nova->row; r++) { for (c = 0; c < nova->col; c++) { if (nor[r][c] > 0.0) { for (i = 0; i < nova->detsizex; i++) { for (j = 0; j < nova->detsizey; j++) { for (w = 0; w < 4; w++) { rpos = (weight[i][j][w][0]) + (r) * (M) + M_HALF; if ((rpos > 0) && (rpos <= M * nova->row)) { cpos = (weight[i][j][w][1]) + (c) * (M)+ M_HALF; if ((cpos > 0 && (cpos <= M * nova->col))) { rpos = rpos - 1; cpos = cpos - 1; #ifdef VERBOSE printf("%i %i %i %i 0 %2.4e %i %i\n",rpos, i, j, w, weight[i][j][w][0],r, M*r+M_HALF); printf("%i %i %i %i 1 %2.4e %i %i\n",cpos, i, j, w, weight[i][j][w][1],c, M*c+M_HALF); printf("%i \n\n",llhData[i][j][c][r]); #endif //if (llhData[i][j][r][c] >0 ){ reconim[rpos][cpos] = reconim[rpos][cpos] + llhData[i][j][r][c] * weight[i][j][w][2]; //} } } } } } } } } #if EDGE_GAIN_CORRECTION float* normim; normim = initEdgeGain(nova, weight, nor); for (r = 0; r < Mr; r++) { for (c = 0; c < Mc; c++) { idx = r * Mc + c; //idx=r+c*Mc; if (normim[idx] > 0.0) { reconim[r][c] = reconim[r][c] / normim[idx]; } } } free(normim); #endif return reconim; }
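// Launch sketch (not present in the original file): reconKernel indexes y over columns, x over rows and w over
// the 4 weight entries per detector pixel, so one plausible configuration uses blockDim.z = 4 and a 2D grid of
// TILE_SIZE x TILE_SIZE tiles. The helper name launchRecon and the flattened device buffers it takes are
// illustrative assumptions, not the project's actual host code.
static void launchRecon(uInt8 *d_llhData, float *d_weight, float *d_reconim, float *d_nor, float *d_normim,
                        uint32_t detsizex, uint32_t detsizey, uint32_t row, uint32_t col)
{
	dim3 block(TILE_SIZE, TILE_SIZE, 4);            // x -> columns, y -> rows, z -> the 4 weights
	dim3 grid((col + TILE_SIZE - 1) / TILE_SIZE,
	          (row + TILE_SIZE - 1) / TILE_SIZE,
	          1);
	reconKernel<<<grid, block>>>(d_llhData, d_weight, d_reconim, d_nor,
	                             detsizex, detsizey, row, col, d_normim);
	CUDACheck(cudaDeviceSynchronize());
}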
dc80d9bd319c9d0f54c2dc1b74c75658419afc9a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <math.h> #include <hdf5.h> #define NPML 10 #define NPMLp 11 #define NPMLp2 22 const float light_velocity = 2.99792458e8; // m s- const float ep0 = 8.85418781762038920e-12; // F m-1 (permittivity at vacuum) const float mu0 = 1.25663706143591730e-6; // N A-2 (permeability at vacuum) const float imp0 = sqrt( mu0/ep0 ); // (impedance at vacuum) const float pi = 3.14159265358979323846; const int MBPG = 65535; // Allocate constant memory for CPML __constant__ float rcmbE[NPMLp2]; __constant__ float rcmaE[NPMLp2]; __constant__ float rcmbH[NPMLp2]; __constant__ float rcmaH[NPMLp2]; typedef struct N3 { int x, y, z; } N3; typedef struct P3F3 { float ***x, ***y, ***z; } P3F3; typedef struct P1F3 { float *x, *y, *z; } P1F3; typedef struct P1F2 { float *f, *b; } P1F2; typedef struct P1F6 { P1F2 x, y, z; } P1F6; __host__ void updateTimer(time_t t0, int tstep, char str[]) { int elapsedTime=(int)(time(0)-t0); sprintf(str, "%02d:%02d:%02d (%d)", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60, elapsedTime); } __host__ void exec(char *format, ...) { char str[1024]; va_list ap; va_start(ap, format); vsprintf(str, format, ap); system(str); } __host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) { char filename[1024]; va_list ap; va_start(ap, format); vsprintf(filename, format, ap); hid_t file, dataset, filespace, memspace; hsize_t dimsm[3] = { Ni, Nj, Nk }; hsize_t start[3] = { is, js, ks }; hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke }; memspace = H5Screate_simple(3, dimsm, 0); filespace = H5Screate_simple(3, count, 0); file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT); H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0); H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]); H5Dclose(dataset); H5Sclose(filespace); H5Sclose(memspace); H5Fclose(file); } __host__ float ***makeArray3D(int Nx, int Ny, int Nz) { float ***f; int i; f = (float ***) calloc (Nx, sizeof(float **)); f[0] = (float **) calloc (Ny*Nx, sizeof(float *)); f[0][0] = (float *) calloc (Nz*Ny*Nx, sizeof(float)); for (i=0; i<Nx; i++) f[i] = f[0] + i*Ny; for (i=0; i<Ny*Nx; i++) f[0][i] = f[0][0] + i*Nz; return f; } __host__ float **makeArray2D(int Nx, int Ny) { float **f; f = (float **) calloc (Nx, sizeof(float *)); f[0] = (float *) calloc (Ny*Nx, sizeof(float)); for (int i=0; i<Nx; i++) f[i] = f[0] + i*Ny; return f; } __host__ float *makeArray1D( int Nx ) { float *f; f = (float *) calloc (Nx, sizeof(float)); return f; } __host__ void set_geometry( N3 N, P3F3 CE ) { int i,j,k; for ( i=1; i<N.x; i++ ) { for ( j=1; j<N.y; j++ ) { for ( k=1; k<N.z; k++ ) { CE.x[i][j][k] = 0.5; CE.y[i][j][k] = 0.5; CE.z[i][j][k] = 0.5; if ( i == N.x-1 ) { CE.y[i][j][k] = 0; CE.z[i][j][k] = 0; } if ( j == N.y-1 ) { CE.z[i][j][k] = 0; CE.x[i][j][k] = 0; } if ( k == N.z-1 ) { CE.x[i][j][k] = 0; CE.y[i][j][k] = 0; } } } } /* int idx; for ( idx=0; idx<(N.x+1)*N.y*N.z; idx++ ) { i = idx/(N.y*N.z); j = ( idx - i*N.y*N.z )/N.z; k = idx%N.z; printf("%d [%d,%d,%d] %g, %g, %g\n", idx, i, j, k, CE.x[0][0][idx], CE.y[0][0][idx], CE.z[0][0][idx] ); } */ } __host__ void verify_16xNz(int Nz) { int R = Nz%16; int N1 = Nz-R; int N2 = N1+16; if ( R == 0 ) printf("Nz is a multiple of 16.\n"); else { printf("Error: Nz is not 
a multiple of 16.\n"); printf("Recommend Nz: %d or %d\n", N1, N2); exit(0); } } __host__ float calcOccupancy(int TPB) { float occupancy; int WPB; // wrap/block int ABPM; // active block/streaming multiprocessor int AWPM; // active warp/streaming multiprocessor int MAX_ABPM = 8; int MAX_AWPM = 32; //int MAX_TPM = 1024; int TPW = 32; // thread/warp WPB = TPB%TPW == 0 ? TPB/TPW : TPB/TPW+1; ABPM = MAX_AWPM/WPB < MAX_ABPM ? MAX_AWPM/WPB : MAX_ABPM; AWPM = WPB*ABPM; occupancy = (float)AWPM/MAX_AWPM; return occupancy; } __host__ int selectTPB(int Ntot, int Nsurplus_plane) { int i; int *tpb, bpg, TPB=0; int Nsurplus; float occupancy, max_occupancy=0; int Ntpb = 512/16 + 2; tpb = (int *) calloc (Ntpb, sizeof(int)); tpb[0] = 512; tpb[1] = 256; tpb[2] = 128; for ( i=3; i<Ntpb; i++ ) tpb[i] = tpb[0] - 16*(i-2); //for ( i=0; i<Ntpb; i++ ) printf("tpb[%d]=%d\n",i,tpb[i]); for ( i=0; i<Ntpb; i++) { occupancy = calcOccupancy( tpb[i] ); if ( occupancy > max_occupancy ) { max_occupancy = occupancy; bpg = Ntot%tpb[i] == 0 ? Ntot/tpb[i] : Ntot/tpb[i] + 1; Nsurplus = tpb[i]*bpg - Ntot; if ( Nsurplus_plane == 0 ) TPB = tpb[i]; else if ( Nsurplus <= Nsurplus_plane ) TPB = tpb[i]; } } if ( TPB == 0 ) { printf("Error: There is not a TPB satisfied the conditions\n"); exit(0); } printf("\tNsurplus_plane=%d, Nsurplus=%d\n", Nsurplus_plane, Nsurplus); printf("\tNtot=%d, TPB=%d\n", Ntot, TPB); return TPB; } __global__ void initArray(int Ntot, float *a, int idx0) { int idx = idx0 + blockIdx.x*blockDim.x + threadIdx.x; if ( idx < Ntot ) a[idx] = 0; } __host__ void initMainArrays(int Ntot, P1F3 F) { int i; int TPB, BPG, NK, sBPG, *idx0; dim3 Db, *Dg; printf("select TPB,BPG: main init\n"); TPB = 512; Db = dim3( TPB ); BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; NK = BPG/MBPG + 1; // Number of kernel sBPG = BPG/NK; Dg = (dim3 *) malloc ( NK*sizeof(dim3) ); idx0 = (int *) malloc ( NK*sizeof(int) ); for ( i=0; i<NK; i++ ) { idx0[i] = TPB*sBPG*i; Dg[i] = dim3(sBPG); } Dg[NK-1] = dim3(sBPG+BPG%NK); printf("\tTPB=%d, BPG=%d, sBPG(%d)=%d\n", TPB, BPG, NK, sBPG); for ( i=0; i<NK; i++ ) { hipLaunchKernelGGL(( initArray) , dim3(Dg[i]),dim3(Db), 0, 0, Ntot, F.x, idx0[i]); hipLaunchKernelGGL(( initArray) , dim3(Dg[i]),dim3(Db), 0, 0, Ntot, F.y, idx0[i]); hipLaunchKernelGGL(( initArray) , dim3(Dg[i]),dim3(Db), 0, 0, Ntot, F.z, idx0[i]); } } __host__ void initPsiArrays(int Ntot, dim3 Db, dim3 Dg, P1F2 psi1, P1F2 psi2) { hipLaunchKernelGGL(( initArray) , dim3(Dg),dim3(Db), 0, 0, Ntot, psi1.f, 0); hipLaunchKernelGGL(( initArray) , dim3(Dg),dim3(Db), 0, 0, Ntot, psi1.b, 0); hipLaunchKernelGGL(( initArray) , dim3(Dg),dim3(Db), 0, 0, Ntot, psi2.f, 0); hipLaunchKernelGGL(( initArray) , dim3(Dg),dim3(Db), 0, 0, Ntot, psi2.b, 0); } __host__ void freeMainArrays(P1F3 F) { hipFree(F.x); hipFree(F.y); hipFree(F.z); } __host__ void freePsiArrays(P1F6 psix, P1F6 psiy, P1F6 psiz) { hipFree(psix.y.f); hipFree(psix.y.b); hipFree(psix.z.f); hipFree(psix.z.b); hipFree(psiy.z.f); hipFree(psiy.z.b); hipFree(psiy.x.f); hipFree(psiy.x.b); hipFree(psiz.x.f); hipFree(psiz.x.b); hipFree(psiz.y.f); hipFree(psiz.y.b); } __global__ void updateE(N3 N, P1F3 E, P1F3 H, P1F3 CE, int idx0) { int tk = threadIdx.x; int idx = blockIdx.x*blockDim.x + tk + idx0; int Nyz = N.y*N.z; int eidx = idx + Nyz; extern __shared__ float hs[]; float* hx = (float*) hs; float* hy = (float*) &hx[blockDim.x+1]; float* hz = (float*) &hy[blockDim.x+1]; hx[tk] = H.x[idx]; hy[tk] = H.y[idx]; hz[tk] = H.z[idx]; if ( tk==blockDim.x-1 ) { hx[tk+1] = H.x[idx+1]; hy[tk+1] = H.y[idx+1]; } 
__syncthreads(); E.x[eidx] += CE.x[idx]*( H.z[idx+N.z] - hz[tk] - hy[tk+1] + hy[tk] ); E.y[eidx] += CE.y[idx]*( hx[tk+1] - hx[tk] - H.z[idx+Nyz] + hz[tk] ); E.z[eidx] += CE.z[idx]*( H.y[idx+Nyz] - hy[tk] - H.x[idx+N.z] + hx[tk] ); } __global__ void updateH(N3 N, P1F3 E, P1F3 H, int idx0) { int tk = threadIdx.x; int idx = blockIdx.x*blockDim.x + tk + idx0; int Nyz = N.y*N.z; int eidx = idx + Nyz; extern __shared__ float es[]; float* ex = (float*) es; float* ey = (float*) &ex[blockDim.x+1]; float* ez = (float*) &ey[blockDim.x+1]; ex[tk+1] = E.x[eidx]; ey[tk+1] = E.y[eidx]; ez[tk] = E.z[eidx]; if ( tk==0 ) { ex[0] = E.x[eidx-1]; ey[0] = E.y[eidx-1]; } __syncthreads(); H.x[idx] -= 0.5*( ez[tk] - E.z[eidx-N.z] - ey[tk+1] + ey[tk] ); H.y[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + E.z[eidx-Nyz] ); H.z[idx] -= 0.5*( ey[tk+1] - E.y[eidx-Nyz] - ex[tk+1] + E.x[eidx-N.z] ); } __global__ void updateSrc(N3 N, P1F3 E, int tstep) { int idx, ijk; idx = threadIdx.x; //ijk = (idx+1)*N.y*N.z + (N.y/2)*N.z + (N.z/2); //ijk = (idx+1)*N.y*N.z + (N.y/2 - 30)*N.z + (N.z/2 - 50); //ijk = (N.x/2 - 30)*N.y*N.z + (idx)*N.z + (N.z/2 - 50); ijk = (N.x/2-30)*N.y*N.z + (N.y/2-50)*N.z + idx; //E.x[ijk] += sin(0.1*tstep); //E.y[ijk] += sin(0.1*tstep); E.z[ijk] += sin(0.1*tstep); } __global__ void updateCPMLxE(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*blockDim.x + threadIdx.x; int Nyz = N.y*N.z; int pi = pidx/Nyz + backward*(NPML+1); int idx = pidx + ( 1 + backward*(N.x-NPML-2) )*Nyz; int eidx = idx + Nyz; psi1[pidx] = rcmbE[pi]*psi1[pidx] + rcmaE[pi]*( H.z[idx+Nyz] - H.z[idx] ); E.y[eidx] -= CE.y[idx]*psi1[pidx]; psi2[pidx] = rcmbE[pi]*psi2[pidx] + rcmaE[pi]*( H.y[idx+Nyz] - H.y[idx] ); E.z[eidx] += CE.z[idx]*psi2[pidx]; } __global__ void updateCPMLxH(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*blockDim.x + threadIdx.x; int Nyz = N.y*N.z; int pi = pidx/Nyz + 1 + backward*(NPML+1); int idx = pidx + ( 1 + backward*(N.x-NPML-1) )*Nyz; int eidx = idx + Nyz; psi1[pidx] = rcmbH[pi]*psi1[pidx] + rcmaH[pi]*( E.z[eidx] - E.z[eidx-Nyz] ); H.y[idx] += 0.5*psi1[pidx]; psi2[pidx] = rcmbH[pi]*psi2[pidx] + rcmaH[pi]*( E.y[eidx] - E.y[eidx-Nyz] ); H.z[idx] -= 0.5*psi2[pidx]; } __global__ void updateCPMLyE(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*blockDim.x + threadIdx.x; int i = pidx/(NPML*N.z); int pj = ( pidx/N.z )%NPML + backward*(NPML+1); int idx = pidx + ( 1 + i*(N.y-NPML) + backward*(N.y-NPML-2) )*N.z; int eidx = idx + N.y*N.z; psi1[pidx] = rcmbE[pj]*psi1[pidx] + rcmaE[pj]*( H.x[idx+N.z] - H.x[idx] ); E.z[eidx] -= CE.z[idx]*psi1[pidx]; psi2[pidx] = rcmbE[pj]*psi2[pidx] + rcmaE[pj]*( H.z[idx+N.z] - H.z[idx] ); E.x[eidx] += CE.x[idx]*psi2[pidx]; } __global__ void updateCPMLyH(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*blockDim.x + threadIdx.x; int i = pidx/(NPML*N.z); int pj = ( pidx/N.z )%NPML + 1 + backward*(NPML+1); int idx = pidx + ( 1 + i*(N.y-NPML) + backward*(N.y-NPML-1) )*N.z; int eidx = idx + N.y*N.z; psi1[pidx] = rcmbH[pj]*psi1[pidx] + rcmaH[pj]*( E.x[eidx] - E.x[eidx-N.z] ); H.z[idx] += 0.5*psi1[pidx]; psi2[pidx] = rcmbH[pj]*psi2[pidx] + rcmaH[pj]*( E.z[eidx] - E.z[eidx-N.z] ); H.x[idx] -= 0.5*psi2[pidx]; } __global__ void updateCPMLzE(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) { int tk = threadIdx.x; int pidx = blockIdx.x*blockDim.x + tk; int pk = pidx%NPMLp + backward*NPMLp; int idx = pidx + 1 + 
(pidx/NPMLp)*(N.z-NPMLp) + backward*(N.z-NPMLp-1); int eidx = idx + N.y*N.z; extern __shared__ float hs[]; float* hx = (float*) hs; float* hy = (float*) &hx[blockDim.x+1]; hx[tk] = H.x[idx]; hy[tk] = H.y[idx]; __syncthreads(); psi1[pidx] = rcmbE[pk]*psi1[pidx] + rcmaE[pk]*( hy[tk+1] - hy[tk] ); E.x[eidx] -= CE.x[idx]*psi1[pidx]; psi2[pidx] = rcmbE[pk]*psi2[pidx] + rcmaE[pk]*( hx[tk+1] - hx[tk] ); E.y[eidx] += CE.y[idx]*psi2[pidx]; } __global__ void updateCPMLzH(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) { int tk = threadIdx.x; int pidx = blockIdx.x*blockDim.x + tk; int pk = pidx%NPMLp + backward*NPMLp; int idx = pidx + (pidx/NPMLp + backward)*(N.z-NPMLp); int eidx = idx + N.y*N.z; extern __shared__ float es[]; float* ex = (float*) es; float* ey = (float*) &ex[blockDim.x+1]; ex[tk+1] = E.x[eidx]; ey[tk+1] = E.y[eidx]; __syncthreads(); psi1[pidx] = rcmbH[pk]*psi1[pidx] + rcmaH[pk]*( ey[tk+1] - ey[tk] ); H.x[idx] += 0.5*psi1[pidx]; psi2[pidx] = rcmbH[pk]*psi2[pidx] + rcmaH[pk]*( ex[tk+1] - ex[tk] ); H.y[idx] -= 0.5*psi2[pidx]; } int main() { int tstep; char time_str[32]; time_t t0; int i; // -------------------------------------------------------------------------------- // Set the parameters N3 N; N.x = 250; N.y = 250; N.z = 320; //int TMAX = 500; int TMAX = 100000; float S = 0.5; float dx = 10e-9; float dt = S*dx/light_velocity; int Npml = NPML; printf("N(%d,%d,%d), TMAX=%d\n", N.x, N.y, N.z, TMAX); verify_16xNz( N.z ); printf("Npml=%d\n",Npml); // -------------------------------------------------------------------------------- // Allocate host memory P3F3 CE; CE.x = makeArray3D( N.x+1, N.y, N.z ); CE.y = makeArray3D( N.x+1, N.y, N.z ); CE.z = makeArray3D( N.x+1, N.y, N.z ); /* float ***Ex, ***Ey, ***Ez; Ex = makeArray3D( N.x+2, N.y, N.z ); Ey = makeArray3D( N.x+2, N.y, N.z ); Ez = makeArray3D( N.x+2, N.y, N.z ); float ***Hx, ***Hy, ***Hz; Hx = makeArray3D( N.x+2, N.y, N.z ); Hy = makeArray3D( N.x+2, N.y, N.z ); Hz = makeArray3D( N.x+2, N.y, N.z ); */ // -------------------------------------------------------------------------------- // Geometry set_geometry( N, CE ); // -------------------------------------------------------------------------------- // Parameters for CPML int m = 4; // grade_order float sigma_max = (m+1.)/(15*pi*Npml*dx); float alpha = 0.05; float *sigmaE, *bE, *aE; float *sigmaH, *bH, *aH; sigmaE = (float *) calloc (2*(Npml+1), sizeof(float)); sigmaH = (float *) calloc (2*(Npml+1), sizeof(float)); bE = (float *) calloc (2*(Npml+1), sizeof(float)); bH = (float *) calloc (2*(Npml+1), sizeof(float)); aE = (float *) calloc (2*(Npml+1), sizeof(float)); aH = (float *) calloc (2*(Npml+1), sizeof(float)); for (i=0; i<Npml; i++) { sigmaE[i] = pow( (Npml-0.5-i)/Npml, m )*sigma_max; sigmaE[i+Npml+1] = pow( (0.5+i)/Npml, m )*sigma_max; sigmaH[i+1] = pow( (float)(Npml-i)/Npml, m )*sigma_max; sigmaH[i+Npml+2] = pow( (1.+i)/Npml, m )*sigma_max; } for (i=0; i<2*(Npml+1); i++) { bE[i] = exp( -(sigmaE[i] + alpha)*dt/ep0 ); bH[i] = exp( -(sigmaH[i] + alpha)*dt/ep0 ); aE[i] = sigmaE[i]/(sigmaE[i]+alpha)*(bE[i]-1); aH[i] = sigmaH[i]/(sigmaH[i]+alpha)*(bH[i]-1); //printf("[%d]\tsigmaE=%g,\tbE=%g,aE=%g\n", i, sigmaE[i], bE[i], aE[i]); //printf("[%d]\tsigmaH=%g,\tbH=%g,aH=%g\n", i, sigmaH[i], bH[i], aH[i]); } free(sigmaE); free(sigmaH); // -------------------------------------------------------------------------------- // Copy arrays from host to constant memory hipMemcpyToSymbol(rcmbE, bE, 2*(Npml+1)*sizeof(float)); hipMemcpyToSymbol(rcmaE, aE, 2*(Npml+1)*sizeof(float)); 
hipMemcpyToSymbol(rcmbH, bH, 2*(Npml+1)*sizeof(float)); hipMemcpyToSymbol(rcmaH, aH, 2*(Npml+1)*sizeof(float)); free(bE); free(aE); free(bH); free(aH); // -------------------------------------------------------------------------------- // Set the GPU parameters // TPB: Number of threads per block // BPG: Number of thread blocks per grid int Ntot, TPB, BPG; int NK, sBPG, *idx0; // main update printf("select TPB,BPG: main\n"); dim3 Db_main, *Dg_main; Ntot = N.x*N.y*N.z; TPB = selectTPB( Ntot, N.y*N.z ); Db_main = dim3( TPB ); BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; NK = BPG/MBPG + 1; // Number of kernel sBPG = BPG/NK; Dg_main = (dim3 *) malloc ( NK*sizeof(dim3) ); idx0 = (int *) malloc ( NK*sizeof(int) ); for ( i=0; i<NK; i++ ) { idx0[i] = TPB*sBPG*i; Dg_main[i] = dim3(sBPG); } Dg_main[NK-1] = dim3(sBPG+BPG%NK); size_t Ns_main = sizeof(float)*( 2*(TPB+1)+TPB ); printf("\tBPG=%d, sBPG(%d)=%d, Ns_main=%d\n", BPG, NK, sBPG, Ns_main); // source //TPB = N.x; //TPB = N.y; TPB = N.z; BPG = 1; dim3 DBsrc(TPB); dim3 DGsrc(BPG); printf("source: TPB=%d, BPG=%d\n", TPB, BPG); // cpml printf("select TPB,BPG: pml x\n"); dim3 Db_pmlx, Dg_pmlx; Ntot = Npml*N.y*N.z; TPB = selectTPB( Ntot, N.y*N.z ); BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; Db_pmlx = dim3( TPB ); Dg_pmlx = dim3( BPG ); int Ntotpmlx = TPB*BPG; printf("\tBPG=%d\n", BPG); printf("select TPB,BPG: pml y\n"); dim3 Db_pmly, Dg_pmly; Ntot = N.x*Npml*N.z; TPB = selectTPB( Ntot, Npml*N.z ); BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; Db_pmly = dim3( TPB ); Dg_pmly = dim3( BPG ); int Ntotpmly = TPB*BPG; printf("\tBPG=%d\n", BPG); printf("select TPB,BPG: pml z\n"); dim3 Db_pmlz, Dg_pmlz; Ntot = N.x*N.y*(Npml+1); //TPB = selectTPB( Ntot, N.y*Npml ); //TPB = 506; //(Npml+1)*46 TPB = 512; //(Npml+1)*32 BPG = Ntot%TPB == 0 ? 
Ntot/TPB : Ntot/TPB + 1; Db_pmlz = dim3( TPB ); Dg_pmlz = dim3( BPG ); int Ntotpmlz = TPB*BPG; size_t Ns_pmlz = sizeof(float)*( 2*(TPB+1) ); printf("\tBPG=%d, Ns_pmlz=%d\n", BPG, Ns_pmlz); // -------------------------------------------------------------------------------- // Allocate device memory P1F3 devE, devH; P1F3 devCE; int size_devF = (N.x+2)*N.y*N.z*sizeof(float); int size_devC = (N.x+1)*N.y*N.z*sizeof(float); hipMalloc ( (void**) &devE.x, size_devF ); hipMalloc ( (void**) &devE.y, size_devF ); hipMalloc ( (void**) &devE.z, size_devF ); hipMalloc ( (void**) &devH.x, size_devF ); hipMalloc ( (void**) &devH.y, size_devF ); hipMalloc ( (void**) &devH.z, size_devF ); hipMalloc ( (void**) &devCE.x, size_devC ); hipMalloc ( (void**) &devCE.y, size_devC ); hipMalloc ( (void**) &devCE.z, size_devC ); // -------------------------------------------------------------------------------- // Allocate device memory for CPML P1F6 psixE, psiyE, psizE; P1F6 psixH, psiyH, psizH; int size_psix = Ntotpmlx*sizeof(float); int size_psiy = Ntotpmly*sizeof(float); int size_psiz = Ntotpmlz*sizeof(float); hipMalloc ( (void**) &psixE.y.f, size_psix ); hipMalloc ( (void**) &psixE.y.b, size_psix ); hipMalloc ( (void**) &psixE.z.f, size_psix ); hipMalloc ( (void**) &psixE.z.b, size_psix ); hipMalloc ( (void**) &psixH.y.f, size_psix ); hipMalloc ( (void**) &psixH.y.b, size_psix ); hipMalloc ( (void**) &psixH.z.f, size_psix ); hipMalloc ( (void**) &psixH.z.b, size_psix ); hipMalloc ( (void**) &psiyE.z.f, size_psiy ); hipMalloc ( (void**) &psiyE.z.b, size_psiy ); hipMalloc ( (void**) &psiyE.x.f, size_psiy ); hipMalloc ( (void**) &psiyE.x.b, size_psiy ); hipMalloc ( (void**) &psiyH.z.f, size_psiy ); hipMalloc ( (void**) &psiyH.z.b, size_psiy ); hipMalloc ( (void**) &psiyH.x.f, size_psiy ); hipMalloc ( (void**) &psiyH.x.b, size_psiy ); hipMalloc ( (void**) &psizE.x.f, size_psiz ); hipMalloc ( (void**) &psizE.x.b, size_psiz ); hipMalloc ( (void**) &psizE.y.f, size_psiz ); hipMalloc ( (void**) &psizE.y.b, size_psiz ); hipMalloc ( (void**) &psizH.x.f, size_psiz ); hipMalloc ( (void**) &psizH.x.b, size_psiz ); hipMalloc ( (void**) &psizH.y.f, size_psiz ); hipMalloc ( (void**) &psizH.y.b, size_psiz ); // -------------------------------------------------------------------------------- // Initialize the device arrays initMainArrays ( (N.x+2)*N.y*N.z, devE ); initMainArrays ( (N.x+2)*N.y*N.z, devH ); //initMainArrays ( (N.x+1)*N.y*N.z, devCE ); initPsiArrays ( Ntotpmlx, Db_pmlx, Dg_pmlx, psixE.y, psixE.z ); initPsiArrays ( Ntotpmly, Db_pmly, Dg_pmly, psiyE.z, psiyE.x ); initPsiArrays ( Ntotpmlz, Db_pmlz, Dg_pmlz, psizE.x, psizE.y ); initPsiArrays ( Ntotpmlx, Db_pmlx, Dg_pmlx, psixH.y, psixH.z ); initPsiArrays ( Ntotpmly, Db_pmly, Dg_pmly, psiyH.z, psiyH.x ); initPsiArrays ( Ntotpmlz, Db_pmlz, Dg_pmlz, psizH.x, psizH.y ); // -------------------------------------------------------------------------------- // Copy arrays from host to device hipMemcpy ( devCE.x, CE.x[0][0], size_devC, hipMemcpyHostToDevice ); hipMemcpy ( devCE.y, CE.y[0][0], size_devC, hipMemcpyHostToDevice ); hipMemcpy ( devCE.z, CE.z[0][0], size_devC, hipMemcpyHostToDevice ); free(CE.x); free(CE.y); free(CE.z); // -------------------------------------------------------------------------------- // time loop t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { // E-fields main region update for ( i=0; i<NK; i++)hipLaunchKernelGGL(( updateE) , dim3(Dg_main[i]),dim3(Db_main),Ns_main, 0, N, devE, devH, devCE, idx0[i] ); // E-fields CPML region update 
hipLaunchKernelGGL(( updateCPMLxE) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, devCE, psixE.y.f, psixE.z.f, 0); hipLaunchKernelGGL(( updateCPMLxE) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, devCE, psixE.y.b, psixE.z.b, 1); hipLaunchKernelGGL(( updateCPMLyE) , dim3(Dg_pmly),dim3(Db_pmly), 0, 0, N, devE, devH, devCE, psiyE.z.f, psiyE.x.f, 0); hipLaunchKernelGGL(( updateCPMLyE) , dim3(Dg_pmly),dim3(Db_pmly), 0, 0, N, devE, devH, devCE, psiyE.z.b, psiyE.x.b, 1); hipLaunchKernelGGL(( updateCPMLzE) , dim3(Dg_pmlz),dim3(Db_pmlz),Ns_pmlz, 0, N, devE, devH, devCE, psizE.x.f, psizE.y.f, 0); hipLaunchKernelGGL(( updateCPMLzE) , dim3(Dg_pmlz),dim3(Db_pmlz),Ns_pmlz, 0, N, devE, devH, devCE, psizE.x.b, psizE.y.b, 1); // Source update hipLaunchKernelGGL(( updateSrc) , dim3(DGsrc),dim3(DBsrc), 0, 0, N, devE, tstep ); // H-fields main region update for ( i=0; i<NK; i++)hipLaunchKernelGGL(( updateH) , dim3(Dg_main[i]),dim3(Db_main),Ns_main, 0, N, devE, devH, idx0[i] ); // H-fields CPML region update hipLaunchKernelGGL(( updateCPMLxH) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, psixH.y.f, psixH.z.f, 0); hipLaunchKernelGGL(( updateCPMLxH) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, psixH.y.b, psixH.z.b, 1); hipLaunchKernelGGL(( updateCPMLyH) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, psiyH.z.f, psiyH.x.f, 0); hipLaunchKernelGGL(( updateCPMLyH) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, psiyH.z.b, psiyH.x.b, 1); hipLaunchKernelGGL(( updateCPMLzH) , dim3(Dg_pmlz),dim3(Db_pmlz),Ns_pmlz, 0, N, devE, devH, psizH.x.f, psizH.y.f, 0); hipLaunchKernelGGL(( updateCPMLzH) , dim3(Dg_pmlz),dim3(Db_pmlz),Ns_pmlz, 0, N, devE, devH, psizH.x.b, psizH.y.b, 1); /* if ( tstep/100*100 == tstep ) { // Copy arrays from device to host hipMemcpy( Ex[0][0], devE.x, (N.x+2)*N.y*N.z*sizeof(float), hipMemcpyDeviceToHost ); //hipMemcpy( Ez[0][0], devE.z, (N.x+2)*N.y*N.z*sizeof(float), hipMemcpyDeviceToHost ); //hipMemcpy( CEx[0][0], devCE.x, (N.x+1)*N.y*N.z*sizeof(float), hipMemcpyDeviceToHost ); hipMemcpy( Ey[0][0], devE.y, (N.x+2)*N.y*N.z*sizeof(float), hipMemcpyDeviceToHost ); hipMemcpy( Ez[0][0], devE.z, (N.x+2)*N.y*N.z*sizeof(float), hipMemcpyDeviceToHost ); hipMemcpy( Hx[0][0], devH.x, (N.x+2)*N.y*N.z*sizeof(float), hipMemcpyDeviceToHost ); hipMemcpy( Hy[0][0], devH.y, (N.x+2)*N.y*N.z*sizeof(float), hipMemcpyDeviceToHost ); hipMemcpy( Hz[0][0], devH.z, (N.x+2)*N.y*N.z*sizeof(float), hipMemcpyDeviceToHost ); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ex, "gpu_png/Ex-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ey, "gpu_png/Ey-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ey-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ez, "gpu_png/Ez-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ez-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Hx, "gpu_png/Hx-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hx-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Hy, "gpu_png/Hy-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hy-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Hz, "gpu_png/Hz-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c 
/usr/share/h5utils/colormaps/dkbluered gpu_png/Hz-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Ex, "gpu_png/Ex-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Ey, "gpu_png/Ey-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ey-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Ez, "gpu_png/Ez-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ez-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Hx, "gpu_png/Hx-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hx-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Hy, "gpu_png/Hy-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hy-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Hz, "gpu_png/Hz-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hz-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Ex, "gpu_png/Ex-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Ey, "gpu_png/Ey-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ey-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Ez, "gpu_png/Ez-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ez-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Hx, "gpu_png/Hx-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hx-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Hy, "gpu_png/Hy-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hy-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Hz, "gpu_png/Hz-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hz-%05d.h5", tstep); //dumpToH5(N.x+1, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, CEx, "gpu_png/CEx-%05d.h5", tstep); //exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/CEx-%05d.h5", tstep); updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); } */ } updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); /* // time test t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) for ( i=0; i<NK; i++)hipLaunchKernelGGL(( updateE) , dim3(Dg_main[i]),dim3(Db_main),Ns_main, 0, N, devE, devH, devCE, idx0[i] ); updateTimer(t0, tstep, time_str); printf("main E: \t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) for ( i=0; i<NK; i++)hipLaunchKernelGGL(( updateH) , dim3(Dg_main[i]),dim3(Db_main),Ns_main, 0, N, devE, devH, idx0[i] ); updateTimer(t0, tstep, time_str); printf("main H: \t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { hipLaunchKernelGGL(( updateCPMLxE) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, devCE, psixE.y.f, psixE.z.f, 0); hipLaunchKernelGGL(( updateCPMLxE) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, devCE, psixE.y.b, 
psixE.z.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmlx E:\t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { hipLaunchKernelGGL(( updateCPMLxH) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, psixH.y.f, psixH.z.f, 0); hipLaunchKernelGGL(( updateCPMLxH) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, psixH.y.b, psixH.z.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmlx H:\t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { hipLaunchKernelGGL(( updateCPMLyE) , dim3(Dg_pmly),dim3(Db_pmly), 0, 0, N, devE, devH, devCE, psiyE.z.f, psiyE.x.f, 0); hipLaunchKernelGGL(( updateCPMLyE) , dim3(Dg_pmly),dim3(Db_pmly), 0, 0, N, devE, devH, devCE, psiyE.z.b, psiyE.x.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmly E:\t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { hipLaunchKernelGGL(( updateCPMLyH) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, psiyH.z.f, psiyH.x.f, 0); hipLaunchKernelGGL(( updateCPMLyH) , dim3(Dg_pmlx),dim3(Db_pmlx), 0, 0, N, devE, devH, psiyH.z.b, psiyH.x.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmly H:\t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { hipLaunchKernelGGL(( updateCPMLzE) , dim3(Dg_pmlz),dim3(Db_pmlz),Ns_pmlz, 0, N, devE, devH, devCE, psizE.x.f, psizE.y.f, 0); hipLaunchKernelGGL(( updateCPMLzE) , dim3(Dg_pmlz),dim3(Db_pmlz),Ns_pmlz, 0, N, devE, devH, devCE, psizE.x.b, psizE.y.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmlz E:\t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { hipLaunchKernelGGL(( updateCPMLzH) , dim3(Dg_pmlz),dim3(Db_pmlz),Ns_pmlz, 0, N, devE, devH, psizH.x.f, psizH.y.f, 0); hipLaunchKernelGGL(( updateCPMLzH) , dim3(Dg_pmlz),dim3(Db_pmlz),Ns_pmlz, 0, N, devE, devH, psizH.x.b, psizH.y.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmlz H:\t%s\n", time_str); */ freeMainArrays ( devE ); freeMainArrays ( devH ); freeMainArrays ( devCE ); freePsiArrays ( psixE, psiyE, psizE ); freePsiArrays ( psixH, psiyH, psizH ); }
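// Optional debugging sketch (not in the original solver): the time loop above launches every kernel without
// checking for launch errors. A helper such as the hypothetical checkLastKernel below could be called once per
// time step while debugging, at the cost of a device synchronisation; it would need to be declared before the
// time loop to be used there.
__host__ void checkLastKernel(const char *label, int tstep)
{
	hipError_t err = hipGetLastError();
	if (err == hipSuccess) err = hipDeviceSynchronize();
	if (err != hipSuccess) {
		printf("HIP error after %s (tstep=%d): %s\n", label, tstep, hipGetErrorString(err));
		exit(1);
	}
}
// Example use inside the time loop (debugging only):
//   checkLastKernel("updateE", tstep);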
dc80d9bd319c9d0f54c2dc1b74c75658419afc9a.cu
#include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <math.h> #include <hdf5.h> #define NPML 10 #define NPMLp 11 #define NPMLp2 22 const float light_velocity = 2.99792458e8; // m s- const float ep0 = 8.85418781762038920e-12; // F m-1 (permittivity at vacuum) const float mu0 = 1.25663706143591730e-6; // N A-2 (permeability at vacuum) const float imp0 = sqrt( mu0/ep0 ); // (impedance at vacuum) const float pi = 3.14159265358979323846; const int MBPG = 65535; // Allocate constant memory for CPML __constant__ float rcmbE[NPMLp2]; __constant__ float rcmaE[NPMLp2]; __constant__ float rcmbH[NPMLp2]; __constant__ float rcmaH[NPMLp2]; typedef struct N3 { int x, y, z; } N3; typedef struct P3F3 { float ***x, ***y, ***z; } P3F3; typedef struct P1F3 { float *x, *y, *z; } P1F3; typedef struct P1F2 { float *f, *b; } P1F2; typedef struct P1F6 { P1F2 x, y, z; } P1F6; __host__ void updateTimer(time_t t0, int tstep, char str[]) { int elapsedTime=(int)(time(0)-t0); sprintf(str, "%02d:%02d:%02d (%d)", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60, elapsedTime); } __host__ void exec(char *format, ...) { char str[1024]; va_list ap; va_start(ap, format); vsprintf(str, format, ap); system(str); } __host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) { char filename[1024]; va_list ap; va_start(ap, format); vsprintf(filename, format, ap); hid_t file, dataset, filespace, memspace; hsize_t dimsm[3] = { Ni, Nj, Nk }; hsize_t start[3] = { is, js, ks }; hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke }; memspace = H5Screate_simple(3, dimsm, 0); filespace = H5Screate_simple(3, count, 0); file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT); H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0); H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]); H5Dclose(dataset); H5Sclose(filespace); H5Sclose(memspace); H5Fclose(file); } __host__ float ***makeArray3D(int Nx, int Ny, int Nz) { float ***f; int i; f = (float ***) calloc (Nx, sizeof(float **)); f[0] = (float **) calloc (Ny*Nx, sizeof(float *)); f[0][0] = (float *) calloc (Nz*Ny*Nx, sizeof(float)); for (i=0; i<Nx; i++) f[i] = f[0] + i*Ny; for (i=0; i<Ny*Nx; i++) f[0][i] = f[0][0] + i*Nz; return f; } __host__ float **makeArray2D(int Nx, int Ny) { float **f; f = (float **) calloc (Nx, sizeof(float *)); f[0] = (float *) calloc (Ny*Nx, sizeof(float)); for (int i=0; i<Nx; i++) f[i] = f[0] + i*Ny; return f; } __host__ float *makeArray1D( int Nx ) { float *f; f = (float *) calloc (Nx, sizeof(float)); return f; } __host__ void set_geometry( N3 N, P3F3 CE ) { int i,j,k; for ( i=1; i<N.x; i++ ) { for ( j=1; j<N.y; j++ ) { for ( k=1; k<N.z; k++ ) { CE.x[i][j][k] = 0.5; CE.y[i][j][k] = 0.5; CE.z[i][j][k] = 0.5; if ( i == N.x-1 ) { CE.y[i][j][k] = 0; CE.z[i][j][k] = 0; } if ( j == N.y-1 ) { CE.z[i][j][k] = 0; CE.x[i][j][k] = 0; } if ( k == N.z-1 ) { CE.x[i][j][k] = 0; CE.y[i][j][k] = 0; } } } } /* int idx; for ( idx=0; idx<(N.x+1)*N.y*N.z; idx++ ) { i = idx/(N.y*N.z); j = ( idx - i*N.y*N.z )/N.z; k = idx%N.z; printf("%d [%d,%d,%d] %g, %g, %g\n", idx, i, j, k, CE.x[0][0][idx], CE.y[0][0][idx], CE.z[0][0][idx] ); } */ } __host__ void verify_16xNz(int Nz) { int R = Nz%16; int N1 = Nz-R; int N2 = N1+16; if ( R == 0 ) printf("Nz is a multiple of 16.\n"); else { printf("Error: Nz is not a multiple of 16.\n"); printf("Recommend Nz: %d or %d\n", N1, N2); exit(0); } } 
__host__ float calcOccupancy(int TPB) { float occupancy; int WPB; // wrap/block int ABPM; // active block/streaming multiprocessor int AWPM; // active warp/streaming multiprocessor int MAX_ABPM = 8; int MAX_AWPM = 32; //int MAX_TPM = 1024; int TPW = 32; // thread/warp WPB = TPB%TPW == 0 ? TPB/TPW : TPB/TPW+1; ABPM = MAX_AWPM/WPB < MAX_ABPM ? MAX_AWPM/WPB : MAX_ABPM; AWPM = WPB*ABPM; occupancy = (float)AWPM/MAX_AWPM; return occupancy; } __host__ int selectTPB(int Ntot, int Nsurplus_plane) { int i; int *tpb, bpg, TPB=0; int Nsurplus; float occupancy, max_occupancy=0; int Ntpb = 512/16 + 2; tpb = (int *) calloc (Ntpb, sizeof(int)); tpb[0] = 512; tpb[1] = 256; tpb[2] = 128; for ( i=3; i<Ntpb; i++ ) tpb[i] = tpb[0] - 16*(i-2); //for ( i=0; i<Ntpb; i++ ) printf("tpb[%d]=%d\n",i,tpb[i]); for ( i=0; i<Ntpb; i++) { occupancy = calcOccupancy( tpb[i] ); if ( occupancy > max_occupancy ) { max_occupancy = occupancy; bpg = Ntot%tpb[i] == 0 ? Ntot/tpb[i] : Ntot/tpb[i] + 1; Nsurplus = tpb[i]*bpg - Ntot; if ( Nsurplus_plane == 0 ) TPB = tpb[i]; else if ( Nsurplus <= Nsurplus_plane ) TPB = tpb[i]; } } if ( TPB == 0 ) { printf("Error: There is not a TPB satisfied the conditions\n"); exit(0); } printf("\tNsurplus_plane=%d, Nsurplus=%d\n", Nsurplus_plane, Nsurplus); printf("\tNtot=%d, TPB=%d\n", Ntot, TPB); return TPB; } __global__ void initArray(int Ntot, float *a, int idx0) { int idx = idx0 + blockIdx.x*blockDim.x + threadIdx.x; if ( idx < Ntot ) a[idx] = 0; } __host__ void initMainArrays(int Ntot, P1F3 F) { int i; int TPB, BPG, NK, sBPG, *idx0; dim3 Db, *Dg; printf("select TPB,BPG: main init\n"); TPB = 512; Db = dim3( TPB ); BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; NK = BPG/MBPG + 1; // Number of kernel sBPG = BPG/NK; Dg = (dim3 *) malloc ( NK*sizeof(dim3) ); idx0 = (int *) malloc ( NK*sizeof(int) ); for ( i=0; i<NK; i++ ) { idx0[i] = TPB*sBPG*i; Dg[i] = dim3(sBPG); } Dg[NK-1] = dim3(sBPG+BPG%NK); printf("\tTPB=%d, BPG=%d, sBPG(%d)=%d\n", TPB, BPG, NK, sBPG); for ( i=0; i<NK; i++ ) { initArray <<<Dg[i],Db>>> (Ntot, F.x, idx0[i]); initArray <<<Dg[i],Db>>> (Ntot, F.y, idx0[i]); initArray <<<Dg[i],Db>>> (Ntot, F.z, idx0[i]); } } __host__ void initPsiArrays(int Ntot, dim3 Db, dim3 Dg, P1F2 psi1, P1F2 psi2) { initArray <<<Dg,Db>>> (Ntot, psi1.f, 0); initArray <<<Dg,Db>>> (Ntot, psi1.b, 0); initArray <<<Dg,Db>>> (Ntot, psi2.f, 0); initArray <<<Dg,Db>>> (Ntot, psi2.b, 0); } __host__ void freeMainArrays(P1F3 F) { cudaFree(F.x); cudaFree(F.y); cudaFree(F.z); } __host__ void freePsiArrays(P1F6 psix, P1F6 psiy, P1F6 psiz) { cudaFree(psix.y.f); cudaFree(psix.y.b); cudaFree(psix.z.f); cudaFree(psix.z.b); cudaFree(psiy.z.f); cudaFree(psiy.z.b); cudaFree(psiy.x.f); cudaFree(psiy.x.b); cudaFree(psiz.x.f); cudaFree(psiz.x.b); cudaFree(psiz.y.f); cudaFree(psiz.y.b); } __global__ void updateE(N3 N, P1F3 E, P1F3 H, P1F3 CE, int idx0) { int tk = threadIdx.x; int idx = blockIdx.x*blockDim.x + tk + idx0; int Nyz = N.y*N.z; int eidx = idx + Nyz; extern __shared__ float hs[]; float* hx = (float*) hs; float* hy = (float*) &hx[blockDim.x+1]; float* hz = (float*) &hy[blockDim.x+1]; hx[tk] = H.x[idx]; hy[tk] = H.y[idx]; hz[tk] = H.z[idx]; if ( tk==blockDim.x-1 ) { hx[tk+1] = H.x[idx+1]; hy[tk+1] = H.y[idx+1]; } __syncthreads(); E.x[eidx] += CE.x[idx]*( H.z[idx+N.z] - hz[tk] - hy[tk+1] + hy[tk] ); E.y[eidx] += CE.y[idx]*( hx[tk+1] - hx[tk] - H.z[idx+Nyz] + hz[tk] ); E.z[eidx] += CE.z[idx]*( H.y[idx+Nyz] - hy[tk] - H.x[idx+N.z] + hx[tk] ); } __global__ void updateH(N3 N, P1F3 E, P1F3 H, int idx0) { int tk = threadIdx.x; int idx = 
blockIdx.x*blockDim.x + tk + idx0; int Nyz = N.y*N.z; int eidx = idx + Nyz; extern __shared__ float es[]; float* ex = (float*) es; float* ey = (float*) &ex[blockDim.x+1]; float* ez = (float*) &ey[blockDim.x+1]; ex[tk+1] = E.x[eidx]; ey[tk+1] = E.y[eidx]; ez[tk] = E.z[eidx]; if ( tk==0 ) { ex[0] = E.x[eidx-1]; ey[0] = E.y[eidx-1]; } __syncthreads(); H.x[idx] -= 0.5*( ez[tk] - E.z[eidx-N.z] - ey[tk+1] + ey[tk] ); H.y[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + E.z[eidx-Nyz] ); H.z[idx] -= 0.5*( ey[tk+1] - E.y[eidx-Nyz] - ex[tk+1] + E.x[eidx-N.z] ); } __global__ void updateSrc(N3 N, P1F3 E, int tstep) { int idx, ijk; idx = threadIdx.x; //ijk = (idx+1)*N.y*N.z + (N.y/2)*N.z + (N.z/2); //ijk = (idx+1)*N.y*N.z + (N.y/2 - 30)*N.z + (N.z/2 - 50); //ijk = (N.x/2 - 30)*N.y*N.z + (idx)*N.z + (N.z/2 - 50); ijk = (N.x/2-30)*N.y*N.z + (N.y/2-50)*N.z + idx; //E.x[ijk] += sin(0.1*tstep); //E.y[ijk] += sin(0.1*tstep); E.z[ijk] += sin(0.1*tstep); } __global__ void updateCPMLxE(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*blockDim.x + threadIdx.x; int Nyz = N.y*N.z; int pi = pidx/Nyz + backward*(NPML+1); int idx = pidx + ( 1 + backward*(N.x-NPML-2) )*Nyz; int eidx = idx + Nyz; psi1[pidx] = rcmbE[pi]*psi1[pidx] + rcmaE[pi]*( H.z[idx+Nyz] - H.z[idx] ); E.y[eidx] -= CE.y[idx]*psi1[pidx]; psi2[pidx] = rcmbE[pi]*psi2[pidx] + rcmaE[pi]*( H.y[idx+Nyz] - H.y[idx] ); E.z[eidx] += CE.z[idx]*psi2[pidx]; } __global__ void updateCPMLxH(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*blockDim.x + threadIdx.x; int Nyz = N.y*N.z; int pi = pidx/Nyz + 1 + backward*(NPML+1); int idx = pidx + ( 1 + backward*(N.x-NPML-1) )*Nyz; int eidx = idx + Nyz; psi1[pidx] = rcmbH[pi]*psi1[pidx] + rcmaH[pi]*( E.z[eidx] - E.z[eidx-Nyz] ); H.y[idx] += 0.5*psi1[pidx]; psi2[pidx] = rcmbH[pi]*psi2[pidx] + rcmaH[pi]*( E.y[eidx] - E.y[eidx-Nyz] ); H.z[idx] -= 0.5*psi2[pidx]; } __global__ void updateCPMLyE(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*blockDim.x + threadIdx.x; int i = pidx/(NPML*N.z); int pj = ( pidx/N.z )%NPML + backward*(NPML+1); int idx = pidx + ( 1 + i*(N.y-NPML) + backward*(N.y-NPML-2) )*N.z; int eidx = idx + N.y*N.z; psi1[pidx] = rcmbE[pj]*psi1[pidx] + rcmaE[pj]*( H.x[idx+N.z] - H.x[idx] ); E.z[eidx] -= CE.z[idx]*psi1[pidx]; psi2[pidx] = rcmbE[pj]*psi2[pidx] + rcmaE[pj]*( H.z[idx+N.z] - H.z[idx] ); E.x[eidx] += CE.x[idx]*psi2[pidx]; } __global__ void updateCPMLyH(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) { int pidx = blockIdx.x*blockDim.x + threadIdx.x; int i = pidx/(NPML*N.z); int pj = ( pidx/N.z )%NPML + 1 + backward*(NPML+1); int idx = pidx + ( 1 + i*(N.y-NPML) + backward*(N.y-NPML-1) )*N.z; int eidx = idx + N.y*N.z; psi1[pidx] = rcmbH[pj]*psi1[pidx] + rcmaH[pj]*( E.x[eidx] - E.x[eidx-N.z] ); H.z[idx] += 0.5*psi1[pidx]; psi2[pidx] = rcmbH[pj]*psi2[pidx] + rcmaH[pj]*( E.z[eidx] - E.z[eidx-N.z] ); H.x[idx] -= 0.5*psi2[pidx]; } __global__ void updateCPMLzE(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) { int tk = threadIdx.x; int pidx = blockIdx.x*blockDim.x + tk; int pk = pidx%NPMLp + backward*NPMLp; int idx = pidx + 1 + (pidx/NPMLp)*(N.z-NPMLp) + backward*(N.z-NPMLp-1); int eidx = idx + N.y*N.z; extern __shared__ float hs[]; float* hx = (float*) hs; float* hy = (float*) &hx[blockDim.x+1]; hx[tk] = H.x[idx]; hy[tk] = H.y[idx]; __syncthreads(); psi1[pidx] = rcmbE[pk]*psi1[pidx] + rcmaE[pk]*( hy[tk+1] - hy[tk] ); E.x[eidx] -= CE.x[idx]*psi1[pidx]; 
psi2[pidx] = rcmbE[pk]*psi2[pidx] + rcmaE[pk]*( hx[tk+1] - hx[tk] ); E.y[eidx] += CE.y[idx]*psi2[pidx]; } __global__ void updateCPMLzH(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) { int tk = threadIdx.x; int pidx = blockIdx.x*blockDim.x + tk; int pk = pidx%NPMLp + backward*NPMLp; int idx = pidx + (pidx/NPMLp + backward)*(N.z-NPMLp); int eidx = idx + N.y*N.z; extern __shared__ float es[]; float* ex = (float*) es; float* ey = (float*) &ex[blockDim.x+1]; ex[tk+1] = E.x[eidx]; ey[tk+1] = E.y[eidx]; __syncthreads(); psi1[pidx] = rcmbH[pk]*psi1[pidx] + rcmaH[pk]*( ey[tk+1] - ey[tk] ); H.x[idx] += 0.5*psi1[pidx]; psi2[pidx] = rcmbH[pk]*psi2[pidx] + rcmaH[pk]*( ex[tk+1] - ex[tk] ); H.y[idx] -= 0.5*psi2[pidx]; } int main() { int tstep; char time_str[32]; time_t t0; int i; // -------------------------------------------------------------------------------- // Set the parameters N3 N; N.x = 250; N.y = 250; N.z = 320; //int TMAX = 500; int TMAX = 100000; float S = 0.5; float dx = 10e-9; float dt = S*dx/light_velocity; int Npml = NPML; printf("N(%d,%d,%d), TMAX=%d\n", N.x, N.y, N.z, TMAX); verify_16xNz( N.z ); printf("Npml=%d\n",Npml); // -------------------------------------------------------------------------------- // Allocate host memory P3F3 CE; CE.x = makeArray3D( N.x+1, N.y, N.z ); CE.y = makeArray3D( N.x+1, N.y, N.z ); CE.z = makeArray3D( N.x+1, N.y, N.z ); /* float ***Ex, ***Ey, ***Ez; Ex = makeArray3D( N.x+2, N.y, N.z ); Ey = makeArray3D( N.x+2, N.y, N.z ); Ez = makeArray3D( N.x+2, N.y, N.z ); float ***Hx, ***Hy, ***Hz; Hx = makeArray3D( N.x+2, N.y, N.z ); Hy = makeArray3D( N.x+2, N.y, N.z ); Hz = makeArray3D( N.x+2, N.y, N.z ); */ // -------------------------------------------------------------------------------- // Geometry set_geometry( N, CE ); // -------------------------------------------------------------------------------- // Parameters for CPML int m = 4; // grade_order float sigma_max = (m+1.)/(15*pi*Npml*dx); float alpha = 0.05; float *sigmaE, *bE, *aE; float *sigmaH, *bH, *aH; sigmaE = (float *) calloc (2*(Npml+1), sizeof(float)); sigmaH = (float *) calloc (2*(Npml+1), sizeof(float)); bE = (float *) calloc (2*(Npml+1), sizeof(float)); bH = (float *) calloc (2*(Npml+1), sizeof(float)); aE = (float *) calloc (2*(Npml+1), sizeof(float)); aH = (float *) calloc (2*(Npml+1), sizeof(float)); for (i=0; i<Npml; i++) { sigmaE[i] = pow( (Npml-0.5-i)/Npml, m )*sigma_max; sigmaE[i+Npml+1] = pow( (0.5+i)/Npml, m )*sigma_max; sigmaH[i+1] = pow( (float)(Npml-i)/Npml, m )*sigma_max; sigmaH[i+Npml+2] = pow( (1.+i)/Npml, m )*sigma_max; } for (i=0; i<2*(Npml+1); i++) { bE[i] = exp( -(sigmaE[i] + alpha)*dt/ep0 ); bH[i] = exp( -(sigmaH[i] + alpha)*dt/ep0 ); aE[i] = sigmaE[i]/(sigmaE[i]+alpha)*(bE[i]-1); aH[i] = sigmaH[i]/(sigmaH[i]+alpha)*(bH[i]-1); //printf("[%d]\tsigmaE=%g,\tbE=%g,aE=%g\n", i, sigmaE[i], bE[i], aE[i]); //printf("[%d]\tsigmaH=%g,\tbH=%g,aH=%g\n", i, sigmaH[i], bH[i], aH[i]); } free(sigmaE); free(sigmaH); // -------------------------------------------------------------------------------- // Copy arrays from host to constant memory cudaMemcpyToSymbol(rcmbE, bE, 2*(Npml+1)*sizeof(float)); cudaMemcpyToSymbol(rcmaE, aE, 2*(Npml+1)*sizeof(float)); cudaMemcpyToSymbol(rcmbH, bH, 2*(Npml+1)*sizeof(float)); cudaMemcpyToSymbol(rcmaH, aH, 2*(Npml+1)*sizeof(float)); free(bE); free(aE); free(bH); free(aH); // -------------------------------------------------------------------------------- // Set the GPU parameters // TPB: Number of threads per block // BPG: Number of thread blocks 
per grid int Ntot, TPB, BPG; int NK, sBPG, *idx0; // main update printf("select TPB,BPG: main\n"); dim3 Db_main, *Dg_main; Ntot = N.x*N.y*N.z; TPB = selectTPB( Ntot, N.y*N.z ); Db_main = dim3( TPB ); BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; NK = BPG/MBPG + 1; // Number of kernel sBPG = BPG/NK; Dg_main = (dim3 *) malloc ( NK*sizeof(dim3) ); idx0 = (int *) malloc ( NK*sizeof(int) ); for ( i=0; i<NK; i++ ) { idx0[i] = TPB*sBPG*i; Dg_main[i] = dim3(sBPG); } Dg_main[NK-1] = dim3(sBPG+BPG%NK); size_t Ns_main = sizeof(float)*( 2*(TPB+1)+TPB ); printf("\tBPG=%d, sBPG(%d)=%d, Ns_main=%d\n", BPG, NK, sBPG, Ns_main); // source //TPB = N.x; //TPB = N.y; TPB = N.z; BPG = 1; dim3 DBsrc(TPB); dim3 DGsrc(BPG); printf("source: TPB=%d, BPG=%d\n", TPB, BPG); // cpml printf("select TPB,BPG: pml x\n"); dim3 Db_pmlx, Dg_pmlx; Ntot = Npml*N.y*N.z; TPB = selectTPB( Ntot, N.y*N.z ); BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; Db_pmlx = dim3( TPB ); Dg_pmlx = dim3( BPG ); int Ntotpmlx = TPB*BPG; printf("\tBPG=%d\n", BPG); printf("select TPB,BPG: pml y\n"); dim3 Db_pmly, Dg_pmly; Ntot = N.x*Npml*N.z; TPB = selectTPB( Ntot, Npml*N.z ); BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; Db_pmly = dim3( TPB ); Dg_pmly = dim3( BPG ); int Ntotpmly = TPB*BPG; printf("\tBPG=%d\n", BPG); printf("select TPB,BPG: pml z\n"); dim3 Db_pmlz, Dg_pmlz; Ntot = N.x*N.y*(Npml+1); //TPB = selectTPB( Ntot, N.y*Npml ); //TPB = 506; //(Npml+1)*46 TPB = 512; //(Npml+1)*32 BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; Db_pmlz = dim3( TPB ); Dg_pmlz = dim3( BPG ); int Ntotpmlz = TPB*BPG; size_t Ns_pmlz = sizeof(float)*( 2*(TPB+1) ); printf("\tBPG=%d, Ns_pmlz=%d\n", BPG, Ns_pmlz); // -------------------------------------------------------------------------------- // Allocate device memory P1F3 devE, devH; P1F3 devCE; int size_devF = (N.x+2)*N.y*N.z*sizeof(float); int size_devC = (N.x+1)*N.y*N.z*sizeof(float); cudaMalloc ( (void**) &devE.x, size_devF ); cudaMalloc ( (void**) &devE.y, size_devF ); cudaMalloc ( (void**) &devE.z, size_devF ); cudaMalloc ( (void**) &devH.x, size_devF ); cudaMalloc ( (void**) &devH.y, size_devF ); cudaMalloc ( (void**) &devH.z, size_devF ); cudaMalloc ( (void**) &devCE.x, size_devC ); cudaMalloc ( (void**) &devCE.y, size_devC ); cudaMalloc ( (void**) &devCE.z, size_devC ); // -------------------------------------------------------------------------------- // Allocate device memory for CPML P1F6 psixE, psiyE, psizE; P1F6 psixH, psiyH, psizH; int size_psix = Ntotpmlx*sizeof(float); int size_psiy = Ntotpmly*sizeof(float); int size_psiz = Ntotpmlz*sizeof(float); cudaMalloc ( (void**) &psixE.y.f, size_psix ); cudaMalloc ( (void**) &psixE.y.b, size_psix ); cudaMalloc ( (void**) &psixE.z.f, size_psix ); cudaMalloc ( (void**) &psixE.z.b, size_psix ); cudaMalloc ( (void**) &psixH.y.f, size_psix ); cudaMalloc ( (void**) &psixH.y.b, size_psix ); cudaMalloc ( (void**) &psixH.z.f, size_psix ); cudaMalloc ( (void**) &psixH.z.b, size_psix ); cudaMalloc ( (void**) &psiyE.z.f, size_psiy ); cudaMalloc ( (void**) &psiyE.z.b, size_psiy ); cudaMalloc ( (void**) &psiyE.x.f, size_psiy ); cudaMalloc ( (void**) &psiyE.x.b, size_psiy ); cudaMalloc ( (void**) &psiyH.z.f, size_psiy ); cudaMalloc ( (void**) &psiyH.z.b, size_psiy ); cudaMalloc ( (void**) &psiyH.x.f, size_psiy ); cudaMalloc ( (void**) &psiyH.x.b, size_psiy ); cudaMalloc ( (void**) &psizE.x.f, size_psiz ); cudaMalloc ( (void**) &psizE.x.b, size_psiz ); cudaMalloc ( (void**) &psizE.y.f, size_psiz ); cudaMalloc ( (void**) &psizE.y.b, size_psiz ); cudaMalloc ( (void**) 
&psizH.x.f, size_psiz ); cudaMalloc ( (void**) &psizH.x.b, size_psiz ); cudaMalloc ( (void**) &psizH.y.f, size_psiz ); cudaMalloc ( (void**) &psizH.y.b, size_psiz ); // -------------------------------------------------------------------------------- // Initialize the device arrays initMainArrays ( (N.x+2)*N.y*N.z, devE ); initMainArrays ( (N.x+2)*N.y*N.z, devH ); //initMainArrays ( (N.x+1)*N.y*N.z, devCE ); initPsiArrays ( Ntotpmlx, Db_pmlx, Dg_pmlx, psixE.y, psixE.z ); initPsiArrays ( Ntotpmly, Db_pmly, Dg_pmly, psiyE.z, psiyE.x ); initPsiArrays ( Ntotpmlz, Db_pmlz, Dg_pmlz, psizE.x, psizE.y ); initPsiArrays ( Ntotpmlx, Db_pmlx, Dg_pmlx, psixH.y, psixH.z ); initPsiArrays ( Ntotpmly, Db_pmly, Dg_pmly, psiyH.z, psiyH.x ); initPsiArrays ( Ntotpmlz, Db_pmlz, Dg_pmlz, psizH.x, psizH.y ); // -------------------------------------------------------------------------------- // Copy arrays from host to device cudaMemcpy ( devCE.x, CE.x[0][0], size_devC, cudaMemcpyHostToDevice ); cudaMemcpy ( devCE.y, CE.y[0][0], size_devC, cudaMemcpyHostToDevice ); cudaMemcpy ( devCE.z, CE.z[0][0], size_devC, cudaMemcpyHostToDevice ); free(CE.x); free(CE.y); free(CE.z); // -------------------------------------------------------------------------------- // time loop t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { // E-fields main region update for ( i=0; i<NK; i++) updateE <<<Dg_main[i],Db_main,Ns_main>>> ( N, devE, devH, devCE, idx0[i] ); // E-fields CPML region update updateCPMLxE <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, devCE, psixE.y.f, psixE.z.f, 0); updateCPMLxE <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, devCE, psixE.y.b, psixE.z.b, 1); updateCPMLyE <<<Dg_pmly,Db_pmly>>> ( N, devE, devH, devCE, psiyE.z.f, psiyE.x.f, 0); updateCPMLyE <<<Dg_pmly,Db_pmly>>> ( N, devE, devH, devCE, psiyE.z.b, psiyE.x.b, 1); updateCPMLzE <<<Dg_pmlz,Db_pmlz,Ns_pmlz>>> ( N, devE, devH, devCE, psizE.x.f, psizE.y.f, 0); updateCPMLzE <<<Dg_pmlz,Db_pmlz,Ns_pmlz>>> ( N, devE, devH, devCE, psizE.x.b, psizE.y.b, 1); // Source update updateSrc <<<DGsrc,DBsrc>>> ( N, devE, tstep ); // H-fields main region update for ( i=0; i<NK; i++) updateH <<<Dg_main[i],Db_main,Ns_main>>> ( N, devE, devH, idx0[i] ); // H-fields CPML region update updateCPMLxH <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, psixH.y.f, psixH.z.f, 0); updateCPMLxH <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, psixH.y.b, psixH.z.b, 1); updateCPMLyH <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, psiyH.z.f, psiyH.x.f, 0); updateCPMLyH <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, psiyH.z.b, psiyH.x.b, 1); updateCPMLzH <<<Dg_pmlz,Db_pmlz,Ns_pmlz>>> ( N, devE, devH, psizH.x.f, psizH.y.f, 0); updateCPMLzH <<<Dg_pmlz,Db_pmlz,Ns_pmlz>>> ( N, devE, devH, psizH.x.b, psizH.y.b, 1); /* if ( tstep/100*100 == tstep ) { // Copy arrays from device to host cudaMemcpy( Ex[0][0], devE.x, (N.x+2)*N.y*N.z*sizeof(float), cudaMemcpyDeviceToHost ); //cudaMemcpy( Ez[0][0], devE.z, (N.x+2)*N.y*N.z*sizeof(float), cudaMemcpyDeviceToHost ); //cudaMemcpy( CEx[0][0], devCE.x, (N.x+1)*N.y*N.z*sizeof(float), cudaMemcpyDeviceToHost ); cudaMemcpy( Ey[0][0], devE.y, (N.x+2)*N.y*N.z*sizeof(float), cudaMemcpyDeviceToHost ); cudaMemcpy( Ez[0][0], devE.z, (N.x+2)*N.y*N.z*sizeof(float), cudaMemcpyDeviceToHost ); cudaMemcpy( Hx[0][0], devH.x, (N.x+2)*N.y*N.z*sizeof(float), cudaMemcpyDeviceToHost ); cudaMemcpy( Hy[0][0], devH.y, (N.x+2)*N.y*N.z*sizeof(float), cudaMemcpyDeviceToHost ); cudaMemcpy( Hz[0][0], devH.z, (N.x+2)*N.y*N.z*sizeof(float), cudaMemcpyDeviceToHost ); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ex, 
"gpu_png/Ex-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ey, "gpu_png/Ey-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ey-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ez, "gpu_png/Ez-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ez-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Hx, "gpu_png/Hx-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hx-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Hy, "gpu_png/Hy-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hy-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Hz, "gpu_png/Hz-%05d.h5", tstep); exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hz-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Ex, "gpu_png/Ex-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Ey, "gpu_png/Ey-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ey-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Ez, "gpu_png/Ez-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ez-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Hx, "gpu_png/Hx-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hx-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Hy, "gpu_png/Hy-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hy-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, N.y/2, 0, N.x-1, N.y/2, N.z-1, Hz, "gpu_png/Hz-%05d.h5", tstep); exec("h5topng -ZM0.1 -y0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hz-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Ex, "gpu_png/Ex-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Ey, "gpu_png/Ey-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ey-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Ez, "gpu_png/Ez-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ez-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Hx, "gpu_png/Hx-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hx-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Hy, "gpu_png/Hy-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hy-%05d.h5", tstep); dumpToH5(N.x+2, N.y, N.z, 0, 0, N.z/2, N.x+1, N.y-1, N.z/2, Hz, "gpu_png/Hz-%05d.h5", tstep); exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Hz-%05d.h5", tstep); //dumpToH5(N.x+1, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, CEx, "gpu_png/CEx-%05d.h5", tstep); //exec("h5topng -ZM0.1 -x0 
-S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/CEx-%05d.h5", tstep); updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); } */ } updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); /* // time test t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) for ( i=0; i<NK; i++) updateE <<<Dg_main[i],Db_main,Ns_main>>> ( N, devE, devH, devCE, idx0[i] ); updateTimer(t0, tstep, time_str); printf("main E: \t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) for ( i=0; i<NK; i++) updateH <<<Dg_main[i],Db_main,Ns_main>>> ( N, devE, devH, idx0[i] ); updateTimer(t0, tstep, time_str); printf("main H: \t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { updateCPMLxE <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, devCE, psixE.y.f, psixE.z.f, 0); updateCPMLxE <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, devCE, psixE.y.b, psixE.z.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmlx E:\t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { updateCPMLxH <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, psixH.y.f, psixH.z.f, 0); updateCPMLxH <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, psixH.y.b, psixH.z.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmlx H:\t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { updateCPMLyE <<<Dg_pmly,Db_pmly>>> ( N, devE, devH, devCE, psiyE.z.f, psiyE.x.f, 0); updateCPMLyE <<<Dg_pmly,Db_pmly>>> ( N, devE, devH, devCE, psiyE.z.b, psiyE.x.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmly E:\t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { updateCPMLyH <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, psiyH.z.f, psiyH.x.f, 0); updateCPMLyH <<<Dg_pmlx,Db_pmlx>>> ( N, devE, devH, psiyH.z.b, psiyH.x.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmly H:\t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { updateCPMLzE <<<Dg_pmlz,Db_pmlz,Ns_pmlz>>> ( N, devE, devH, devCE, psizE.x.f, psizE.y.f, 0); updateCPMLzE <<<Dg_pmlz,Db_pmlz,Ns_pmlz>>> ( N, devE, devH, devCE, psizE.x.b, psizE.y.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmlz E:\t%s\n", time_str); t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { updateCPMLzH <<<Dg_pmlz,Db_pmlz,Ns_pmlz>>> ( N, devE, devH, psizH.x.f, psizH.y.f, 0); updateCPMLzH <<<Dg_pmlz,Db_pmlz,Ns_pmlz>>> ( N, devE, devH, psizH.x.b, psizH.y.b, 1); } updateTimer(t0, tstep, time_str); printf("cpmlz H:\t%s\n", time_str); */ freeMainArrays ( devE ); freeMainArrays ( devH ); freeMainArrays ( devCE ); freePsiArrays ( psixE, psiyE, psizE ); freePsiArrays ( psixH, psiyH, psizH ); }
0ea87de6570826593bd2cfaf64e71a33b3013707.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> using namespace std; typedef unsigned long long uint64; typedef unsigned long uint16; typedef uint64 rule; typedef uint16 cellData; //(16| 16| 16|8 | 8 |) //(A |-> B| C|<lblName|lblWeight>|) __device__ int getRuleA(rule r){return r >> 48;} __device__ int getRuleB(rule r){return r >> 32 & 0xFFFF;} __device__ int getRuleC(rule r){return r >> 16 & 0xFFFF;} __device__ int getRuleN(rule r){return r >> 8 & 0xFF;} __device__ int getRuleW(rule r){return r & 0xFF;} __host__ rule buildRule(int A, int B, int C, int lblName, int lblWeght){ return (uint64)A << 48 | (uint64)B << 32 | (uint64)C << 16 | lblName << 8 | lblWeght; } //(16 |16 |16 |8 |8 ) //(k |non-terminalIndex |lblState |lblName |lblWeght ) __device__ int getDataI(cellData d){return d;} __host__ cellData buildData_Host(int ruleIndex){return ruleIndex;} __device__ cellData buildData(int k, int ruleIndex, int lblState, int lblName, int lblWeght){ return ruleIndex; } __global__ void processRule(rule* rules, int rulesCount, int nCount, int strLen, int subLen, cellData* table){ // subLen === l // start === i int start = blockIdx.y * blockDim.y + threadIdx.y; if(start >= strLen - subLen) return; rule currentRule = rules[threadIdx.x]; for(int k = 0; k < subLen; k++){ cellData *current = table + ( subLen * strLen + start ) * (nCount + 1); cellData *left = table + ( k * strLen + start ) * (nCount + 1); cellData *right = table + ( (subLen - k - 1) * strLen + (k + start + 1) ) * (nCount + 1); int c = getRuleC(currentRule); if(current[ getRuleA(currentRule) ]) return; for(int m = 1; m <= nCount; m++){ if ( getDataI( left[m] ) == getRuleB(currentRule) ){ for(int n = 1; n <= nCount; n++){ if ( getDataI( right[n] ) == c ){ current[ getRuleA(currentRule) ] = getRuleA(currentRule); } } } } } __syncthreads(); } __host__ void fillTable(rule* rules, int rulesCount, int nCount, int strLen, cellData* table){ hipEvent_t start, stop; float gpuTime = 0.0f; int deviceCount; hipDeviceProp_t cdp; hipGetDeviceProperties ( &cdp, 0 ); hipEventCreate ( &start ); hipEventCreate ( &stop ); hipEventRecord ( start, 0 ); cellData *dev_table = 0; rule *dev_rules = 0; int table_size = strLen * strLen * (nCount+1); hipError_t cudaStatus; cudaStatus = hipSetDevice(0); cudaStatus = hipMalloc((void**)&dev_table, table_size * sizeof(cellData)); cudaStatus = hipMalloc((void**)&dev_rules, rulesCount * sizeof(rule)); cudaStatus = hipMemcpy(dev_table, table, table_size * sizeof(cellData), hipMemcpyHostToDevice); cudaStatus = hipMemcpy(dev_rules, rules, rulesCount * sizeof(rule), hipMemcpyHostToDevice); int threadsPerBlockX = cdp.maxThreadsPerBlock / rulesCount; for(int subLen = 1; subLen <= strLen; subLen++){ hipLaunchKernelGGL(( processRule), dim3(dim3( 1,(strLen-subLen)/(threadsPerBlockX)+1 )), dim3(dim3(rulesCount, threadsPerBlockX )) , 0, 0, dev_rules, rulesCount, nCount, strLen, subLen, dev_table); } cudaStatus = hipDeviceSynchronize(); cudaStatus = hipMemcpy(table, dev_table, table_size * sizeof(cellData), hipMemcpyDeviceToHost); hipEventRecord (stop, 0); hipEventSynchronize ( stop ); hipEventElapsedTime ( &gpuTime, start, stop ); cout<<"time "<< gpuTime<<endl; hipFree(dev_table); hipFree(dev_rules); } int main(){ int rulesCount = 7; int wordLen = 1000; int nCount = 4; rule *rules = new rule[rulesCount]; rules[0] = buildRule(1,2,3,0,0); rules[1] = buildRule(2,3,2,0,0); rules[2] = buildRule(2,3,3,0,0); rules[3] = 
buildRule(3,1,2,0,0); rules[4] = buildRule(4,2,4,0,0); rules[5] = buildRule(1,4,2,0,0); rules[6] = buildRule(3,4,2,0,0); cellData* table = new cellData[wordLen * wordLen * (nCount + 1)]; for(int i = 0; i < wordLen * wordLen * (nCount + 1); i++) table[i] = 0; for(int i=0; i<wordLen; i++){ table[i*(nCount+1) + 0] = 1; table[i*(nCount+1) + 3] = buildData_Host(3); } table[(wordLen-2)*(nCount+1) + 0] = 1; table[(wordLen-2)*(nCount+1) + 2] = buildData_Host(2); table[(wordLen-2)*(nCount+1) + 3] = buildData_Host(0); table[(wordLen-5)*(nCount+1) + 0] = 1; table[(wordLen-5)*(nCount+1) + 4] = buildData_Host(4); table[(wordLen-5)*(nCount+1) + 3] = buildData_Host(0); fillTable(rules, rulesCount, nCount, wordLen, table); //for(int i = 0; i < wordLen; i++){ // for(int j = 0; j < wordLen*(nCount+1); j++){ // if( !( j % (nCount+1) ) && j ) cout<<' '; // if( !( j % (nCount+1) ) ) // cout<<"";//(table[i*wordLen*(nCount+1)+j]); // else // cout<<(table[i*wordLen*(nCount+1)+j]); // //if( !( j % (nCount+1) ) ) cout<<' '; // } // cout<<endl; //} for(int i = wordLen-1; i < wordLen; i++){ for(int j = 0; j < 1*(nCount+1); j++){ if( !( j % (nCount+1) ) && j ) cout<<' '; if( !( j % (nCount+1) ) ) cout<<(table[i*wordLen*(nCount+1)+j]); else cout<<(table[i*wordLen*(nCount+1)+j] >> 32 & 0xFFFF); if( !( j % (nCount+1) ) ) cout<<' '; } cout<<endl; } system("pause"); return 0; }
0ea87de6570826593bd2cfaf64e71a33b3013707.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> using namespace std; typedef unsigned long long uint64; typedef unsigned long uint16; typedef uint64 rule; typedef uint16 cellData; //(16| 16| 16|8 | 8 |) //(A |-> B| C|<lblName|lblWeight>|) __device__ int getRuleA(rule r){return r >> 48;} __device__ int getRuleB(rule r){return r >> 32 & 0xFFFF;} __device__ int getRuleC(rule r){return r >> 16 & 0xFFFF;} __device__ int getRuleN(rule r){return r >> 8 & 0xFF;} __device__ int getRuleW(rule r){return r & 0xFF;} __host__ rule buildRule(int A, int B, int C, int lblName, int lblWeght){ return (uint64)A << 48 | (uint64)B << 32 | (uint64)C << 16 | lblName << 8 | lblWeght; } //(16 |16 |16 |8 |8 ) //(k |non-terminalIndex |lblState |lblName |lblWeght ) __device__ int getDataI(cellData d){return d;} __host__ cellData buildData_Host(int ruleIndex){return ruleIndex;} __device__ cellData buildData(int k, int ruleIndex, int lblState, int lblName, int lblWeght){ return ruleIndex; } __global__ void processRule(rule* rules, int rulesCount, int nCount, int strLen, int subLen, cellData* table){ // subLen === l // start === i int start = blockIdx.y * blockDim.y + threadIdx.y; if(start >= strLen - subLen) return; rule currentRule = rules[threadIdx.x]; for(int k = 0; k < subLen; k++){ cellData *current = table + ( subLen * strLen + start ) * (nCount + 1); cellData *left = table + ( k * strLen + start ) * (nCount + 1); cellData *right = table + ( (subLen - k - 1) * strLen + (k + start + 1) ) * (nCount + 1); int c = getRuleC(currentRule); if(current[ getRuleA(currentRule) ]) return; for(int m = 1; m <= nCount; m++){ if ( getDataI( left[m] ) == getRuleB(currentRule) ){ for(int n = 1; n <= nCount; n++){ if ( getDataI( right[n] ) == c ){ current[ getRuleA(currentRule) ] = getRuleA(currentRule); } } } } } __syncthreads(); } __host__ void fillTable(rule* rules, int rulesCount, int nCount, int strLen, cellData* table){ cudaEvent_t start, stop; float gpuTime = 0.0f; int deviceCount; cudaDeviceProp cdp; cudaGetDeviceProperties ( &cdp, 0 ); cudaEventCreate ( &start ); cudaEventCreate ( &stop ); cudaEventRecord ( start, 0 ); cellData *dev_table = 0; rule *dev_rules = 0; int table_size = strLen * strLen * (nCount+1); cudaError_t cudaStatus; cudaStatus = cudaSetDevice(0); cudaStatus = cudaMalloc((void**)&dev_table, table_size * sizeof(cellData)); cudaStatus = cudaMalloc((void**)&dev_rules, rulesCount * sizeof(rule)); cudaStatus = cudaMemcpy(dev_table, table, table_size * sizeof(cellData), cudaMemcpyHostToDevice); cudaStatus = cudaMemcpy(dev_rules, rules, rulesCount * sizeof(rule), cudaMemcpyHostToDevice); int threadsPerBlockX = cdp.maxThreadsPerBlock / rulesCount; for(int subLen = 1; subLen <= strLen; subLen++){ processRule<<< dim3( 1,(strLen-subLen)/(threadsPerBlockX)+1 ), dim3(rulesCount, threadsPerBlockX ) >>>(dev_rules, rulesCount, nCount, strLen, subLen, dev_table); } cudaStatus = cudaDeviceSynchronize(); cudaStatus = cudaMemcpy(table, dev_table, table_size * sizeof(cellData), cudaMemcpyDeviceToHost); cudaEventRecord (stop, 0); cudaEventSynchronize ( stop ); cudaEventElapsedTime ( &gpuTime, start, stop ); cout<<"time "<< gpuTime<<endl; cudaFree(dev_table); cudaFree(dev_rules); } int main(){ int rulesCount = 7; int wordLen = 1000; int nCount = 4; rule *rules = new rule[rulesCount]; rules[0] = buildRule(1,2,3,0,0); rules[1] = buildRule(2,3,2,0,0); rules[2] = buildRule(2,3,3,0,0); rules[3] = buildRule(3,1,2,0,0); rules[4] = buildRule(4,2,4,0,0); rules[5] = 
buildRule(1,4,2,0,0); rules[6] = buildRule(3,4,2,0,0); cellData* table = new cellData[wordLen * wordLen * (nCount + 1)]; for(int i = 0; i < wordLen * wordLen * (nCount + 1); i++) table[i] = 0; for(int i=0; i<wordLen; i++){ table[i*(nCount+1) + 0] = 1; table[i*(nCount+1) + 3] = buildData_Host(3); } table[(wordLen-2)*(nCount+1) + 0] = 1; table[(wordLen-2)*(nCount+1) + 2] = buildData_Host(2); table[(wordLen-2)*(nCount+1) + 3] = buildData_Host(0); table[(wordLen-5)*(nCount+1) + 0] = 1; table[(wordLen-5)*(nCount+1) + 4] = buildData_Host(4); table[(wordLen-5)*(nCount+1) + 3] = buildData_Host(0); fillTable(rules, rulesCount, nCount, wordLen, table); //for(int i = 0; i < wordLen; i++){ // for(int j = 0; j < wordLen*(nCount+1); j++){ // if( !( j % (nCount+1) ) && j ) cout<<' '; // if( !( j % (nCount+1) ) ) // cout<<"";//(table[i*wordLen*(nCount+1)+j]); // else // cout<<(table[i*wordLen*(nCount+1)+j]); // //if( !( j % (nCount+1) ) ) cout<<' '; // } // cout<<endl; //} for(int i = wordLen-1; i < wordLen; i++){ for(int j = 0; j < 1*(nCount+1); j++){ if( !( j % (nCount+1) ) && j ) cout<<' '; if( !( j % (nCount+1) ) ) cout<<(table[i*wordLen*(nCount+1)+j]); else cout<<(table[i*wordLen*(nCount+1)+j] >> 32 & 0xFFFF); if( !( j % (nCount+1) ) ) cout<<' '; } cout<<endl; } system("pause"); return 0; }
3fd3ddc11fba52958ee1567e20d60a1bb5d9577f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) Chris Choy ([email protected]). * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. */ #ifndef GPU_POOLING_MAX_KERNEL #define GPU_POOLING_MAX_KERNEL #include <limits> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/host_vector.h> #include <thrust/functional.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include "gpu.cuh" #include "pooling_max.cuh" #include "utils.hpp" template <typename Dtype, typename Itype> __global__ void set_gradient(const int n, const Dtype *d_grad_out, Dtype *d_grad_in, const Itype *in_index) { CUDA_KERNEL_LOOP(index, n) { d_grad_in[in_index[index]] = d_grad_out[index]; } } template <typename Dtype, typename Itype> __global__ void max_pool(const int N, const int out_nrows, const int nchannel, const int nnz, const Dtype *d_in_feat, Dtype *d_out_feat, Itype *d_max_index, const Itype *d_in_map, const Itype *d_out_map, const Itype *d_in_index_min) { // N == nnz * nchannel CUDA_KERNEL_LOOP(index, N) { int nrow = index / nchannel; int ch = index % nchannel; Itype out_map_row = d_out_map[nrow]; Itype in_index = d_in_index_min[nrow]; Itype num_in_feat; if (nrow == out_nrows - 1) num_in_feat = nnz - in_index; else num_in_feat = d_in_index_min[nrow + 1] - in_index; // It is guaranteed to have at least one input per output Itype curr_index, max_index = d_in_map[in_index] * nchannel + ch; Dtype curr_val, max_val = d_in_feat[max_index]; for (int curr_iter = 0; curr_iter < num_in_feat; curr_iter++) { curr_index = d_in_map[in_index + curr_iter] * nchannel + ch; curr_val = d_in_feat[curr_index]; if (max_val < curr_val) { max_val = curr_val; max_index = curr_index; } } Itype out_ind = out_map_row * nchannel + ch; d_out_feat[out_ind] = max_val; d_max_index[out_ind] = max_index; } } // Put features in to the out features according to the input index. 
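// (Effectively a row gather: out_feat[row, ch] = in_feat[in_index[row], ch], with one thread per output element.)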
// The input index is sorted according to the out index so no need to take out // index template <typename Dtype, typename Itype> __global__ void copy_sorted(const int n, const int nrows, const int nchannel, const Dtype *in_feat, const Itype *in_index, Dtype *out_feat) { int nrow, ch; CUDA_KERNEL_LOOP(index, n) { nrow = index / nchannel; ch = index % nchannel; out_feat[index] = in_feat[in_index[nrow] * nchannel + ch]; } } template <typename Dtype, typename Itype> void MaxPoolingForwardKernelGPU(const Dtype *d_in_feat, Dtype *d_out_feat, int out_nrows, Itype *d_max_index, int nchannel, const std::vector<std::vector<Itype>> &in_maps, const std::vector<std::vector<Itype>> &out_maps, Itype *d_scr, hipStream_t stream) { int nnz = 0; // Copy all maps to one vector for (const auto &map : in_maps) nnz += map.size(); Itype *d_in_map, *d_out_map; // CUDA_CHECK(hipMalloc((void **)&d_in_map, 2 * nnz * sizeof(Itype))); d_in_map = d_scr; d_out_map = d_in_map + nnz; Itype *d_in_map_iter = d_in_map, *d_out_map_iter = d_out_map; for (int k = 0; k < in_maps.size(); k++) { int curr_n = in_maps[k].size(); if (curr_n > 0) { CUDA_CHECK(hipMemcpy(d_in_map_iter, in_maps[k].data(), sizeof(Itype) * curr_n, hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(d_out_map_iter, out_maps[k].data(), sizeof(Itype) * curr_n, hipMemcpyHostToDevice)); d_in_map_iter += curr_n; d_out_map_iter += curr_n; } } // First, sort d_out_map and d_in_map with the d_out_map so that in_feat are // placed adjacent according to out_map thrust::sort_by_key(thrust::device, d_out_map, d_out_map + nnz, d_in_map); // Second, create number of in_feat per out, and starting index Itype *d_index, *d_in_map_min, *d_reduced_out_map; // CUDA_CHECK(hipMalloc((void **)&d_index, 3 * nnz * sizeof(Itype))); d_index = d_scr + 2 * nnz; d_in_map_min = d_index + nnz; d_reduced_out_map = d_index + 2 * nnz; thrust::sequence(thrust::device, d_index, d_index + nnz); thrust::equal_to<Itype> equal_pred; thrust::minimum<Itype> min_op; auto reduction_pair = thrust::reduce_by_key(thrust::device, // execution policy d_out_map, // key begin d_out_map + nnz, // key end d_index, // val begin d_reduced_out_map, // key out begin d_in_map_min, // val out begin equal_pred, // binary pred min_op); // binary op size_t num_unique_out_map = reduction_pair.first - d_reduced_out_map; if (num_unique_out_map != out_nrows) throw std::invalid_argument( Formatter() << "Reduction size mismatch. 
out_nrows: " << out_nrows << ", num_unique_out_map: " << num_unique_out_map); // Finally, use the max kernel to map all in_feats with the same out key to // out_feats Also, create out max_index for gradient hipLaunchKernelGGL(( max_pool<Dtype, Itype>) , dim3(GET_BLOCKS(out_nrows * nchannel)), dim3(CUDA_NUM_THREADS), 0, stream, nchannel * out_nrows, // N out_nrows, nchannel, nnz, d_in_feat, d_out_feat, d_max_index, // Out indices for backward d_in_map, // in index d_reduced_out_map, d_in_map_min); // hipFree(d_in_map); // hipFree(d_index); CUDA_CHECK(hipGetLastError()); } template void MaxPoolingForwardKernelGPU<float, int32_t>( const float *d_in_feat, float *d_out_feat, int out_nrows, int32_t *d_max_index, int nchannel, const std::vector<std::vector<int32_t>> &in_map, const std::vector<std::vector<int32_t>> &out_map, int32_t *d_scr, hipStream_t stream); template void MaxPoolingForwardKernelGPU<double, int32_t>( const double *d_in_feat, double *d_out_feat, int out_nrows, int32_t *d_max_index, int nchannel, const std::vector<std::vector<int32_t>> &in_map, const std::vector<std::vector<int32_t>> &out_map, int32_t *d_scr, hipStream_t stream); template <typename Dtype, typename Itype> void MaxPoolingBackwardKernelGPU(Dtype *d_grad_in_feat, int in_nrows, const Dtype *d_grad_out_feat, int out_nrows, const Itype *d_max_index, int nchannel, hipStream_t stream) { int num_kernels = out_nrows * nchannel; // Assume that gradients for input feature are all set to zero hipLaunchKernelGGL(( set_gradient<Dtype>), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, d_grad_out_feat, d_grad_in_feat, d_max_index); CUDA_CHECK(hipGetLastError()); } template void MaxPoolingBackwardKernelGPU<float, int32_t>( float *d_grad_in_feat, int in_nrows, const float *d_grad_out_feat, int out_nrows, const int32_t *d_max_index, int nchannel, hipStream_t stream); template void MaxPoolingBackwardKernelGPU<double, int32_t>( double *d_grad_in_feat, int in_nrows, const double *d_grad_out_feat, int out_nrows, const int32_t *d_max_index, int nchannel, hipStream_t stream); #endif
3fd3ddc11fba52958ee1567e20d60a1bb5d9577f.cu
/* Copyright (c) Chris Choy ([email protected]). * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. */ #ifndef GPU_POOLING_MAX_KERNEL #define GPU_POOLING_MAX_KERNEL #include <limits> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/host_vector.h> #include <thrust/functional.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include "gpu.cuh" #include "pooling_max.cuh" #include "utils.hpp" template <typename Dtype, typename Itype> __global__ void set_gradient(const int n, const Dtype *d_grad_out, Dtype *d_grad_in, const Itype *in_index) { CUDA_KERNEL_LOOP(index, n) { d_grad_in[in_index[index]] = d_grad_out[index]; } } template <typename Dtype, typename Itype> __global__ void max_pool(const int N, const int out_nrows, const int nchannel, const int nnz, const Dtype *d_in_feat, Dtype *d_out_feat, Itype *d_max_index, const Itype *d_in_map, const Itype *d_out_map, const Itype *d_in_index_min) { // N == nnz * nchannel CUDA_KERNEL_LOOP(index, N) { int nrow = index / nchannel; int ch = index % nchannel; Itype out_map_row = d_out_map[nrow]; Itype in_index = d_in_index_min[nrow]; Itype num_in_feat; if (nrow == out_nrows - 1) num_in_feat = nnz - in_index; else num_in_feat = d_in_index_min[nrow + 1] - in_index; // It is guaranteed to have at least one input per output Itype curr_index, max_index = d_in_map[in_index] * nchannel + ch; Dtype curr_val, max_val = d_in_feat[max_index]; for (int curr_iter = 0; curr_iter < num_in_feat; curr_iter++) { curr_index = d_in_map[in_index + curr_iter] * nchannel + ch; curr_val = d_in_feat[curr_index]; if (max_val < curr_val) { max_val = curr_val; max_index = curr_index; } } Itype out_ind = out_map_row * nchannel + ch; d_out_feat[out_ind] = max_val; d_max_index[out_ind] = max_index; } } // Put features in to the out features according to the input index. 
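// (Effectively a row gather: out_feat[row, ch] = in_feat[in_index[row], ch], with one thread per output element.)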
// The input index is sorted according to the out index so no need to take out // index template <typename Dtype, typename Itype> __global__ void copy_sorted(const int n, const int nrows, const int nchannel, const Dtype *in_feat, const Itype *in_index, Dtype *out_feat) { int nrow, ch; CUDA_KERNEL_LOOP(index, n) { nrow = index / nchannel; ch = index % nchannel; out_feat[index] = in_feat[in_index[nrow] * nchannel + ch]; } } template <typename Dtype, typename Itype> void MaxPoolingForwardKernelGPU(const Dtype *d_in_feat, Dtype *d_out_feat, int out_nrows, Itype *d_max_index, int nchannel, const std::vector<std::vector<Itype>> &in_maps, const std::vector<std::vector<Itype>> &out_maps, Itype *d_scr, cudaStream_t stream) { int nnz = 0; // Copy all maps to one vector for (const auto &map : in_maps) nnz += map.size(); Itype *d_in_map, *d_out_map; // CUDA_CHECK(cudaMalloc((void **)&d_in_map, 2 * nnz * sizeof(Itype))); d_in_map = d_scr; d_out_map = d_in_map + nnz; Itype *d_in_map_iter = d_in_map, *d_out_map_iter = d_out_map; for (int k = 0; k < in_maps.size(); k++) { int curr_n = in_maps[k].size(); if (curr_n > 0) { CUDA_CHECK(cudaMemcpy(d_in_map_iter, in_maps[k].data(), sizeof(Itype) * curr_n, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(d_out_map_iter, out_maps[k].data(), sizeof(Itype) * curr_n, cudaMemcpyHostToDevice)); d_in_map_iter += curr_n; d_out_map_iter += curr_n; } } // First, sort d_out_map and d_in_map with the d_out_map so that in_feat are // placed adjacent according to out_map thrust::sort_by_key(thrust::device, d_out_map, d_out_map + nnz, d_in_map); // Second, create number of in_feat per out, and starting index Itype *d_index, *d_in_map_min, *d_reduced_out_map; // CUDA_CHECK(cudaMalloc((void **)&d_index, 3 * nnz * sizeof(Itype))); d_index = d_scr + 2 * nnz; d_in_map_min = d_index + nnz; d_reduced_out_map = d_index + 2 * nnz; thrust::sequence(thrust::device, d_index, d_index + nnz); thrust::equal_to<Itype> equal_pred; thrust::minimum<Itype> min_op; auto reduction_pair = thrust::reduce_by_key(thrust::device, // execution policy d_out_map, // key begin d_out_map + nnz, // key end d_index, // val begin d_reduced_out_map, // key out begin d_in_map_min, // val out begin equal_pred, // binary pred min_op); // binary op size_t num_unique_out_map = reduction_pair.first - d_reduced_out_map; if (num_unique_out_map != out_nrows) throw std::invalid_argument( Formatter() << "Reduction size mismatch. 
out_nrows: " << out_nrows << ", num_unique_out_map: " << num_unique_out_map); // Finally, use the max kernel to map all in_feats with the same out key to // out_feats Also, create out max_index for gradient max_pool<Dtype, Itype> <<<GET_BLOCKS(out_nrows * nchannel), CUDA_NUM_THREADS, 0, stream>>>( nchannel * out_nrows, // N out_nrows, nchannel, nnz, d_in_feat, d_out_feat, d_max_index, // Out indices for backward d_in_map, // in index d_reduced_out_map, d_in_map_min); // cudaFree(d_in_map); // cudaFree(d_index); CUDA_CHECK(cudaGetLastError()); } template void MaxPoolingForwardKernelGPU<float, int32_t>( const float *d_in_feat, float *d_out_feat, int out_nrows, int32_t *d_max_index, int nchannel, const std::vector<std::vector<int32_t>> &in_map, const std::vector<std::vector<int32_t>> &out_map, int32_t *d_scr, cudaStream_t stream); template void MaxPoolingForwardKernelGPU<double, int32_t>( const double *d_in_feat, double *d_out_feat, int out_nrows, int32_t *d_max_index, int nchannel, const std::vector<std::vector<int32_t>> &in_map, const std::vector<std::vector<int32_t>> &out_map, int32_t *d_scr, cudaStream_t stream); template <typename Dtype, typename Itype> void MaxPoolingBackwardKernelGPU(Dtype *d_grad_in_feat, int in_nrows, const Dtype *d_grad_out_feat, int out_nrows, const Itype *d_max_index, int nchannel, cudaStream_t stream) { int num_kernels = out_nrows * nchannel; // Assume that gradients for input feature are all set to zero set_gradient<Dtype><<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, d_grad_out_feat, d_grad_in_feat, d_max_index); CUDA_CHECK(cudaGetLastError()); } template void MaxPoolingBackwardKernelGPU<float, int32_t>( float *d_grad_in_feat, int in_nrows, const float *d_grad_out_feat, int out_nrows, const int32_t *d_max_index, int nchannel, cudaStream_t stream); template void MaxPoolingBackwardKernelGPU<double, int32_t>( double *d_grad_in_feat, int in_nrows, const double *d_grad_out_feat, int out_nrows, const int32_t *d_max_index, int nchannel, cudaStream_t stream); #endif
eb9e1e742e08bc79bd1daff6e4db4114cb8d5a69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda.h> #include<stdlib.h> #include<stdio.h> #include<iostream> #include <iomanip> // std::setprecision using namespace std; union FP32{ float f; unsigned int i; }; __global__ void test_cvt_f32_to_f16(float* A, short int* C, int N){ for(int i=0;i<N;i++){ //asm("cvt.rn.satfinite.e4m3x2.f32 %0, %1, %2;\n\t":"=h"(C[i]):"f"(A[i]),"f"(B[i])); asm("cvt.rn.relu.satfinite.f16.f32 %0, %1;\n\t":"=h"(C[i]):"f"(A[i])); } } __global__ void test_cvt_f32_to_tf32(float* A, unsigned int* C, int N){ for(int i=0;i<N;i++){ //asm("cvt.rn.satfinite.e4m3x2.f32 %0, %1, %2;\n\t":"=h"(C[i]):"f"(A[i]),"f"(B[i])); asm("cvt.rna.satfinite.tf32.f32 %0, %1;\n\t":"=r"(C[i]):"f"(A[i])); } } void Initfloat(float * a, const int n) { float value; for ( int i = 0; i < n; i++ ) { value = (float)(rand() % 20 - 10) + (float)(rand() % 20 - 10) / 10.0 + (float)(rand() % 20 - 10) / 100.0 + (float)(rand() % 20 - 10) / 1000.0 + (float)(rand() % 20 - 10) / 10000.0 + (float)(rand() % 20 - 10) / 100000.0 + (float)(rand() % 20 - 10) / 1000000.0 + (float)(rand() % 20 - 10) / 10000000.0 + (float)(rand() % 20 - 10) / 100000000.0; a[i] = value; } } void InitZero(float * a, const int n) { for ( int i = 0; i < n; i++ ) { a[i] = 0.0; } } void show(float * a, const int n) { std::cout << std::endl; for ( int i=0; i<n; i++){ std::cout<< std::setprecision(20) << a[i] << std::endl; } std::cout << std::endl; } int main(int argc, char** argv){ int size = 10; float *dataA = (float*)malloc(sizeof(float) * size); unsigned int *dataC = (unsigned int*)malloc(sizeof(unsigned int) * size); float *d_dataA = NULL; unsigned int *d_dataC = NULL; hipMalloc((void**)&d_dataA, sizeof(float) * size); hipMalloc((void**)&d_dataC, sizeof(unsigned int) * size); FP32 fp32; Initfloat(dataA, size); /* Nan */ fp32.i = 0x7fffffff; dataA[size-1] = fp32.f; fp32.i = 0xffffffff; dataA[size-2] = fp32.f; /* inf */ fp32.i = 0x7f800000; dataA[size-3] = fp32.f; fp32.i = 0xff800000; dataA[size-4] = fp32.f; /* 0 */ fp32.i = 0x00000000; dataA[size-5] = fp32.f; fp32.i = 0x80000000; dataA[size-6] = fp32.f; /* overflow */ fp32.i = 0x7f7ffeba; dataA[size-7] = fp32.f; fp32.i = 0xff7ffeba; dataA[size-8] = fp32.f; /* random normal */ fp32.i = 0x43acad91; dataA[size-9] = fp32.f; fp32.i = 0xc3acad91; dataA[size-10] = fp32.f; // /* underflow */ // fp32.i = 0x36801000; dataA[count] = fp32.f; // show(dataA, size); for(int i=0;i<size;i++){ dataC[i] = 0; } hipMemcpy(d_dataA,dataA,sizeof(float) * size,hipMemcpyHostToDevice); hipMemcpy(d_dataC,dataC,sizeof(unsigned int) * size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( test_cvt_f32_to_tf32), dim3(1), dim3(1), 0, 0, d_dataA, d_dataC, size); hipMemcpy(dataC,d_dataC,sizeof(unsigned int) * size, hipMemcpyDeviceToHost); std::cout << std::endl; for(int i=0;i<size;i++){ FP32 fp32_i; FP32 tf32_o; fp32_i.f = dataA[i]; tf32_o.i = dataC[i]; cout<< "f32 / tf32 hex format:,"<<hex<<fp32_i.i<<"," <<tf32_o.i <<endl; cout<< "f32 / tf32 dec format:,"<< std::setprecision(20) <<fp32_i.f<<"," <<tf32_o.f<<endl; std::cout << std::endl; } hipFree(d_dataA); hipFree(d_dataC); free(dataA); free(dataC); return 0; }
eb9e1e742e08bc79bd1daff6e4db4114cb8d5a69.cu
#include<cuda.h> #include<stdlib.h> #include<stdio.h> #include<iostream> #include <iomanip> // std::setprecision using namespace std; union FP32{ float f; unsigned int i; }; __global__ void test_cvt_f32_to_f16(float* A, short int* C, int N){ for(int i=0;i<N;i++){ //asm("cvt.rn.satfinite.e4m3x2.f32 %0, %1, %2;\n\t":"=h"(C[i]):"f"(A[i]),"f"(B[i])); asm("cvt.rn.relu.satfinite.f16.f32 %0, %1;\n\t":"=h"(C[i]):"f"(A[i])); } } __global__ void test_cvt_f32_to_tf32(float* A, unsigned int* C, int N){ for(int i=0;i<N;i++){ //asm("cvt.rn.satfinite.e4m3x2.f32 %0, %1, %2;\n\t":"=h"(C[i]):"f"(A[i]),"f"(B[i])); asm("cvt.rna.satfinite.tf32.f32 %0, %1;\n\t":"=r"(C[i]):"f"(A[i])); } } void Initfloat(float * a, const int n) { float value; for ( int i = 0; i < n; i++ ) { value = (float)(rand() % 20 - 10) + (float)(rand() % 20 - 10) / 10.0 + (float)(rand() % 20 - 10) / 100.0 + (float)(rand() % 20 - 10) / 1000.0 + (float)(rand() % 20 - 10) / 10000.0 + (float)(rand() % 20 - 10) / 100000.0 + (float)(rand() % 20 - 10) / 1000000.0 + (float)(rand() % 20 - 10) / 10000000.0 + (float)(rand() % 20 - 10) / 100000000.0; a[i] = value; } } void InitZero(float * a, const int n) { for ( int i = 0; i < n; i++ ) { a[i] = 0.0; } } void show(float * a, const int n) { std::cout << std::endl; for ( int i=0; i<n; i++){ std::cout<< std::setprecision(20) << a[i] << std::endl; } std::cout << std::endl; } int main(int argc, char** argv){ int size = 10; float *dataA = (float*)malloc(sizeof(float) * size); unsigned int *dataC = (unsigned int*)malloc(sizeof(unsigned int) * size); float *d_dataA = NULL; unsigned int *d_dataC = NULL; cudaMalloc((void**)&d_dataA, sizeof(float) * size); cudaMalloc((void**)&d_dataC, sizeof(unsigned int) * size); FP32 fp32; Initfloat(dataA, size); /* Nan */ fp32.i = 0x7fffffff; dataA[size-1] = fp32.f; fp32.i = 0xffffffff; dataA[size-2] = fp32.f; /* inf */ fp32.i = 0x7f800000; dataA[size-3] = fp32.f; fp32.i = 0xff800000; dataA[size-4] = fp32.f; /* 0 */ fp32.i = 0x00000000; dataA[size-5] = fp32.f; fp32.i = 0x80000000; dataA[size-6] = fp32.f; /* overflow */ fp32.i = 0x7f7ffeba; dataA[size-7] = fp32.f; fp32.i = 0xff7ffeba; dataA[size-8] = fp32.f; /* random normal */ fp32.i = 0x43acad91; dataA[size-9] = fp32.f; fp32.i = 0xc3acad91; dataA[size-10] = fp32.f; // /* underflow */ // fp32.i = 0x36801000; dataA[count] = fp32.f; // show(dataA, size); for(int i=0;i<size;i++){ dataC[i] = 0; } cudaMemcpy(d_dataA,dataA,sizeof(float) * size,cudaMemcpyHostToDevice); cudaMemcpy(d_dataC,dataC,sizeof(unsigned int) * size,cudaMemcpyHostToDevice); test_cvt_f32_to_tf32<<<1, 1>>> (d_dataA, d_dataC, size); cudaMemcpy(dataC,d_dataC,sizeof(unsigned int) * size, cudaMemcpyDeviceToHost); std::cout << std::endl; for(int i=0;i<size;i++){ FP32 fp32_i; FP32 tf32_o; fp32_i.f = dataA[i]; tf32_o.i = dataC[i]; cout<< "f32 / tf32 hex format:,"<<hex<<fp32_i.i<<"," <<tf32_o.i <<endl; cout<< "f32 / tf32 dec format:,"<< std::setprecision(20) <<fp32_i.f<<"," <<tf32_o.f<<endl; std::cout << std::endl; } cudaFree(d_dataA); cudaFree(d_dataC); free(dataA); free(dataC); return 0; }
3d38bf99bbd59078cd1d318d5893a0003ebb7dbc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void hgemm_tn_128x128(
    unsigned short* param_C,
    const unsigned short* param_A,
    const unsigned short* param_B,
    float param_alpha,
    float param_beta,
    int param_flags,
    int param_lda,
    int param_ldb,
    int param_ldc,
    int param_m,
    int param_n,
    int param_k,
    int param_ldaz,
    int param_ldbz,
    int param_ldcz,
    int param_batch_loops
) {
    __shared__ float share[128*8*2 + 128*8*2 + 4];
    *param_C = share[0];
}
3d38bf99bbd59078cd1d318d5893a0003ebb7dbc.cu
extern "C" __global__ void hgemm_tn_128x128( unsigned short* param_C, const unsigned short* param_A, const unsigned short* param_B, float param_alpha, float param_beta, int param_flags, int param_lda, int param_ldb, int param_ldc, int param_m, int param_n, int param_k, int param_ldaz, int param_ldbz, int param_ldcz, int param_batch_loops ) { __shared__ float share[128*8*2 + 128*8*2 + 4]; *param_C = share[0]; }
9489f4322a0343fd6c9246c193941e61aacafe46.hip
// !!! This is a file automatically generated by hipify!!!
#include "matrix_cuda.h"
#include "memoryWrapper.cuh"
#include <hip/hip_runtime.h>

//#include <type_traits>
//template<typename T>
//inline typename std::enable_if<std::is_unsigned<T>::value, T>::type ceildiv (T x, T y)
//{
//    return x / y + (x % y != 0);
//}

inline __device__ __host__ uint32_t ceildiv(uint32_t x, uint32_t y) {
    // division instruction gives you a free modulo. So add one if not cleanly divisible. not that it should matter...
    return x / y + (x % y != 0);
}

template<typename T>
__global__ void mmul_naive_kernel(T * mem_left, T * mem_right, T * mem_out, dim3 sizes) {
    //I put gridDim here instead of blockDim and that was a really weird bug, cause most of the tests still passed.
    //TODO Maybe see if e.g. TEST(mmul_cuda, simple_equality) is just garbage because it still worked...
    uint32_t row = threadIdx.x + blockIdx.x * blockDim.x;
    uint32_t col = threadIdx.y + blockIdx.y * blockDim.y;
    //product_size is the size of the scalar product, the amount of columns in left and the amount of rows in right
    uint32_t stride_left = sizes.y;
    uint32_t product_size = sizes.y;
    uint32_t stride_right = sizes.z;
    uint32_t stride_out = stride_right;
    //If the matrix size is not divisible, just ignore too large indices
    if (row >= sizes.x || col >= sizes.z) {
        //printf("skipped %i %i\n", row,col);
        return;
    }
    T elem = 0;
    //Todo what about splitting this loop over many threads with either atomic write or some sort of aggregation step?
    for (size_t i = 0; i < product_size; ++i) {
        //elem += left(row, i)*right(i, col) -> _mem[N*row+col];
        elem += mem_left[stride_left * row + i] * mem_right[stride_right * i + col];
        // result has the same amount of columns == stride as right
        mem_out[stride_out * row + col] = elem;
    }
}

// generic implementation
template<typename T>
Matrix<T> mmul_cuda_naive(Matrix<T> const& left, Matrix<T> const& right, uint32_t n_threads) {
    uint32_t rrows = left.M;
    uint32_t rcols = right.N;
    Matrix<T> ret(rrows, rcols);
    auto mem_start = std::chrono::high_resolution_clock::now();
    //initialize and copy
    DeviceMemory<T> left_mem(left.data(), left.size());
    DeviceMemory<T> right_mem(right.data(), right.size());
    //just initialize
    DeviceMemory<T> out_mem(ret.size());
    auto mem_stop = std::chrono::high_resolution_clock::now();
    auto mem_duration = mem_stop - mem_start;
    dim3 sizes = {uint32_t(left.M), uint32_t(left.N), uint32_t(right.N)};
    //TODO check heuristic for these
    //ATTENTION putting 0 in any dimension is invalid and does not signify "nonexistent"
    //let's try using thread blocks of 8x8=2 warps. This sucks a bit for very small matrices but then wtf use cuda...
    dim3 blocks{ceildiv(rrows, n_threads), ceildiv(rcols, n_threads), 1};
    dim3 threads{n_threads, n_threads, 1};
    assert(blocks.x * blocks.y * threads.x * threads.y >= ret.size());
    // there should be at most one nearly empty set of blocks
    assert(blocks.x * blocks.y * threads.x * threads.y < (blocks.x + 1) * (blocks.y + 1) * threads.x * threads.y);
    mmul_naive_kernel<T> << < blocks, threads, 0 >> > (left_mem.mem(), right_mem.mem(), out_mem.mem(), sizes);
    hipDeviceSynchronize(); // todo needed?
    quitOnCudaError();
    mem_start = std::chrono::high_resolution_clock::now();
    hipMemcpy(ret.data(), out_mem);
    mem_stop = std::chrono::high_resolution_clock::now();
    mem_duration += mem_stop - mem_start;
    lastMemoryOp = mem_duration;
    return ret;
}

// TODO this turned out to be much more of a problem than if it were just square matrices.
// Not sure how efficient all the index mess is, but this should be started with as many threads as possible so that // a block uses the maximum possible amount of shared memory as to make the copying worth it. template<typename T> __global__ void mmul_shared_kernel(T* mem_left, T* mem_right, T* mem_out, dim3 sizes) { //product_size is the size of the scalar product, the amount of columns in left and the amount of rows in right uint32_t stride_left = sizes.y; uint32_t product_size = sizes.y; uint32_t stride_right = sizes.z; uint32_t row = threadIdx.x + blockIdx.x * blockDim.x; uint32_t col = threadIdx.y + blockIdx.y * blockDim.y; //range of rows in the left matrix and cols in the right matrix that we need in this block extern __shared__ float smem[]; uint32_t row_left_max = blockDim.x + blockIdx.x * blockDim.x; uint32_t row_left_min = 0 + blockIdx.x * blockDim.x; uint32_t col_right_max = blockDim.y + blockIdx.y * blockDim.y; uint32_t col_right_min = 0 + blockIdx.y * blockDim.y; // Row,Col coordinates to memory location auto left_idx = [=](size_t row, size_t col) { return stride_left*row + col; }; auto right_idx = [=](size_t row, size_t col) { return stride_right*row + col; }; // Helpers to convert row,col coordinates on original matrix to shared memory location auto left2shared = [=] (size_t row, size_t col) -> size_t { assert(row >= row_left_min && row <= row_left_max && col <= col_right_max && col >= col_right_max); auto memory_location = (row - row_left_min) * row_left_max + (col - col_right_min); return memory_location; }; auto right2shared = [=] (size_t row, size_t col) -> size_t { auto offset_to_right_mem = sizes.x * sizes.y * sizeof(T); //amount of elements in left matrix return offset_to_right_mem + left2shared(row, col); }; // every row in the thread block should fetch the corresponding row // from left matrix and divide it so that the threadblock columns do equal work { uint32_t n_left_cols = sizes.x; uint32_t step_size = ceildiv(n_left_cols, blockDim.y); uint32_t start_col = threadIdx.y * step_size; uint32_t end_col = threadIdx.y * step_size + step_size; for (size_t col_to_copy = start_col; (col_to_copy != end_col) && (col_to_copy != n_left_cols); ++col_to_copy) { smem[left2shared(row,col_to_copy)] = mem_left[left_idx(row, col_to_copy)]; } } // every row in the thread block should fetch the corresponding column // from right matrix and divide it so that the threadblock columns do equal work { uint32_t n_right_rows = sizes.z; uint32_t step_size = ceildiv(n_right_rows, blockDim.y); uint32_t start_row = threadIdx.y * step_size; uint32_t end_row = threadIdx.y * step_size + step_size; // The ceildiv operation could overshoot. In that case just end loop if we reach n_right_rows // TODO right now the same element can be copied multiple times. include check to avoid that? // TODO Should not be a functional problem, as it's writing the same thing and does not care about the original // memory value. 
Also adapt in above block for (size_t row_to_copy = start_row; (row_to_copy != end_row) && (row_to_copy != n_right_rows); ++row_to_copy) { smem[right2shared(row_to_copy,col)] = mem_right[right_idx(row_to_copy, col)]; } } //make sure all the shared memory is available __syncthreads(); //If the matrix size is not cleanly tileable, just ignore too large indices if (row >= stride_left || col >= stride_right) { return; } T elem; for (size_t i = 0; i < product_size; ++i) { //elem += left(row, i)*right(i, col) -> _mem[N*row+col]; elem += smem[left2shared(row,i)] * smem[right2shared(i,col)]; } mem_out[stride_left * row + col] = elem; } //easier version that only understands NxN matrices template<typename T> __global__ void mmul_shared_kernel_NN(T* mem_left, T* mem_right, T* mem_out, uint32_t N) { uint32_t row = threadIdx.x + blockIdx.x * blockDim.x; uint32_t col = threadIdx.y + blockIdx.y * blockDim.y; //If the matrix size is not divisible, just ignore too large indices if (row >= N || col >= N) { //printf("skipped %i %i\n", row,col); return; } uint32_t min_row = 0 + blockIdx.x * blockDim.x; // uint32_t max_row = blockDim.x + blockIdx.x * blockDim.x; uint32_t min_col = 0 + blockIdx.y * blockDim.y; // uint32_t max_col = blockDim.x + blockIdx.x * blockDim.x; uint32_t elements_in_block = blockDim.x * blockDim.y; // works for both matrices, both are NxN and N=stride auto matrix_idx = [=](uint32_t row, uint32_t col) -> uint32_t { return N * row + col; }; __shared__ extern float smem[]; //for a given iteration (each iteration moves the block window by one step) compute indices in original matrices auto sliding_idx_left = [=](uint32_t i){ return matrix_idx(row, i*blockDim.y+threadIdx.y); }; auto sliding_idx_right = [=](uint32_t i){ return matrix_idx(i*blockDim.x+threadIdx.x, col); }; //TODO this should be different between rows and cols of scalar product auto scalar_prod_index = [=](uint32_t j) { return threadIdx.x * blockDim.x + j; }; //always write to these locations uint32_t left_element = (row-min_row) * blockDim.y + (col-min_col); uint32_t right_element = elements_in_block + left_element; T output_elem = 0; for(size_t i=0;i<ceildiv(N,blockDim.y);++i) { __syncthreads(); smem[left_element] = mem_left[sliding_idx_left(i)]; smem[right_element] = mem_right[sliding_idx_right(i)]; __syncthreads(); for(size_t j=0;j!=blockDim.y;++j) { output_elem += smem[scalar_prod_index(j)] * smem[scalar_prod_index(j)+elements_in_block]; } } mem_out[matrix_idx(row,col)] = output_elem; //mem_out[matrix_idx(row,col)] = threadIdx.x*threadIdx.y; } template<typename T> Matrix<T> mmul_cuda_shared(Matrix<T> const& left, Matrix<T> const& right, uint32_t n_threads) { assert(left.M == right.M && left.N == right.N); uint32_t N = left.M; Matrix<T> ret(N, N); auto mem_start = std::chrono::high_resolution_clock::now(); //initialize and copy DeviceMemory<T> left_mem(left.data(), left.size()); DeviceMemory<T> right_mem(right.data(), right.size()); //just initialize DeviceMemory<T> out_mem(ret.size()); auto mem_stop = std::chrono::high_resolution_clock::now(); auto mem_duration = mem_stop - mem_start; dim3 blocks{ceildiv(N, n_threads), ceildiv(N, n_threads), 1}; dim3 threads{n_threads, n_threads, 1}; assert(blocks.x * blocks.y * threads.x * threads.y >= ret.size()); // there should be at most one nearly empty set of blocks assert(blocks.x * blocks.y * threads.x * threads.y < (blocks.x + 1) * (blocks.y + 1) * threads.x * threads.y); size_t shared_mem_size = sizeof(T) * 2 * 8 * 8; mmul_shared_kernel_NN<T> << < blocks, threads, 
shared_mem_size>> > (left_mem.mem(), right_mem.mem(), out_mem.mem(), N); hipDeviceSynchronize(); // todo needed? quitOnCudaError(); mem_start = std::chrono::high_resolution_clock::now(); hipMemcpy(ret.data(), out_mem); mem_stop = std::chrono::high_resolution_clock::now(); mem_duration += mem_stop - mem_start; lastMemoryOp = mem_duration; return ret; } // fill out overloads Matrix<float> mmul_cuda_naive(Matrix<float> const& left, Matrix<float> const& right, uint32_t n_threads) { return mmul_cuda_naive<float>(left, right,n_threads); } Matrix<double> mmul_cuda_naive(Matrix<double> const& left, Matrix<double> const& right, uint32_t n_threads) { return mmul_cuda_naive<double>(left, right,n_threads); } Matrix<int16_t> mmul_cuda_naive(Matrix<int16_t> const& left, Matrix<int16_t> const& right, uint32_t n_threads) { return mmul_cuda_naive<int16_t>(left, right,n_threads); } Matrix<float> mmul_cuda_shared(Matrix<float> const& left, Matrix<float> const& right, uint32_t n_threads) { return mmul_cuda_shared<float>(left, right,n_threads); } Matrix<double> mmul_cuda_shared(Matrix<double> const& left, Matrix<double> const& right, uint32_t n_threads) { return mmul_cuda_shared<double>(left, right,n_threads); } //// or hand instantiate templates //template void mmul_naive_wrapper<float>(float* mem_a, float* mem_b, float* mem_out, dim3 blocks, dim3 threads); //template void mmul_naive_wrapper<double>(double* mem_a, double* mem_b, double* mem_out, dim3 blocks, dim3 threads); //template void mmul_naive_wrapper<int>(int* mem_a, int* mem_b, int* mem_out, dim3 blocks, dim3 threads);
9489f4322a0343fd6c9246c193941e61aacafe46.cu
#include "matrix_cuda.h" #include "memoryWrapper.cuh" #include <cuda_runtime.h> //#include <type_traits> //template<typename T> //inline typename std::enable_if<std::is_unsigned<T>::value, T>::type ceildiv (T x, T y) //{ // return x / y + (x % y != 0); //} inline __device__ __host__ uint32_t ceildiv(uint32_t x, uint32_t y) { // division instruction gives you a free modulo. So add one if not cleanly divisible. not that should matter... return x / y + (x % y != 0); } template<typename T> __global__ void mmul_naive_kernel(T * mem_left, T * mem_right, T * mem_out, dim3 sizes) { //I put gridDim here instead of blockDim and that was a really weird bug, cause most of the tests still passed. //TODO Maybe see if e.g. TEST(mmul_cuda, simple_equality) is just garbage because it still worked... uint32_t row = threadIdx.x + blockIdx.x * blockDim.x; uint32_t col = threadIdx.y + blockIdx.y * blockDim.y; //product_size is the size of the scalar product, the amount of columns in left and the amount of rows in right uint32_t stride_left = sizes.y; uint32_t product_size = sizes.y; uint32_t stride_right = sizes.z; uint32_t stride_out = stride_right; //If the matrix size is not divisible, just ignore too large indices if (row >= sizes.x || col >= sizes.z) { //printf("skipped %i %i\n", row,col); return; } T elem = 0; //Todo what about splitting this loop over many threads with either atomic write or some sort of aggregation step? for (size_t i = 0; i < product_size; ++i) { //elem += left(row, i)*right(i, col) -> _mem[N*row+col]; elem += mem_left[stride_left * row + i] * mem_right[stride_right * i + col]; // result has the same amount of colunmns == stride as right mem_out[stride_out * row + col] = elem; } // generic implementation template<typename T> Matrix<T> mmul_cuda_naive(Matrix<T> const& left, Matrix<T> const& right, uint32_t n_threads) { uint32_t rrows = left.M; uint32_t rcols = right.N; Matrix<T> ret(rrows, rcols); auto mem_start = std::chrono::high_resolution_clock::now(); //initialize and copy DeviceMemory<T> left_mem(left.data(), left.size()); DeviceMemory<T> right_mem(right.data(), right.size()); //just initialize DeviceMemory<T> out_mem(ret.size()); auto mem_stop = std::chrono::high_resolution_clock::now(); auto mem_duration = mem_stop - mem_start; dim3 sizes = {uint32_t(left.M), uint32_t(left.N), uint32_t(right.N)}; //TODO check heuristic for these //ATTENTION putting 0 in any dimension is invalid and does not signify "nonexistent" //let's try using thread blocks of 8x8=2 warps. This sucks a bit for very small matrices but then wtf use cuda... dim3 blocks{ceildiv(rrows, n_threads), ceildiv(rcols, n_threads), 1}; dim3 threads{n_threads, n_threads, 1}; assert(blocks.x * blocks.y * threads.x * threads.y >= ret.size()); // there should be at most one nearly empty set of blocks assert(blocks.x * blocks.y * threads.x * threads.y < (blocks.x + 1) * (blocks.y + 1) * threads.x * threads.y); mmul_naive_kernel<T> << < blocks, threads, 0 >> > (left_mem.mem(), right_mem.mem(), out_mem.mem(), sizes); cudaDeviceSynchronize(); // todo needed? quitOnCudaError(); mem_start = std::chrono::high_resolution_clock::now(); cudaMemcpy(ret.data(), out_mem); mem_stop = std::chrono::high_resolution_clock::now(); mem_duration += mem_stop - mem_start; lastMemoryOp = mem_duration; return ret; } // TODO this turned out to be much more of a problem than if it where just square matrices. 
// Not sure how efficient all the index mess is, but this should be started with as many threads as possible so that // a block uses the maximum possible amount of shared memory as to make the copying worth it. template<typename T> __global__ void mmul_shared_kernel(T* mem_left, T* mem_right, T* mem_out, dim3 sizes) { //product_size is the size of the scalar product, the amount of columns in left and the amount of rows in right uint32_t stride_left = sizes.y; uint32_t product_size = sizes.y; uint32_t stride_right = sizes.z; uint32_t row = threadIdx.x + blockIdx.x * blockDim.x; uint32_t col = threadIdx.y + blockIdx.y * blockDim.y; //range of rows in the left matrix and cols in the right matrix that we need in this block extern __shared__ float smem[]; uint32_t row_left_max = blockDim.x + blockIdx.x * blockDim.x; uint32_t row_left_min = 0 + blockIdx.x * blockDim.x; uint32_t col_right_max = blockDim.y + blockIdx.y * blockDim.y; uint32_t col_right_min = 0 + blockIdx.y * blockDim.y; // Row,Col coordinates to memory location auto left_idx = [=](size_t row, size_t col) { return stride_left*row + col; }; auto right_idx = [=](size_t row, size_t col) { return stride_right*row + col; }; // Helpers to convert row,col coordinates on original matrix to shared memory location auto left2shared = [=] (size_t row, size_t col) -> size_t { assert(row >= row_left_min && row <= row_left_max && col <= col_right_max && col >= col_right_max); auto memory_location = (row - row_left_min) * row_left_max + (col - col_right_min); return memory_location; }; auto right2shared = [=] (size_t row, size_t col) -> size_t { auto offset_to_right_mem = sizes.x * sizes.y * sizeof(T); //amount of elements in left matrix return offset_to_right_mem + left2shared(row, col); }; // every row in the thread block should fetch the corresponding row // from left matrix and divide it so that the threadblock columns do equal work { uint32_t n_left_cols = sizes.x; uint32_t step_size = ceildiv(n_left_cols, blockDim.y); uint32_t start_col = threadIdx.y * step_size; uint32_t end_col = threadIdx.y * step_size + step_size; for (size_t col_to_copy = start_col; (col_to_copy != end_col) && (col_to_copy != n_left_cols); ++col_to_copy) { smem[left2shared(row,col_to_copy)] = mem_left[left_idx(row, col_to_copy)]; } } // every row in the thread block should fetch the corresponding column // from right matrix and divide it so that the threadblock columns do equal work { uint32_t n_right_rows = sizes.z; uint32_t step_size = ceildiv(n_right_rows, blockDim.y); uint32_t start_row = threadIdx.y * step_size; uint32_t end_row = threadIdx.y * step_size + step_size; // The ceildiv operation could overshoot. In that case just end loop if we reach n_right_rows // TODO right now the same element can be copied multiple times. include check to avoid that? // TODO Should not be a functional problem, as it's writing the same thing and does not care about the original // memory value. 
Also adapt in above block for (size_t row_to_copy = start_row; (row_to_copy != end_row) && (row_to_copy != n_right_rows); ++row_to_copy) { smem[right2shared(row_to_copy,col)] = mem_right[right_idx(row_to_copy, col)]; } } //make sure all the shared memory is available __syncthreads(); //If the matrix size is not cleanly tileable, just ignore too large indices if (row >= stride_left || col >= stride_right) { return; } T elem; for (size_t i = 0; i < product_size; ++i) { //elem += left(row, i)*right(i, col) -> _mem[N*row+col]; elem += smem[left2shared(row,i)] * smem[right2shared(i,col)]; } mem_out[stride_left * row + col] = elem; } //easier version that only understands NxN matrices template<typename T> __global__ void mmul_shared_kernel_NN(T* mem_left, T* mem_right, T* mem_out, uint32_t N) { uint32_t row = threadIdx.x + blockIdx.x * blockDim.x; uint32_t col = threadIdx.y + blockIdx.y * blockDim.y; //If the matrix size is not divisible, just ignore too large indices if (row >= N || col >= N) { //printf("skipped %i %i\n", row,col); return; } uint32_t min_row = 0 + blockIdx.x * blockDim.x; // uint32_t max_row = blockDim.x + blockIdx.x * blockDim.x; uint32_t min_col = 0 + blockIdx.y * blockDim.y; // uint32_t max_col = blockDim.x + blockIdx.x * blockDim.x; uint32_t elements_in_block = blockDim.x * blockDim.y; // works for both matrices, both are NxN and N=stride auto matrix_idx = [=](uint32_t row, uint32_t col) -> uint32_t { return N * row + col; }; __shared__ extern float smem[]; //for a given iteration (each iteration moves the block window by one step) compute indices in original matrices auto sliding_idx_left = [=](uint32_t i){ return matrix_idx(row, i*blockDim.y+threadIdx.y); }; auto sliding_idx_right = [=](uint32_t i){ return matrix_idx(i*blockDim.x+threadIdx.x, col); }; //TODO this should be different between rows and cols of scalar product auto scalar_prod_index = [=](uint32_t j) { return threadIdx.x * blockDim.x + j; }; //always write to these locations uint32_t left_element = (row-min_row) * blockDim.y + (col-min_col); uint32_t right_element = elements_in_block + left_element; T output_elem = 0; for(size_t i=0;i<ceildiv(N,blockDim.y);++i) { __syncthreads(); smem[left_element] = mem_left[sliding_idx_left(i)]; smem[right_element] = mem_right[sliding_idx_right(i)]; __syncthreads(); for(size_t j=0;j!=blockDim.y;++j) { output_elem += smem[scalar_prod_index(j)] * smem[scalar_prod_index(j)+elements_in_block]; } } mem_out[matrix_idx(row,col)] = output_elem; //mem_out[matrix_idx(row,col)] = threadIdx.x*threadIdx.y; } template<typename T> Matrix<T> mmul_cuda_shared(Matrix<T> const& left, Matrix<T> const& right, uint32_t n_threads) { assert(left.M == right.M && left.N == right.N); uint32_t N = left.M; Matrix<T> ret(N, N); auto mem_start = std::chrono::high_resolution_clock::now(); //initialize and copy DeviceMemory<T> left_mem(left.data(), left.size()); DeviceMemory<T> right_mem(right.data(), right.size()); //just initialize DeviceMemory<T> out_mem(ret.size()); auto mem_stop = std::chrono::high_resolution_clock::now(); auto mem_duration = mem_stop - mem_start; dim3 blocks{ceildiv(N, n_threads), ceildiv(N, n_threads), 1}; dim3 threads{n_threads, n_threads, 1}; assert(blocks.x * blocks.y * threads.x * threads.y >= ret.size()); // there should be at most one nearly empty set of blocks assert(blocks.x * blocks.y * threads.x * threads.y < (blocks.x + 1) * (blocks.y + 1) * threads.x * threads.y); size_t shared_mem_size = sizeof(T) * 2 * 8 * 8; mmul_shared_kernel_NN<T> << < blocks, threads, 
shared_mem_size>> > (left_mem.mem(), right_mem.mem(), out_mem.mem(), N); cudaDeviceSynchronize(); // todo needed? quitOnCudaError(); mem_start = std::chrono::high_resolution_clock::now(); cudaMemcpy(ret.data(), out_mem); mem_stop = std::chrono::high_resolution_clock::now(); mem_duration += mem_stop - mem_start; lastMemoryOp = mem_duration; return ret; } // fill out overloads Matrix<float> mmul_cuda_naive(Matrix<float> const& left, Matrix<float> const& right, uint32_t n_threads) { return mmul_cuda_naive<float>(left, right,n_threads); } Matrix<double> mmul_cuda_naive(Matrix<double> const& left, Matrix<double> const& right, uint32_t n_threads) { return mmul_cuda_naive<double>(left, right,n_threads); } Matrix<int16_t> mmul_cuda_naive(Matrix<int16_t> const& left, Matrix<int16_t> const& right, uint32_t n_threads) { return mmul_cuda_naive<int16_t>(left, right,n_threads); } Matrix<float> mmul_cuda_shared(Matrix<float> const& left, Matrix<float> const& right, uint32_t n_threads) { return mmul_cuda_shared<float>(left, right,n_threads); } Matrix<double> mmul_cuda_shared(Matrix<double> const& left, Matrix<double> const& right, uint32_t n_threads) { return mmul_cuda_shared<double>(left, right,n_threads); } //// or hand instantiate templates //template void mmul_naive_wrapper<float>(float* mem_a, float* mem_b, float* mem_out, dim3 blocks, dim3 threads); //template void mmul_naive_wrapper<double>(double* mem_a, double* mem_b, double* mem_out, dim3 blocks, dim3 threads); //template void mmul_naive_wrapper<int>(int* mem_a, int* mem_b, int* mem_out, dim3 blocks, dim3 threads);
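The general shared-memory kernel above accumulates into an uninitialized T elem, and the host wrapper sizes the dynamic shared buffer with a hardcoded sizeof(T) * 2 * 8 * 8 even though the launch uses n_threads x n_threads blocks. Below is a minimal sketch of the same tiling idea with a zero-initialized accumulator, guarded tile loads, and a compile-time TILE constant in place of dynamic shared memory; TILE, the kernel name, and the square-matrix assumption are illustrative and not taken from the files above.

#include <cstdint>

constexpr int TILE = 16;  // illustrative tile width; launch with blockDim = (TILE, TILE)

template <typename T>
__global__ void mmul_tiled_sketch(const T* left, const T* right, T* out, uint32_t N) {
    __shared__ T sL[TILE][TILE];
    __shared__ T sR[TILE][TILE];

    uint32_t row = blockIdx.y * TILE + threadIdx.y;
    uint32_t col = blockIdx.x * TILE + threadIdx.x;

    T acc = 0;  // accumulator starts at zero
    for (uint32_t t = 0; t < (N + TILE - 1) / TILE; ++t) {
        // guarded loads: pad with zero where the tile hangs over the matrix edge
        uint32_t lCol = t * TILE + threadIdx.x;
        uint32_t rRow = t * TILE + threadIdx.y;
        sL[threadIdx.y][threadIdx.x] = (row < N && lCol < N) ? left[row * N + lCol] : T(0);
        sR[threadIdx.y][threadIdx.x] = (rRow < N && col < N) ? right[rRow * N + col] : T(0);
        __syncthreads();

        for (uint32_t k = 0; k < TILE; ++k)
            acc += sL[threadIdx.y][k] * sR[k][threadIdx.x];
        __syncthreads();
    }
    if (row < N && col < N)
        out[row * N + col] = acc;
}

Launching with dim3 blocks(ceildiv(N, TILE), ceildiv(N, TILE), 1) and dim3 threads(TILE, TILE, 1) keeps the load guards and the final bounds check consistent with the ceildiv helper defined in the file.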
4f0983810ea9488ea16242a36d3d18b921473602.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpu_sqrt.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; hipMalloc(&a, XSIZE*YSIZE); long N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpu_sqrt), dim3(gridBlock),dim3(threadBlock), 0, 0, a,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpu_sqrt), dim3(gridBlock),dim3(threadBlock), 0, 0, a,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpu_sqrt), dim3(gridBlock),dim3(threadBlock), 0, 0, a,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4f0983810ea9488ea16242a36d3d18b921473602.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpu_sqrt.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); long N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpu_sqrt<<<gridBlock,threadBlock>>>(a,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpu_sqrt<<<gridBlock,threadBlock>>>(a,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpu_sqrt<<<gridBlock,threadBlock>>>(a,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
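In the timing harness above, cudaMalloc(&a, XSIZE*YSIZE) passes an element count where a byte count is expected (N = XSIZE*YSIZE floats need N*sizeof(float) bytes), and the host clock is read after the last launch without a final synchronization, so queued kernels may still be running. A small sketch of the same measurement with the buffer sized in bytes and a synchronize before the stop timestamp; gpu_sqrt and its (grid, block) launch shape come from the file above, everything else is illustrative.

#include <cuda_runtime.h>
#include <chrono>

__global__ void gpu_sqrt(float* a, long N);     // provided by gpu_sqrt.cu, as in the harness above

// Returns average microseconds per launch of gpu_sqrt over XSIZE*YSIZE floats.
float time_gpu_sqrt(int XSIZE, int YSIZE, dim3 grid, dim3 block, int iters) {
    long N = static_cast<long>(XSIZE) * YSIZE;
    float* a = nullptr;
    cudaMalloc(&a, N * sizeof(float));          // size in bytes, not elements

    gpu_sqrt<<<grid, block>>>(a, N);            // warm-up
    cudaDeviceSynchronize();

    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < iters; ++i)
        gpu_sqrt<<<grid, block>>>(a, N);
    cudaDeviceSynchronize();                    // drain the launch queue before stopping the clock
    auto stop = std::chrono::steady_clock::now();

    cudaFree(a);
    std::chrono::duration<float, std::chrono::microseconds::period> usecs = stop - start;
    return usecs.count() / iters;
}

cudaEventRecord/cudaEventElapsedTime would time the device directly; the host-clock pattern is kept here only to stay close to the structure of the harness above.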
691151276edd92ee897782b4c1fc1b6f9cc2e107.hip
// !!! This is a file automatically generated by hipify!!! /*notice this works for any size of array, another method which is called interleaved pairs approach*/ #include <stdio.h> #include <stdlib.h> #include "common.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" //reduction neighbored pairs kernel __global__ void redunction_neighbored_pairs(int * input, int * temp, int size) { int tid = threadIdx.x; int gid = blockDim.x * blockIdx.x + threadIdx.x; if (gid > size) return; int offset = blockDim.x * blockIdx.x * 2; for(int i = blockDim.x; i >= 1; i /= 2 ) { if( tid < i ){ input[offset+tid] += input[offset+ tid + i]; } /*if(i*tid < blockDim.x){ printf("blockIdx.x: %d offset: %d gid: %d tid: %d index+offset: %d input[index+offset]: %d index+offset+i: %d input[index+offset+i]: %d\n", blockIdx.x, offset, gid, tid, index+offset, input[index+offset], index+offset+i, input[index+offset+i]); input[index+offset] += input[index+offset+i]; }*/ __syncthreads(); } //for each block, element that is assigned to the first core/thread of block will be the //sum value of this block if (tid == 0) { //printf("final output value is: %d\n",input[gid]); temp[blockIdx.x] = input[offset]; /*if(blockIdx.x == 1){ printf("current block id and output value is: %d, %d\n", blockIdx.x, temp[blockIdx.x]); }*/ //printf("current block id is: %d, current gid is: %d, temp[%d] = %d\n",blockIdx.x,gid,blockIdx.x,temp[blockIdx.x]); } } int main(int argc, char ** argv) { printf("Running neighbored pairs reduction kernel \n"); // int size = 1 << 27; //128 Mb of data //int size = 1024; int byte_size = size * sizeof(int); int block_size = 128; // int * cpu_input, *h_ref; cpu_input = (int*)malloc(byte_size); // initialize(cpu_input, size, INIT_RANDOM); // // //get the reduction result from cpu int cpu_result = accumulate_cpu(cpu_input,size); // dim3 block(block_size); dim3 grid((size%(block_size*2)==0)?size/(block_size*2):size/(block_size*2)+1); // printf("Kernel launch parameters | grid.x : %d, block.x : %d \n",grid.x, block.x); // //prepare pointer to collect sum for each block int block_byte_size = sizeof(int)* grid.x; h_ref = (int*)malloc(block_byte_size); // int * gpu_input, *g_ref; // hipMalloc((void**)&gpu_input,byte_size); hipMalloc((void**)&g_ref, block_byte_size); // hipMemset(g_ref, 0, block_byte_size); hipMemcpy(gpu_input, cpu_input, byte_size, hipMemcpyHostToDevice); // hipLaunchKernelGGL(( redunction_neighbored_pairs) , dim3(grid), dim3(block) , 0, 0, gpu_input, g_ref, size); // hipDeviceSynchronize(); // hipMemcpy(h_ref, g_ref, block_byte_size, hipMemcpyDeviceToHost); // int gpu_result = 0; // for (int i = 0; i < grid.x; i++) { printf("current index and h_ref value is: %d, %d\n", i, h_ref[i]); gpu_result += h_ref[i]; } // // //validity check compare_results(gpu_result, cpu_result); // hipFree(g_ref); hipFree(gpu_input); // free(h_ref); free(cpu_input); // hipDeviceReset(); return 0; }
691151276edd92ee897782b4c1fc1b6f9cc2e107.cu
/*notice this works for any size of array, another method which is called interleaved pairs approach*/ #include <stdio.h> #include <stdlib.h> #include "common.h" #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" //reduction neighbored pairs kernel __global__ void redunction_neighbored_pairs(int * input, int * temp, int size) { int tid = threadIdx.x; int gid = blockDim.x * blockIdx.x + threadIdx.x; if (gid > size) return; int offset = blockDim.x * blockIdx.x * 2; for(int i = blockDim.x; i >= 1; i /= 2 ) { if( tid < i ){ input[offset+tid] += input[offset+ tid + i]; } /*if(i*tid < blockDim.x){ printf("blockIdx.x: %d offset: %d gid: %d tid: %d index+offset: %d input[index+offset]: %d index+offset+i: %d input[index+offset+i]: %d\n", blockIdx.x, offset, gid, tid, index+offset, input[index+offset], index+offset+i, input[index+offset+i]); input[index+offset] += input[index+offset+i]; }*/ __syncthreads(); } //for each block, element that is assigned to the first core/thread of block will be the //sum value of this block if (tid == 0) { //printf("final output value is: %d\n",input[gid]); temp[blockIdx.x] = input[offset]; /*if(blockIdx.x == 1){ printf("current block id and output value is: %d, %d\n", blockIdx.x, temp[blockIdx.x]); }*/ //printf("current block id is: %d, current gid is: %d, temp[%d] = %d\n",blockIdx.x,gid,blockIdx.x,temp[blockIdx.x]); } } int main(int argc, char ** argv) { printf("Running neighbored pairs reduction kernel \n"); // int size = 1 << 27; //128 Mb of data //int size = 1024; int byte_size = size * sizeof(int); int block_size = 128; // int * cpu_input, *h_ref; cpu_input = (int*)malloc(byte_size); // initialize(cpu_input, size, INIT_RANDOM); // // //get the reduction result from cpu int cpu_result = accumulate_cpu(cpu_input,size); // dim3 block(block_size); dim3 grid((size%(block_size*2)==0)?size/(block_size*2):size/(block_size*2)+1); // printf("Kernel launch parameters | grid.x : %d, block.x : %d \n",grid.x, block.x); // //prepare pointer to collect sum for each block int block_byte_size = sizeof(int)* grid.x; h_ref = (int*)malloc(block_byte_size); // int * gpu_input, *g_ref; // cudaMalloc((void**)&gpu_input,byte_size); cudaMalloc((void**)&g_ref, block_byte_size); // cudaMemset(g_ref, 0, block_byte_size); cudaMemcpy(gpu_input, cpu_input, byte_size, cudaMemcpyHostToDevice); // redunction_neighbored_pairs <<<grid, block >>>(gpu_input, g_ref, size); // cudaDeviceSynchronize(); // cudaMemcpy(h_ref, g_ref, block_byte_size, cudaMemcpyDeviceToHost); // int gpu_result = 0; // for (int i = 0; i < grid.x; i++) { printf("current index and h_ref value is: %d, %d\n", i, h_ref[i]); gpu_result += h_ref[i]; } // // //validity check compare_results(gpu_result, cpu_result); // cudaFree(g_ref); cudaFree(gpu_input); // free(h_ref); free(cpu_input); // cudaDeviceReset(); return 0; }
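The kernel above does the whole sweep in global memory, reducing 2*blockDim.x elements per block and leaving one partial sum per block for the host to accumulate. Its opening comment points at a shared-memory follow-up; a minimal sketch of that variant with the same launch geometry and host-side accumulation is below (the kernel name and the BLOCK_SIZE constant are illustrative).

#define BLOCK_SIZE 128   // must match the block size used at launch

__global__ void reduction_shared_sketch(const int* input, int* block_sums, int size) {
    __shared__ int sdata[BLOCK_SIZE];
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x * 2 + tid;

    // each thread loads up to two elements, padding with 0 past the end of the array
    int v = (gid < size) ? input[gid] : 0;
    if (gid + blockDim.x < size) v += input[gid + blockDim.x];
    sdata[tid] = v;
    __syncthreads();

    // sequential-addressing sweep in shared memory
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) block_sums[blockIdx.x] = sdata[0];
}

Unlike the in-place version above, the input array is left untouched, so repeated runs over the same data give the same result.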
818aa17c20349d60c8774008f1182074daaae23c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "matrix_indexing.hpp" #include "non_periodic_convD.cuh" #include "utils_hip.cuh" extern hipStream_t streamRep; #define idx2(i, j, d) (SUB2IND2D(i, j, d)) #define idx3(i, j, k, d1, d2) (SUB2IND3D(i, j, k, d1, d2)) #define idx4(i, j, k, l, m, n, o) (SUB2IND4D(i, j, k, l, m, n, o)) #define CUDART_PI_D acos(-1.0) extern int Blocks; extern int Threads; // Complex pointwise multiplication static __global__ void ComplexPointwiseMulAndScale(ComplexD *a, const ComplexD *b, int size, uint32_t nVec) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int j = 0; j < nVec; j++) { for (int i = threadID; i < size; i += numThreads) { a[i + j * size] = ComplexScale(ComplexMul(a[i + j * size], b[i]), 1.0f); } } } __global__ void setDataFft1D(ComplexD *Kc, ComplexD *Xc, int ng, int nVec, double *VGrid, double hsq, int sign) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < ng; i += numThreads) { Kc[i].x = kernel1d(hsq, i); Kc[i].y=0; if (i > 0) { Kc[i].x = Kc[i].x + sign * kernel1d(hsq, ng - i); if (sign == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * ng); Kc[i] = ComplexMul(Kc[i], my_cexpf(arg)); } } for (int j = 0; j < nVec; j++) { Xc[i + j * ng].x = VGrid[i + j * ng]; Xc[i + j * ng].y=0; if (sign == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * ng); Xc[i + j * ng] = ComplexMul(Xc[i + j * ng], my_cexpf(arg)); } } } } __global__ void setDataFft2D(ComplexD *Kc, ComplexD *Xc, int n1, int n2, int nVec, const double *const VGrid, double hsq, int signx, int signy) { register int i, j; for (register uint32_t TID = blockIdx.x * blockDim.x + threadIdx.x; TID < n1 * n2; TID += blockDim.x * gridDim.x) { i = TID % n1; j = (TID / n1); Kc[idx2(i, j, n1)].x = kernel2d(hsq, i, j); Kc[idx2(i, j, n1)].y = 0; if (i > 0) { Kc[idx2(i, j, n1)].x += signx * kernel2d(hsq, n1 - i, j); } if (j > 0) { Kc[idx2(i, j, n1)].x += signy * kernel2d(hsq, i, n2 - j); } if (i > 0 && j > 0) { Kc[idx2(i, j, n1)].x += signx * signy * kernel2d(hsq, n1 - i, n2 - j); } for (uint32_t iVec = 0; iVec < nVec; iVec++) { Xc[idx3(i, j, iVec, n1, n2)].x = VGrid[idx3(i, j, iVec, n1, n2)]; Xc[idx3(i, j, iVec, n1, n2)].y = 0; if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * n1); Xc[idx3(i, j, iVec, n1, n2)] = ComplexMul(Xc[idx3(i, j, iVec, n1, n2)], my_cexpf(arg)); } if (signy == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * j / (2 * n2); Xc[idx3(i, j, iVec, n1, n2)] = ComplexMul(Xc[idx3(i, j, iVec, n1, n2)], my_cexpf(arg)); } } if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * n1); Kc[idx2(i, j, n1)] = ComplexMul(Kc[idx2(i, j, n1)], my_cexpf(arg)); } if (signy == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * j / (2 * n2); Kc[idx2(i, j, n1)] = ComplexMul(Kc[idx2(i, j, n1)], my_cexpf(arg)); } } } __global__ void addToPhiGrid(ComplexD *Xc, double *PhiGrid, int ng, double scale) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < ng; i += numThreads) { PhiGrid[i] += scale * Xc[i].x; } } __global__ void normalizeInverse(ComplexD *Xc, int ng, uint32_t nVec) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < ng; i += numThreads) { 
for (uint32_t iVec = 0; iVec < nVec; iVec++) { ComplexD arg; arg.x = 0; arg.y = +2 * CUDART_PI_D * i / (2 * ng); Xc[i + iVec * ng] = ComplexMul(Xc[i + iVec * ng], my_cexpf(arg)); } } } __global__ void normalizeInverse2D(ComplexD *Xc, uint32_t n1, uint32_t n2, uint32_t nVec, int signx, int signy) { register int i, j; for (register uint32_t TID = blockIdx.x * blockDim.x + threadIdx.x; TID < n1 * n2; TID += blockDim.x * gridDim.x) { i = TID % n1; j = (TID / n1); for (uint32_t iVec = 0; iVec < nVec; iVec++) { if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = +2 * CUDART_PI_D * i / (2 * n1); Xc[idx3(i, j, iVec, n1, n2)] = ComplexMul(Xc[idx3(i, j, iVec, n1, n2)], my_cexpf(arg)); } if (signy == -1) { ComplexD arg; arg.x = 0; arg.y = +2 * CUDART_PI_D * j / (2 * n2); Xc[idx3(i, j, iVec, n1, n2)] = ComplexMul(Xc[idx3(i, j, iVec, n1, n2)], my_cexpf(arg)); } } } } void conv1dnopadcuda(double *PhiGrid, double *VGrid, double h, uint32_t *const nGridDims, uint32_t nVec, int nDim,hipfftHandle &plan, hipfftHandle &plan_rhs, ComplexD *Kc, ComplexD *Xc) { uint32_t n1 = nGridDims[0]; double hsq = h * h; /*even*/ hipLaunchKernelGGL(( setDataFft1D), dim3(Blocks), dim3(Threads), 0, streamRep, Kc, Xc, n1, nVec, VGrid, hsq, 1); //hipDeviceSynchronize(streamRep); hipfftExecZ2Z(plan, reinterpret_cast<hipfftDoubleComplex *>(Kc), reinterpret_cast<hipfftDoubleComplex *>(Kc), HIPFFT_FORWARD); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_FORWARD); hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, Kc, n1, nVec); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_BACKWARD); hipLaunchKernelGGL(( addToPhiGrid), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, PhiGrid, n1 * nVec, (0.5 / n1)); //hipDeviceSynchronize(streamRep); hipLaunchKernelGGL(( setDataFft1D), dim3(Blocks), dim3(Threads), 0, streamRep, Kc, Xc, n1, nVec, VGrid, hsq, -1); hipfftExecZ2Z(plan, reinterpret_cast<hipfftDoubleComplex *>(Kc), reinterpret_cast<hipfftDoubleComplex *>(Kc), HIPFFT_FORWARD); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_FORWARD); hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, Kc, n1, nVec); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_BACKWARD); hipLaunchKernelGGL(( normalizeInverse), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, n1, nVec); hipLaunchKernelGGL(( addToPhiGrid), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, PhiGrid, n1 * nVec, (0.5 / n1)); // hipDeviceSynchronize(streamRep); return; } void conv2dnopadcuda(double *const PhiGrid, const double *const VGrid, const double h, uint32_t *const nGridDims, const uint32_t nVec, const uint32_t nDim,hipfftHandle &plan, hipfftHandle &plan_rhs, ComplexD *Kc, ComplexD* Xc) { double hsq = h * h; // find the size of the last dimension in FFTW (add padding) uint32_t n1 = nGridDims[0]; uint32_t n2 = nGridDims[1]; // ============================== EVEN-EVEN hipLaunchKernelGGL(( setDataFft2D), dim3(Blocks), dim3(Threads), 0, streamRep, Kc, Xc, n1, n2, nVec, VGrid, hsq, 1, 1); hipfftExecZ2Z(plan, reinterpret_cast<hipfftDoubleComplex *>(Kc), reinterpret_cast<hipfftDoubleComplex *>(Kc), HIPFFT_FORWARD); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex 
*>(Xc), HIPFFT_FORWARD); hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, Kc, n1 * n2, nVec); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_BACKWARD); hipLaunchKernelGGL(( addToPhiGrid), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, PhiGrid, n1 * n2 * nVec, (0.25 / (n1 * n2))); // ============================== ODD-EVEN hipLaunchKernelGGL(( setDataFft2D), dim3(Blocks), dim3(Threads), 0, streamRep, Kc, Xc, n1, n2, nVec, VGrid, hsq, -1, 1); hipfftExecZ2Z(plan, reinterpret_cast<hipfftDoubleComplex *>(Kc), reinterpret_cast<hipfftDoubleComplex *>(Kc), HIPFFT_FORWARD); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_FORWARD); hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, Kc, n1 * n2, nVec); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_BACKWARD); hipLaunchKernelGGL(( normalizeInverse2D), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, n1, n2, nVec, -1, 1); hipLaunchKernelGGL(( addToPhiGrid), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, PhiGrid, n1 * n2 * nVec, (0.25 / (n1 * n2))); // ============================== EVEN-ODD hipLaunchKernelGGL(( setDataFft2D), dim3(Blocks), dim3(Threads), 0, streamRep, Kc, Xc, n1, n2, nVec, VGrid, hsq, 1, -1); hipfftExecZ2Z(plan, reinterpret_cast<hipfftDoubleComplex *>(Kc), reinterpret_cast<hipfftDoubleComplex *>(Kc), HIPFFT_FORWARD); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_FORWARD); hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, Kc, n1 * n2, nVec); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_BACKWARD); hipLaunchKernelGGL(( normalizeInverse2D), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, n1, n2, nVec, 1, -1); hipLaunchKernelGGL(( addToPhiGrid), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, PhiGrid, n1 * n2 * nVec, (0.25 / (n1 * n2))); // ============================== ODD-ODD hipLaunchKernelGGL(( setDataFft2D), dim3(Blocks), dim3(Threads), 0, streamRep, Kc, Xc, n1, n2, nVec, VGrid, hsq, -1, -1); hipfftExecZ2Z(plan, reinterpret_cast<hipfftDoubleComplex *>(Kc), reinterpret_cast<hipfftDoubleComplex *>(Kc), HIPFFT_FORWARD); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_FORWARD); hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, Kc, n1 * n2, nVec); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_BACKWARD); hipLaunchKernelGGL(( normalizeInverse2D), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, n1, n2, nVec, -1, -1); hipLaunchKernelGGL(( addToPhiGrid), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, PhiGrid, n1 * n2 * nVec, (0.25 / (n1 * n2))); //hipDeviceSynchronize(); return; } __global__ void setDataFft3D(ComplexD *Kc, ComplexD *Xc, int n1, int n2, int n3, int nVec, const double *const VGrid, double hsq, int signx, int signy, int signz) { register int i, j, k; register ComplexD K, X; for (register uint32_t TID = blockIdx.x * blockDim.x + threadIdx.x; TID < n1 * n2 * n3; TID += blockDim.x * gridDim.x) { i = TID % n1; j = (TID / n1) % n2; k = (TID / 
n1) / n2; K.x = kernel3d(hsq, i, j, k); K.y = 0; if (i > 0) { K.x += signx * kernel3d(hsq, n1 - i, j, k); } if (j > 0) { K.x += signy * kernel3d(hsq, i, n2 - j, k); } if (i > 0 && j > 0) { K.x += signx * signy * kernel3d(hsq, n1 - i, n2 - j, k); } if (k > 0) { K.x += signz * kernel3d(hsq, i, j, n3 - k); } if (k > 0 && i > 0) { K.x += signx * signz * kernel3d(hsq, n1 - i, j, n3 - k); } if (k > 0 && j > 0) { K.x += signy * signz * kernel3d(hsq, i, n2 - j, n3 - k); } if (k > 0 && i > 0 && j > 0) { K.x += signx * signy * signz * kernel3d(hsq, n1 - i, n2 - j, n3 - k); } for (uint32_t iVec = 0; iVec < nVec; iVec++) { X.x = VGrid[idx4(i, j, k, iVec, n1, n2, n3)]; X.y = 0; if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * n1); X = ComplexMul(X, my_cexpf(arg)); } if (signy == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * j / (2 * n2); X = ComplexMul(X, my_cexpf(arg)); } if (signz == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * k / (2 * n3); X = ComplexMul(X, my_cexpf(arg)); } Xc[idx4(i, j, k, iVec, n1, n2, n3)] = X; } if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * n1); K = ComplexMul(K, my_cexpf(arg)); } if (signy == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * j / (2 * n2); K = ComplexMul(K, my_cexpf(arg)); } if (signz == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * k / (2 * n3); K = ComplexMul(K, my_cexpf(arg)); } Kc[idx3(i, j, k, n1, n2)] = K; } } __global__ void normalizeInverse3D(ComplexD *Xc, uint32_t n1, uint32_t n2, uint32_t n3, uint32_t nVec, int signx, int signy, int signz) { register int i, j, k; for (register uint32_t TID = blockIdx.x * blockDim.x + threadIdx.x; TID < n1 * n2 * n3; TID += blockDim.x * gridDim.x) { i = TID % n1; j = (TID / n1) % n2; k = (TID / n1) / n2; for (uint32_t iVec = 0; iVec < nVec; iVec++) { if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = +2 * CUDART_PI_D * i / (2 * n1); Xc[idx4(i, j, k, iVec, n1, n2, n3)] = ComplexMul(Xc[idx4(i, j, k, iVec, n1, n2, n3)], my_cexpf(arg)); } if (signy == -1) { ComplexD arg; arg.x = 0; arg.y = +2 * CUDART_PI_D * j / (2 * n2); Xc[idx4(i, j, k, iVec, n1, n2, n3)] = ComplexMul(Xc[idx4(i, j, k, iVec, n1, n2, n3)], my_cexpf(arg)); } if (signz == -1) { ComplexD arg; arg.x = 0; arg.y = +2 * CUDART_PI_D * k / (2 * n3); Xc[idx4(i, j, k, iVec, n1, n2, n3)] = ComplexMul(Xc[idx4(i, j, k, iVec, n1, n2, n3)], my_cexpf(arg)); } } } } void term3D(ComplexD *Kc, ComplexD *Xc, uint32_t n1, uint32_t n2, uint32_t n3, uint32_t nVec, const double *const VGrid, double *PhiGrid, double hsq, hipfftHandle plan, hipfftHandle plan_rhs, int signx, int signy, int signz) { hipLaunchKernelGGL(( setDataFft3D), dim3(Blocks), dim3(Threads), 0, streamRep, Kc, Xc, n1, n2, n3, nVec, VGrid, hsq, signx, signy, signz); hipfftExecZ2Z(plan, reinterpret_cast<hipfftDoubleComplex *>(Kc), reinterpret_cast<hipfftDoubleComplex *>(Kc), HIPFFT_FORWARD); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_FORWARD); hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, Kc, n1 * n2*n3, nVec); hipfftExecZ2Z(plan_rhs, reinterpret_cast<hipfftDoubleComplex *>(Xc), reinterpret_cast<hipfftDoubleComplex *>(Xc), HIPFFT_BACKWARD); if(signx==-1 || signy==-1 || signz==-1){ hipLaunchKernelGGL(( normalizeInverse3D), dim3(Blocks), dim3(Threads), 0, streamRep, Xc, n1, n2, n3, nVec, signx, signy, signz);} hipLaunchKernelGGL(( addToPhiGrid), dim3(Blocks), dim3(Threads), 0, 
streamRep, Xc, PhiGrid, n1 * n2 * n3 * nVec, (0.125 / (n1 * n2 * n3))); } void conv3dnopadcuda(double *const PhiGrid, const double *const VGrid, const double h, uint32_t *const nGridDims, const uint32_t nVec, const uint32_t nDim,hipfftHandle &plan, hipfftHandle &plan_rhs, ComplexD *Kc, ComplexD *Xc) { double hsq = h * h; // find the size of the last dimension in FFTW (add padding) uint32_t n1 = nGridDims[0]; uint32_t n2 = nGridDims[1]; uint32_t n3 = nGridDims[2]; // ============================== EVEN-EVEN-EVEN term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, 1, 1,1); // ============================== ODD-EVEN-EVEN term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, -1, 1, 1); // ============================== EVEN-ODD-EVEN term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, 1, -1, 1); // ============================== ODD-ODD-EVEN term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, -1, -1, 1); // ============================== EVEN-EVEN-ODD term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, 1, 1, -1); // ============================== EVEN-ODD-EVEN term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, -1, 1, -1); // ============================== EVEN-ODD-ODD term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, 1, -1, -1); // ============================== ODD-ODD-ODD term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, -1, -1, -1); }
818aa17c20349d60c8774008f1182074daaae23c.cu
#include "matrix_indexing.hpp" #include "non_periodic_convD.cuh" #include "utils_cuda.cuh" extern cudaStream_t streamRep; #define idx2(i, j, d) (SUB2IND2D(i, j, d)) #define idx3(i, j, k, d1, d2) (SUB2IND3D(i, j, k, d1, d2)) #define idx4(i, j, k, l, m, n, o) (SUB2IND4D(i, j, k, l, m, n, o)) #define CUDART_PI_D acos(-1.0) extern int Blocks; extern int Threads; // Complex pointwise multiplication static __global__ void ComplexPointwiseMulAndScale(ComplexD *a, const ComplexD *b, int size, uint32_t nVec) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int j = 0; j < nVec; j++) { for (int i = threadID; i < size; i += numThreads) { a[i + j * size] = ComplexScale(ComplexMul(a[i + j * size], b[i]), 1.0f); } } } __global__ void setDataFft1D(ComplexD *Kc, ComplexD *Xc, int ng, int nVec, double *VGrid, double hsq, int sign) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < ng; i += numThreads) { Kc[i].x = kernel1d(hsq, i); Kc[i].y=0; if (i > 0) { Kc[i].x = Kc[i].x + sign * kernel1d(hsq, ng - i); if (sign == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * ng); Kc[i] = ComplexMul(Kc[i], my_cexpf(arg)); } } for (int j = 0; j < nVec; j++) { Xc[i + j * ng].x = VGrid[i + j * ng]; Xc[i + j * ng].y=0; if (sign == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * ng); Xc[i + j * ng] = ComplexMul(Xc[i + j * ng], my_cexpf(arg)); } } } } __global__ void setDataFft2D(ComplexD *Kc, ComplexD *Xc, int n1, int n2, int nVec, const double *const VGrid, double hsq, int signx, int signy) { register int i, j; for (register uint32_t TID = blockIdx.x * blockDim.x + threadIdx.x; TID < n1 * n2; TID += blockDim.x * gridDim.x) { i = TID % n1; j = (TID / n1); Kc[idx2(i, j, n1)].x = kernel2d(hsq, i, j); Kc[idx2(i, j, n1)].y = 0; if (i > 0) { Kc[idx2(i, j, n1)].x += signx * kernel2d(hsq, n1 - i, j); } if (j > 0) { Kc[idx2(i, j, n1)].x += signy * kernel2d(hsq, i, n2 - j); } if (i > 0 && j > 0) { Kc[idx2(i, j, n1)].x += signx * signy * kernel2d(hsq, n1 - i, n2 - j); } for (uint32_t iVec = 0; iVec < nVec; iVec++) { Xc[idx3(i, j, iVec, n1, n2)].x = VGrid[idx3(i, j, iVec, n1, n2)]; Xc[idx3(i, j, iVec, n1, n2)].y = 0; if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * n1); Xc[idx3(i, j, iVec, n1, n2)] = ComplexMul(Xc[idx3(i, j, iVec, n1, n2)], my_cexpf(arg)); } if (signy == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * j / (2 * n2); Xc[idx3(i, j, iVec, n1, n2)] = ComplexMul(Xc[idx3(i, j, iVec, n1, n2)], my_cexpf(arg)); } } if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * n1); Kc[idx2(i, j, n1)] = ComplexMul(Kc[idx2(i, j, n1)], my_cexpf(arg)); } if (signy == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * j / (2 * n2); Kc[idx2(i, j, n1)] = ComplexMul(Kc[idx2(i, j, n1)], my_cexpf(arg)); } } } __global__ void addToPhiGrid(ComplexD *Xc, double *PhiGrid, int ng, double scale) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < ng; i += numThreads) { PhiGrid[i] += scale * Xc[i].x; } } __global__ void normalizeInverse(ComplexD *Xc, int ng, uint32_t nVec) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < ng; i += numThreads) { for (uint32_t iVec = 0; iVec < nVec; iVec++) { ComplexD arg; arg.x = 0; arg.y = +2 * 
CUDART_PI_D * i / (2 * ng); Xc[i + iVec * ng] = ComplexMul(Xc[i + iVec * ng], my_cexpf(arg)); } } } __global__ void normalizeInverse2D(ComplexD *Xc, uint32_t n1, uint32_t n2, uint32_t nVec, int signx, int signy) { register int i, j; for (register uint32_t TID = blockIdx.x * blockDim.x + threadIdx.x; TID < n1 * n2; TID += blockDim.x * gridDim.x) { i = TID % n1; j = (TID / n1); for (uint32_t iVec = 0; iVec < nVec; iVec++) { if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = +2 * CUDART_PI_D * i / (2 * n1); Xc[idx3(i, j, iVec, n1, n2)] = ComplexMul(Xc[idx3(i, j, iVec, n1, n2)], my_cexpf(arg)); } if (signy == -1) { ComplexD arg; arg.x = 0; arg.y = +2 * CUDART_PI_D * j / (2 * n2); Xc[idx3(i, j, iVec, n1, n2)] = ComplexMul(Xc[idx3(i, j, iVec, n1, n2)], my_cexpf(arg)); } } } } void conv1dnopadcuda(double *PhiGrid, double *VGrid, double h, uint32_t *const nGridDims, uint32_t nVec, int nDim,cufftHandle &plan, cufftHandle &plan_rhs, ComplexD *Kc, ComplexD *Xc) { uint32_t n1 = nGridDims[0]; double hsq = h * h; /*even*/ setDataFft1D<<<Blocks, Threads, 0, streamRep>>>(Kc, Xc, n1, nVec, VGrid, hsq, 1); //cudaDeviceSynchronize(streamRep); cufftExecZ2Z(plan, reinterpret_cast<cufftDoubleComplex *>(Kc), reinterpret_cast<cufftDoubleComplex *>(Kc), CUFFT_FORWARD); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_FORWARD); ComplexPointwiseMulAndScale<<<Blocks, Threads, 0, streamRep>>>(Xc, Kc, n1, nVec); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_INVERSE); addToPhiGrid<<<Blocks, Threads, 0, streamRep>>>(Xc, PhiGrid, n1 * nVec, (0.5 / n1)); //cudaDeviceSynchronize(streamRep); setDataFft1D<<<Blocks, Threads, 0, streamRep>>>(Kc, Xc, n1, nVec, VGrid, hsq, -1); cufftExecZ2Z(plan, reinterpret_cast<cufftDoubleComplex *>(Kc), reinterpret_cast<cufftDoubleComplex *>(Kc), CUFFT_FORWARD); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_FORWARD); ComplexPointwiseMulAndScale<<<Blocks, Threads, 0, streamRep>>>(Xc, Kc, n1, nVec); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_INVERSE); normalizeInverse<<<Blocks, Threads, 0, streamRep>>>(Xc, n1, nVec); addToPhiGrid<<<Blocks, Threads, 0, streamRep>>>(Xc, PhiGrid, n1 * nVec, (0.5 / n1)); // cudaDeviceSynchronize(streamRep); return; } void conv2dnopadcuda(double *const PhiGrid, const double *const VGrid, const double h, uint32_t *const nGridDims, const uint32_t nVec, const uint32_t nDim,cufftHandle &plan, cufftHandle &plan_rhs, ComplexD *Kc, ComplexD* Xc) { double hsq = h * h; // find the size of the last dimension in FFTW (add padding) uint32_t n1 = nGridDims[0]; uint32_t n2 = nGridDims[1]; // ============================== EVEN-EVEN setDataFft2D<<<Blocks, Threads, 0, streamRep>>>(Kc, Xc, n1, n2, nVec, VGrid, hsq, 1, 1); cufftExecZ2Z(plan, reinterpret_cast<cufftDoubleComplex *>(Kc), reinterpret_cast<cufftDoubleComplex *>(Kc), CUFFT_FORWARD); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_FORWARD); ComplexPointwiseMulAndScale<<<Blocks, Threads, 0, streamRep>>>(Xc, Kc, n1 * n2, nVec); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_INVERSE); addToPhiGrid<<<Blocks, Threads, 0, streamRep>>>(Xc, PhiGrid, n1 * n2 * nVec, (0.25 / (n1 * n2))); // 
============================== ODD-EVEN setDataFft2D<<<Blocks, Threads, 0, streamRep>>>(Kc, Xc, n1, n2, nVec, VGrid, hsq, -1, 1); cufftExecZ2Z(plan, reinterpret_cast<cufftDoubleComplex *>(Kc), reinterpret_cast<cufftDoubleComplex *>(Kc), CUFFT_FORWARD); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_FORWARD); ComplexPointwiseMulAndScale<<<Blocks, Threads, 0, streamRep>>>(Xc, Kc, n1 * n2, nVec); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_INVERSE); normalizeInverse2D<<<Blocks, Threads, 0, streamRep>>>(Xc, n1, n2, nVec, -1, 1); addToPhiGrid<<<Blocks, Threads, 0, streamRep>>>(Xc, PhiGrid, n1 * n2 * nVec, (0.25 / (n1 * n2))); // ============================== EVEN-ODD setDataFft2D<<<Blocks, Threads, 0, streamRep>>>(Kc, Xc, n1, n2, nVec, VGrid, hsq, 1, -1); cufftExecZ2Z(plan, reinterpret_cast<cufftDoubleComplex *>(Kc), reinterpret_cast<cufftDoubleComplex *>(Kc), CUFFT_FORWARD); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_FORWARD); ComplexPointwiseMulAndScale<<<Blocks, Threads, 0, streamRep>>>(Xc, Kc, n1 * n2, nVec); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_INVERSE); normalizeInverse2D<<<Blocks, Threads, 0, streamRep>>>(Xc, n1, n2, nVec, 1, -1); addToPhiGrid<<<Blocks, Threads, 0, streamRep>>>(Xc, PhiGrid, n1 * n2 * nVec, (0.25 / (n1 * n2))); // ============================== ODD-ODD setDataFft2D<<<Blocks, Threads, 0, streamRep>>>(Kc, Xc, n1, n2, nVec, VGrid, hsq, -1, -1); cufftExecZ2Z(plan, reinterpret_cast<cufftDoubleComplex *>(Kc), reinterpret_cast<cufftDoubleComplex *>(Kc), CUFFT_FORWARD); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_FORWARD); ComplexPointwiseMulAndScale<<<Blocks, Threads, 0, streamRep>>>(Xc, Kc, n1 * n2, nVec); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_INVERSE); normalizeInverse2D<<<Blocks, Threads, 0, streamRep>>>(Xc, n1, n2, nVec, -1, -1); addToPhiGrid<<<Blocks, Threads, 0, streamRep>>>(Xc, PhiGrid, n1 * n2 * nVec, (0.25 / (n1 * n2))); //cudaDeviceSynchronize(); return; } __global__ void setDataFft3D(ComplexD *Kc, ComplexD *Xc, int n1, int n2, int n3, int nVec, const double *const VGrid, double hsq, int signx, int signy, int signz) { register int i, j, k; register ComplexD K, X; for (register uint32_t TID = blockIdx.x * blockDim.x + threadIdx.x; TID < n1 * n2 * n3; TID += blockDim.x * gridDim.x) { i = TID % n1; j = (TID / n1) % n2; k = (TID / n1) / n2; K.x = kernel3d(hsq, i, j, k); K.y = 0; if (i > 0) { K.x += signx * kernel3d(hsq, n1 - i, j, k); } if (j > 0) { K.x += signy * kernel3d(hsq, i, n2 - j, k); } if (i > 0 && j > 0) { K.x += signx * signy * kernel3d(hsq, n1 - i, n2 - j, k); } if (k > 0) { K.x += signz * kernel3d(hsq, i, j, n3 - k); } if (k > 0 && i > 0) { K.x += signx * signz * kernel3d(hsq, n1 - i, j, n3 - k); } if (k > 0 && j > 0) { K.x += signy * signz * kernel3d(hsq, i, n2 - j, n3 - k); } if (k > 0 && i > 0 && j > 0) { K.x += signx * signy * signz * kernel3d(hsq, n1 - i, n2 - j, n3 - k); } for (uint32_t iVec = 0; iVec < nVec; iVec++) { X.x = VGrid[idx4(i, j, k, iVec, n1, n2, n3)]; X.y = 0; if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * n1); X = ComplexMul(X, my_cexpf(arg)); } if (signy == -1) { 
ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * j / (2 * n2); X = ComplexMul(X, my_cexpf(arg)); } if (signz == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * k / (2 * n3); X = ComplexMul(X, my_cexpf(arg)); } Xc[idx4(i, j, k, iVec, n1, n2, n3)] = X; } if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * i / (2 * n1); K = ComplexMul(K, my_cexpf(arg)); } if (signy == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * j / (2 * n2); K = ComplexMul(K, my_cexpf(arg)); } if (signz == -1) { ComplexD arg; arg.x = 0; arg.y = -2 * CUDART_PI_D * k / (2 * n3); K = ComplexMul(K, my_cexpf(arg)); } Kc[idx3(i, j, k, n1, n2)] = K; } } __global__ void normalizeInverse3D(ComplexD *Xc, uint32_t n1, uint32_t n2, uint32_t n3, uint32_t nVec, int signx, int signy, int signz) { register int i, j, k; for (register uint32_t TID = blockIdx.x * blockDim.x + threadIdx.x; TID < n1 * n2 * n3; TID += blockDim.x * gridDim.x) { i = TID % n1; j = (TID / n1) % n2; k = (TID / n1) / n2; for (uint32_t iVec = 0; iVec < nVec; iVec++) { if (signx == -1) { ComplexD arg; arg.x = 0; arg.y = +2 * CUDART_PI_D * i / (2 * n1); Xc[idx4(i, j, k, iVec, n1, n2, n3)] = ComplexMul(Xc[idx4(i, j, k, iVec, n1, n2, n3)], my_cexpf(arg)); } if (signy == -1) { ComplexD arg; arg.x = 0; arg.y = +2 * CUDART_PI_D * j / (2 * n2); Xc[idx4(i, j, k, iVec, n1, n2, n3)] = ComplexMul(Xc[idx4(i, j, k, iVec, n1, n2, n3)], my_cexpf(arg)); } if (signz == -1) { ComplexD arg; arg.x = 0; arg.y = +2 * CUDART_PI_D * k / (2 * n3); Xc[idx4(i, j, k, iVec, n1, n2, n3)] = ComplexMul(Xc[idx4(i, j, k, iVec, n1, n2, n3)], my_cexpf(arg)); } } } } void term3D(ComplexD *Kc, ComplexD *Xc, uint32_t n1, uint32_t n2, uint32_t n3, uint32_t nVec, const double *const VGrid, double *PhiGrid, double hsq, cufftHandle plan, cufftHandle plan_rhs, int signx, int signy, int signz) { setDataFft3D<<<Blocks, Threads, 0, streamRep>>>(Kc, Xc, n1, n2, n3, nVec, VGrid, hsq, signx, signy, signz); cufftExecZ2Z(plan, reinterpret_cast<cufftDoubleComplex *>(Kc), reinterpret_cast<cufftDoubleComplex *>(Kc), CUFFT_FORWARD); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_FORWARD); ComplexPointwiseMulAndScale<<<Blocks, Threads, 0, streamRep>>>(Xc, Kc, n1 * n2*n3, nVec); cufftExecZ2Z(plan_rhs, reinterpret_cast<cufftDoubleComplex *>(Xc), reinterpret_cast<cufftDoubleComplex *>(Xc), CUFFT_INVERSE); if(signx==-1 || signy==-1 || signz==-1){ normalizeInverse3D<<<Blocks, Threads, 0, streamRep>>>(Xc, n1, n2, n3, nVec, signx, signy, signz);} addToPhiGrid<<<Blocks, Threads, 0, streamRep>>>(Xc, PhiGrid, n1 * n2 * n3 * nVec, (0.125 / (n1 * n2 * n3))); } void conv3dnopadcuda(double *const PhiGrid, const double *const VGrid, const double h, uint32_t *const nGridDims, const uint32_t nVec, const uint32_t nDim,cufftHandle &plan, cufftHandle &plan_rhs, ComplexD *Kc, ComplexD *Xc) { double hsq = h * h; // find the size of the last dimension in FFTW (add padding) uint32_t n1 = nGridDims[0]; uint32_t n2 = nGridDims[1]; uint32_t n3 = nGridDims[2]; // ============================== EVEN-EVEN-EVEN term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, 1, 1,1); // ============================== ODD-EVEN-EVEN term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, -1, 1, 1); // ============================== EVEN-ODD-EVEN term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, 1, -1, 1); // ============================== ODD-ODD-EVEN term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, 
plan, plan_rhs, -1, -1, 1); // ============================== EVEN-EVEN-ODD term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, 1, 1, -1); // ============================== EVEN-ODD-EVEN term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, -1, 1, -1); // ============================== EVEN-ODD-ODD term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, 1, -1, -1); // ============================== ODD-ODD-ODD term3D(Kc, Xc, n1, n2, n3, nVec, VGrid, PhiGrid, hsq, plan, plan_rhs, -1, -1, -1); }
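conv1dnopadcuda and its 2D/3D counterparts receive already-built cufftHandle plans plus the streamRep stream; the plan construction is outside the files shown here. A plausible setup for the 1D case, sketched under the assumption that plan transforms a single length-n1 Z2Z signal and plan_rhs batches nVec of them (the helper name is hypothetical):

#include <cufft.h>
#include <cuda_runtime.h>

// Hypothetical helper: build the two Z2Z plans conv1dnopadcuda expects and bind them to the stream.
void makeConv1dPlans(cufftHandle& plan, cufftHandle& plan_rhs,
                     int n1, int nVec, cudaStream_t stream) {
    cufftPlan1d(&plan, n1, CUFFT_Z2Z, 1);         // kernel spectrum, single transform
    cufftPlan1d(&plan_rhs, n1, CUFFT_Z2Z, nVec);  // right-hand sides, batched over nVec
    cufftSetStream(plan, stream);
    cufftSetStream(plan_rhs, stream);
}

The 2D and 3D paths would use cufftPlan2d/cufftPlan3d (or cufftPlanMany for the batched right-hand sides) in the same way, and cufftDestroy releases the plans once the convolutions are done.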
21f6cc6c4e2186cce0c8343296bccdd6f8816368.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void cube(float * d_out, float * d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f * f * f; } int main(int argc, char ** argv) { const int ARRAY_SIZE = 96; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float * d_in; float * d_out; // allocate GPU memory hipMalloc((void**) &d_in, ARRAY_BYTES); hipMalloc((void**) &d_out, ARRAY_BYTES); // transfer the array to the GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); // launch the kernel hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in); // copy back the result array to the CPU hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost); // print out the resulting array for (int i =0; i < ARRAY_SIZE; i++) { printf("%f", h_out[i]); printf(((i % 4) != 3) ? "\t" : "\n"); } hipFree(d_in); hipFree(d_out); return 0; }
21f6cc6c4e2186cce0c8343296bccdd6f8816368.cu
#include <stdio.h> __global__ void cube(float * d_out, float * d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f * f * f; } int main(int argc, char ** argv) { const int ARRAY_SIZE = 96; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float * d_in; float * d_out; // allocate GPU memory cudaMalloc((void**) &d_in, ARRAY_BYTES); cudaMalloc((void**) &d_out, ARRAY_BYTES); // transfer the array to the GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); // launch the kernel cube<<<1, ARRAY_SIZE>>>(d_out, d_in); // copy back the result array to the CPU cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost); // print out the resulting array for (int i =0; i < ARRAY_SIZE; i++) { printf("%f", h_out[i]); printf(((i % 4) != 3) ? "\t" : "\n"); } cudaFree(d_in); cudaFree(d_out); return 0; }
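The cube example launches a single block of ARRAY_SIZE threads, so it only scales up to the 1024-thread block limit. A grid-stride sketch that keeps the same cubing behaviour but accepts any element count n (kernel name illustrative):

__global__ void cube_grid_stride(float* d_out, const float* d_in, int n) {
    // each thread strides over the array by the total number of launched threads
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        float f = d_in[i];
        d_out[i] = f * f * f;
    }
}

A launch such as cube_grid_stride<<<(n + 255) / 256, 256>>>(d_out, d_in, n); covers the whole array regardless of its size.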
37883ce940be24d5fda926001962ed0eccc3e970.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> // error checking macro #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) const size_t N = 8ULL*1024ULL*1024ULL; // data size const int BLOCK_SIZE = 256; // CUDA maximum is 1024 __global__ void reduce(float *gdata, float *out, size_t n){ __shared__ float sdata[BLOCK_SIZE]; int tid = threadIdx.x; sdata[tid] = 0.0f; size_t idx = threadIdx.x+blockDim.x*blockIdx.x; while (idx < n) { // grid stride loop to load data sdata[tid] = max(gdata[idx], sdata[tid]); idx += gridDim.x*blockDim.x; } for (unsigned int s=blockDim.x/2; s>0; s>>=1) { __syncthreads(); if (tid < s) // parallel sweep reduction sdata[tid] = max(sdata[tid + s], sdata[tid]); } if (tid == 0) out[blockIdx.x] = sdata[0]; } int main(){ float *h_A, *h_sum, *d_A, *d_sums; const int blocks = 640; h_A = new float[N]; // allocate space for data in host memory h_sum = new float; float max_val = 5.0f; for (size_t i = 0; i < N; i++) // initialize matrix in host memory h_A[i] = 1.0f; h_A[100] = max_val; hipMalloc(&d_A, N*sizeof(float)); // allocate device space for A hipMalloc(&d_sums, blocks*sizeof(float)); // allocate device space for partial sums cudaCheckErrors("hipMalloc failure"); // error checking // copy matrix A to device: hipMemcpy(d_A, h_A, N*sizeof(float), hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy H2D failure"); //cuda processing sequence step 1 is complete hipLaunchKernelGGL(( reduce), dim3(blocks), dim3(BLOCK_SIZE), 0, 0, d_A, d_sums, N); // reduce stage 1 cudaCheckErrors("reduction kernel launch failure"); hipLaunchKernelGGL(( reduce), dim3(1), dim3(BLOCK_SIZE), 0, 0, d_sums, d_A, blocks); // reduce stage 2 cudaCheckErrors("reduction kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: hipMemcpy(h_sum, d_A, sizeof(float), hipMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("reduction w/atomic kernel execution failure or hipMemcpy D2H failure"); printf("reduction output: %f, expected sum reduction output: %f, expected max reduction output: %f\n", *h_sum, (float)((N-1)+max_val), max_val); return 0; }
37883ce940be24d5fda926001962ed0eccc3e970.cu
#include <stdio.h> // error checking macro #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) const size_t N = 8ULL*1024ULL*1024ULL; // data size const int BLOCK_SIZE = 256; // CUDA maximum is 1024 __global__ void reduce(float *gdata, float *out, size_t n){ __shared__ float sdata[BLOCK_SIZE]; int tid = threadIdx.x; sdata[tid] = 0.0f; size_t idx = threadIdx.x+blockDim.x*blockIdx.x; while (idx < n) { // grid stride loop to load data sdata[tid] = max(gdata[idx], sdata[tid]); idx += gridDim.x*blockDim.x; } for (unsigned int s=blockDim.x/2; s>0; s>>=1) { __syncthreads(); if (tid < s) // parallel sweep reduction sdata[tid] = max(sdata[tid + s], sdata[tid]); } if (tid == 0) out[blockIdx.x] = sdata[0]; } int main(){ float *h_A, *h_sum, *d_A, *d_sums; const int blocks = 640; h_A = new float[N]; // allocate space for data in host memory h_sum = new float; float max_val = 5.0f; for (size_t i = 0; i < N; i++) // initialize matrix in host memory h_A[i] = 1.0f; h_A[100] = max_val; cudaMalloc(&d_A, N*sizeof(float)); // allocate device space for A cudaMalloc(&d_sums, blocks*sizeof(float)); // allocate device space for partial sums cudaCheckErrors("cudaMalloc failure"); // error checking // copy matrix A to device: cudaMemcpy(d_A, h_A, N*sizeof(float), cudaMemcpyHostToDevice); cudaCheckErrors("cudaMemcpy H2D failure"); //cuda processing sequence step 1 is complete reduce<<<blocks, BLOCK_SIZE>>>(d_A, d_sums, N); // reduce stage 1 cudaCheckErrors("reduction kernel launch failure"); reduce<<<1, BLOCK_SIZE>>>(d_sums, d_A, blocks); // reduce stage 2 cudaCheckErrors("reduction kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: cudaMemcpy(h_sum, d_A, sizeof(float), cudaMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("reduction w/atomic kernel execution failure or cudaMemcpy D2H failure"); printf("reduction output: %f, expected sum reduction output: %f, expected max reduction output: %f\n", *h_sum, (float)((N-1)+max_val), max_val); return 0; }
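The max reduction above finishes every sweep step in shared memory with a __syncthreads between steps. Once the active slice fits inside one warp, the remaining steps can run on warp shuffles instead; a sketch of that variant, assuming blockDim.x is a multiple of 32 (as with BLOCK_SIZE = 256 above) and using -FLT_MAX as the identity:

#include <cfloat>

__inline__ __device__ float warpMax(float val) {
    // all 32 lanes of the warp participate in each shuffle step
    for (int offset = warpSize / 2; offset > 0; offset >>= 1)
        val = fmaxf(val, __shfl_down_sync(0xffffffff, val, offset));
    return val;
}

__global__ void reduce_max_warp(const float* gdata, float* out, size_t n) {
    __shared__ float warp_results[32];            // one slot per warp in the block
    float v = -FLT_MAX;
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += (size_t)blockDim.x * gridDim.x)
        v = fmaxf(v, gdata[i]);

    v = warpMax(v);                               // stage 1: per-warp maximum
    int lane = threadIdx.x % warpSize;
    int warp = threadIdx.x / warpSize;
    if (lane == 0) warp_results[warp] = v;
    __syncthreads();

    if (warp == 0) {                              // stage 2: first warp reduces the per-warp results
        int nWarps = blockDim.x / warpSize;
        v = (lane < nWarps) ? warp_results[lane] : -FLT_MAX;
        v = warpMax(v);
        if (lane == 0) out[blockIdx.x] = v;
    }
}

The two-stage host-side pattern from main above (per-block partials, then a second launch with one block) carries over unchanged.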
100874a1a8689bed875423a0c3ce1ac0a99f9b93.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdint.h> #include <hip/hip_runtime.h> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/unpack.cuh" #include "include/hip/hip_fp16.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" template <typename T> __global__ void Unpack(const size_t size, const size_t output_num, const size_t dims_after_axis, T** outputs, const T* input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) { size_t cur_input_index = pos / dims_after_axis % output_num; size_t cycle_len = output_num * dims_after_axis; size_t local_index = pos / cycle_len * dims_after_axis + pos % cycle_len % dims_after_axis; outputs[cur_input_index][local_index] = input[pos]; } return; } template <typename T> void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, T** outputs, const T* input, hipStream_t cuda_stream) { hipLaunchKernelGGL(( Unpack), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, output_num, dims_after_axis, outputs, input); return; } template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, int8_t** outputs, const int8_t* input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, int16_t** outputs, const int16_t* input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, int** outputs, const int* input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, int64_t** outputs, const int64_t* input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, uint8_t** outputs, const uint8_t* input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, uint16_t** outputs, const uint16_t* input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, uint32_t** outputs, const uint32_t* input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, uint64_t** outputs, const uint64_t* input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, half** outputs, const half* input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, float** outputs, const float* input, hipStream_t cuda_stream); 
template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, bool** outputs, const bool* input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, double** outputs, const double* input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, Complex<float> **outputs, const Complex<float> *input, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, Complex<double> **outputs, const Complex<double> *input, hipStream_t cuda_stream);
100874a1a8689bed875423a0c3ce1ac0a99f9b93.cu
/** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdint.h> #include <cuda_runtime.h> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/unpack.cuh" #include "include/cuda_fp16.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" template <typename T> __global__ void Unpack(const size_t size, const size_t output_num, const size_t dims_after_axis, T** outputs, const T* input) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) { size_t cur_input_index = pos / dims_after_axis % output_num; size_t cycle_len = output_num * dims_after_axis; size_t local_index = pos / cycle_len * dims_after_axis + pos % cycle_len % dims_after_axis; outputs[cur_input_index][local_index] = input[pos]; } return; } template <typename T> void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, T** outputs, const T* input, cudaStream_t cuda_stream) { Unpack<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, output_num, dims_after_axis, outputs, input); return; } template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, int8_t** outputs, const int8_t* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, int16_t** outputs, const int16_t* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, int** outputs, const int* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, int64_t** outputs, const int64_t* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, uint8_t** outputs, const uint8_t* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, uint16_t** outputs, const uint16_t* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, uint32_t** outputs, const uint32_t* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, uint64_t** outputs, const uint64_t* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, half** outputs, const half* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, float** outputs, const float* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t 
output_num, const size_t dims_after_axis, bool** outputs, const bool* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, double** outputs, const double* input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, Complex<float> **outputs, const Complex<float> *input, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void UnpackKernel(const size_t size, const size_t output_num, const size_t dims_after_axis, Complex<double> **outputs, const Complex<double> *input, cudaStream_t cuda_stream);
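// --- Illustrative addendum (not part of the original hip/cu pair above) ---
// UnpackKernel takes a T** whose elements are dereferenced on the device, so the array
// of output pointers must itself live in device memory before the launch. A minimal
// host-side sketch under that assumption; buffer names and sizes here are hypothetical,
// only the UnpackKernel signature comes from the file above, and building it is assumed
// to require the MindSpore cuda_ops headers that declare UnpackKernel:
#include <vector>
#include <cuda_runtime.h>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/unpack.cuh"

void UnpackSketch(const float *d_input, size_t output_num, size_t dims_after_axis,
                  size_t total_size, cudaStream_t stream) {
  const size_t per_output = total_size / output_num;    // elements per output tensor
  std::vector<float *> h_out_ptrs(output_num);
  for (size_t i = 0; i < output_num; ++i)
    cudaMalloc(&h_out_ptrs[i], per_output * sizeof(float));
  float **d_out_ptrs = nullptr;                         // device array of device pointers
  cudaMalloc(&d_out_ptrs, output_num * sizeof(float *));
  cudaMemcpy(d_out_ptrs, h_out_ptrs.data(), output_num * sizeof(float *),
             cudaMemcpyHostToDevice);
  UnpackKernel(total_size, output_num, dims_after_axis, d_out_ptrs, d_input, stream);
}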
9ef4efbd2529167e0e465c27a440b91b0e0731f5.hip
// !!! This is a file automatically generated by hipify!!! // @author: Sonu Gupta // @purpose: CUDA functions which run on the GPU // 'Extended version' (Ex) is used #ifndef __CUDA_FUNCS_C__ #define __CUDA_FUNCS_C__ #include <iostream> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "cuda_functions.h" // Extended version // // @purpose: This function takes the whole image and all input_weights, performs the computation on the GPU, and writes the result. // The result is then copied back to the host, which passes it on to the next layer. __global__ void forward_matrixmulEx(int limit, double* input, double* weight, double* out, double* bias, double* dropped, bool isReLu, int depth, int height, int width) { //int id = threadIdx.x; int id = threadIdx.x + blockIdx.x * blockDim.x; //todo: sanity check here if(id < limit) { double* curr_weight = weight + (id * depth * height * width); double res = 0.0; // accumulate in double to match the double inputs and output /* for(int i = 0; i<height; i++) { for(int j = 0; j <width; j++) { res += a[width*i+j] + m[j*width + i]; } } */ for(int i = 0; i< depth * height * width; i++) res += input[i] * curr_weight[i]; out[id] = res; out[id] += bias[id]; if(isReLu) { out[id] = (out[id] > 0.0) ? out[id] : 0.0; // Max doesn't work here } out[id] *= dropped[id]; } } // // @purpose: This function updates the weight matrix and biases in one pass; the results are then copied back to the host // __global__ void update_weightmatrixEx(int limit, double* weight, double* weight_deriv, double* bias, double* bias_deriv, int depth, int height, int width, float rate) { // resulting matrix will be awidth * bheight //int index = threadIdx.x; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < limit) { //todo: sanity check here double* aa = weight + (index * depth * height * width); double* bb = weight_deriv + (index * depth * height * width); //compute and reset derivative matrix to zero for(int i = 0; i< depth * height * width; i++) { aa[i] = aa[i] - rate * bb[i]; bb[i] = 0; } bias[index] -= rate * bias_deriv[index]; bias_deriv[index] = 0; } } // @purpose: backprop just backpropagates all 'weight_matrices'. Here, a single node updates its weights in one go. // // __global__ void backprop_weightmatrixEx(int limit, double* input_device, double* downstream_deriv_device, double* current_kept_device, double* upstream_deriv_device, \ double* weight_device, double* weight_deriv_device, double* bias_deriv_device, double* output, bool is_relu, int depth, int height, int width, double mb_size) { // int index = threadIdx.x; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < limit) { if(current_kept_device[index] > 0) { if (!is_relu || output[index] > 0) { double* t_weight_device = weight_device + (index * depth * height * width); double* t_weight_deriv_device = weight_deriv_device + (index * depth * height * width); double* t_downstream_deriv_device = downstream_deriv_device + (index * depth * height * width); for(int i = 0; i< depth * height * width; i++) { t_downstream_deriv_device[i] = current_kept_device[index] * upstream_deriv_device[index] * t_weight_device[i]; t_weight_deriv_device[i] += (current_kept_device[index] * upstream_deriv_device[index] * input_device[i])/mb_size; } bias_deriv_device[index] += (current_kept_device[index] * upstream_deriv_device[index])/mb_size; } } } } // OLD versions: no longer used // These can be called in for loops. Initially I started with them. However, I kept them because I want to try running them with OpenMP. // todo: Integrate with OpenMP.
__global__ void forward_matrixmul(double* a, double* b, double *c, int width) { // resulting matrix will be awidth * bheight int row = blockIdx.y * blockDim.y + threadIdx.y; int column = blockIdx.x * blockDim.x + threadIdx.x; //todo: sanity check here int index = row * width + column; c[index] = a[index] * b[index]; } __global__ void update_weightmatrix(double* a, double* b, int width, float rate) { // resulting matrix will be awidth * bheight int row = blockIdx.y * blockDim.y + threadIdx.y; int column = blockIdx.x * blockDim.x + threadIdx.x; //todo: sanity check here int index = row * width + column; //compute and reset derivative matrix to zero a[index] = a[index] - rate * b[index]; b[index] = 0; } __global__ void backprop_weightmatrix(double* input_device, double* downstream_deriv_device, double current_kept_device, double upstream_deriv_device, \ double* weight_device, double* weight_deriv_device, int width, double mb_size) { // resulting matrix will be awidth * bheight int row = blockIdx.y * blockDim.y + threadIdx.y; int column = blockIdx.x * blockDim.x + threadIdx.x; //todo: sanity check here int index = row * width + column; //todo: compute common term once // downstream_deriv_device[index] = downstream_deriv_device[index] + current_kept_device * upstream_deriv_device * weight_device[index]; weight_deriv_device[index] = weight_deriv_device[index] + (current_kept_device * upstream_deriv_device * input_device[index])/mb_size; } #endif
9ef4efbd2529167e0e465c27a440b91b0e0731f5.cu
// @author: Sonu Gupta // @purpose: CUDA functions which run on the GPU // 'Extended version' (Ex) is used #ifndef __CUDA_FUNCS_C__ #define __CUDA_FUNCS_C__ #include <iostream> #include <cuda_runtime_api.h> #include <cuda.h> #include "cuda_functions.h" // Extended version // // @purpose: This function takes the whole image and all input_weights, performs the computation on the GPU, and writes the result. // The result is then copied back to the host, which passes it on to the next layer. __global__ void forward_matrixmulEx(int limit, double* input, double* weight, double* out, double* bias, double* dropped, bool isReLu, int depth, int height, int width) { //int id = threadIdx.x; int id = threadIdx.x + blockIdx.x * blockDim.x; //todo: sanity check here if(id < limit) { double* curr_weight = weight + (id * depth * height * width); double res = 0.0; // accumulate in double to match the double inputs and output /* for(int i = 0; i<height; i++) { for(int j = 0; j <width; j++) { res += a[width*i+j] + m[j*width + i]; } } */ for(int i = 0; i< depth * height * width; i++) res += input[i] * curr_weight[i]; out[id] = res; out[id] += bias[id]; if(isReLu) { out[id] = (out[id] > 0.0) ? out[id] : 0.0; // Max doesn't work here } out[id] *= dropped[id]; } } // // @purpose: This function updates the weight matrix and biases in one pass; the results are then copied back to the host // __global__ void update_weightmatrixEx(int limit, double* weight, double* weight_deriv, double* bias, double* bias_deriv, int depth, int height, int width, float rate) { // resulting matrix will be awidth * bheight //int index = threadIdx.x; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < limit) { //todo: sanity check here double* aa = weight + (index * depth * height * width); double* bb = weight_deriv + (index * depth * height * width); //compute and reset derivative matrix to zero for(int i = 0; i< depth * height * width; i++) { aa[i] = aa[i] - rate * bb[i]; bb[i] = 0; } bias[index] -= rate * bias_deriv[index]; bias_deriv[index] = 0; } } // @purpose: backprop just backpropagates all 'weight_matrices'. Here, a single node updates its weights in one go. // // __global__ void backprop_weightmatrixEx(int limit, double* input_device, double* downstream_deriv_device, double* current_kept_device, double* upstream_deriv_device, \ double* weight_device, double* weight_deriv_device, double* bias_deriv_device, double* output, bool is_relu, int depth, int height, int width, double mb_size) { // int index = threadIdx.x; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < limit) { if(current_kept_device[index] > 0) { if (!is_relu || output[index] > 0) { double* t_weight_device = weight_device + (index * depth * height * width); double* t_weight_deriv_device = weight_deriv_device + (index * depth * height * width); double* t_downstream_deriv_device = downstream_deriv_device + (index * depth * height * width); for(int i = 0; i< depth * height * width; i++) { t_downstream_deriv_device[i] = current_kept_device[index] * upstream_deriv_device[index] * t_weight_device[i]; t_weight_deriv_device[i] += (current_kept_device[index] * upstream_deriv_device[index] * input_device[i])/mb_size; } bias_deriv_device[index] += (current_kept_device[index] * upstream_deriv_device[index])/mb_size; } } } } // OLD versions: no longer used // These can be called in for loops. Initially I started with them. However, I kept them because I want to try running them with OpenMP. // todo: Integrate with OpenMP.
__global__ void forward_matrixmul(double* a, double* b, double *c, int width) { // resulting matrix will be awidth * bheight int row = blockIdx.y * blockDim.y + threadIdx.y; int column = blockIdx.x * blockDim.x + threadIdx.x; //todo: sanity check here int index = row * width + column; c[index] = a[index] * b[index]; } __global__ void update_weightmatrix(double* a, double* b, int width, float rate) { // resulting matrix will be awidth * bheight int row = blockIdx.y * blockDim.y + threadIdx.y; int column = blockIdx.x * blockDim.x + threadIdx.x; //todo: sanity check here int index = row * width + column; //compute and reset derivative matrix to zero a[index] = a[index] - rate * b[index]; b[index] = 0; } __global__ void backprop_weightmatrix(double* input_device, double* downstream_deriv_device, double current_kept_device, double upstream_deriv_device, \ double* weight_device, double* weight_deriv_device, int width, double mb_size) { // resulting matrix will be awidth * bheight int row = blockIdx.y * blockDim.y + threadIdx.y; int column = blockIdx.x * blockDim.x + threadIdx.x; //todo: sanity check here int index = row * width + column; //todo: compute common term once // downstream_deriv_device[index] = downstream_deriv_device[index] + current_kept_device * upstream_deriv_device * weight_device[index]; weight_deriv_device[index] = weight_deriv_device[index] + (current_kept_device * upstream_deriv_device * input_device[index])/mb_size; } #endif
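// --- Illustrative addendum (not part of the original hip/cu pair above) ---
// forward_matrixmulEx maps one output neuron to one thread and guards with `id < limit`,
// so the grid is sized from `limit`. A minimal launch sketch; all device buffer names
// below are hypothetical, only the kernel signature comes from the file above:
void launch_forward_sketch(int limit, double *d_input, double *d_weight,
                           double *d_out, double *d_bias, double *d_dropped,
                           bool is_relu, int depth, int height, int width) {
  const int threads = 256;
  const int blocks = (limit + threads - 1) / threads;   // ceil(limit / threads)
  forward_matrixmulEx<<<blocks, threads>>>(limit, d_input, d_weight, d_out,
                                           d_bias, d_dropped, is_relu,
                                           depth, height, width);
}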