Dataset columns (HIP/CUDA source-file pairs):
  hip_filename    string, length 5 to 84
  hip_content     string, length 79 to 9.69M
  cuda_filename   string, length 4 to 83
  cuda_content    string, length 19 to 9.69M
26c17aaf3220b42824608543bff440b03667f698.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at // the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights // reserved. See files LICENSE and NOTICE for details. // // This file is part of CEED, a collection of benchmarks, miniapps, software // libraries and APIs for efficient high-order finite element and spectral // element discretizations for exascale applications. For more information and // source code availability see http://github.com/ceed. // // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, // a collaborative effort of two U.S. Department of Energy organizations (Office // of Science and the National Nuclear Security Administration) responsible for // the planning and preparation of a capable exascale ecosystem, including // software, applications, hardware, advanced system engineering and early // testbed platforms, in support of the nation's exascale computing imperative. /// A structure used to pass additional data to f_build_diff and f_apply_diff struct BuildContext { CeedInt dim, space_dim; }; /// libCEED Q-function for building quadrature data for a diffusion operator extern "C" __global__ void f_build_diff(void *ctx, CeedInt Q, Fields_Cuda fields) { BuildContext *bc = (BuildContext*)ctx; // in[0] is Jacobians with shape [dim, nc=dim, Q] // in[1] is quadrature weights, size (Q) // // At every quadrature point, compute qw/det(J).adj(J).adj(J)^T and store // the symmetric part of the result. const CeedScalar *J = (const CeedScalar *)fields.inputs[0]; const CeedScalar *qw = (const CeedScalar *)fields.inputs[1]; CeedScalar *qd = fields.outputs[0]; switch (bc->dim + 10*bc->space_dim) { case 11: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { qd[i] = qw[i] / J[i]; } break; case 22: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { // J: 0 2 qd: 0 1 adj(J): J22 -J12 // 1 3 1 2 -J21 J11 const CeedScalar J11 = J[i+Q*0]; const CeedScalar J21 = J[i+Q*1]; const CeedScalar J12 = J[i+Q*2]; const CeedScalar J22 = J[i+Q*3]; const CeedScalar w = qw[i] / (J11*J22 - J21*J12); qd[i+Q*0] = w * (J12*J12 + J22*J22); qd[i+Q*1] = - w * (J11*J12 + J21*J22); qd[i+Q*2] = w * (J11*J11 + J21*J21); } break; case 33: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { // J: 0 3 6 qd: 0 1 2 // 1 4 7 1 3 4 // 2 5 8 2 4 5 const CeedScalar J11 = J[i+Q*0]; const CeedScalar J21 = J[i+Q*1]; const CeedScalar J31 = J[i+Q*2]; const CeedScalar J12 = J[i+Q*3]; const CeedScalar J22 = J[i+Q*4]; const CeedScalar J32 = J[i+Q*5]; const CeedScalar J13 = J[i+Q*6]; const CeedScalar J23 = J[i+Q*7]; const CeedScalar J33 = J[i+Q*8]; const CeedScalar A11 = J22*J33 - J23*J32; const CeedScalar A12 = J13*J32 - J12*J33; const CeedScalar A13 = J12*J23 - J13*J22; const CeedScalar A21 = J23*J31 - J21*J33; const CeedScalar A22 = J11*J33 - J13*J31; const CeedScalar A23 = J13*J21 - J11*J23; const CeedScalar A31 = J21*J32 - J22*J31; const CeedScalar A32 = J12*J31 - J11*J32; const CeedScalar A33 = J11*J22 - J12*J21; const CeedScalar w = qw[i] / (J11*A11 + J21*A12 + J31*A13); qd[i+Q*0] = w * (A11*A11 + A12*A12 + A13*A13); qd[i+Q*1] = w * (A11*A21 + A12*A22 + A13*A23); qd[i+Q*2] = w * (A11*A31 + A12*A32 + A13*A33); qd[i+Q*3] = w * (A21*A21 + A22*A22 + A23*A23); qd[i+Q*4] = w * (A21*A31 + A22*A32 + A23*A33); qd[i+Q*5] = w * (A31*A31 + A32*A32 + A33*A33); } break; 
} } /// libCEED Q-function for applying a diff operator extern "C" __global__ void f_apply_diff(void *ctx, CeedInt Q, Fields_Cuda fields) { BuildContext *bc = (BuildContext*)ctx; // in[0], out[0] have shape [dim, nc=1, Q] const CeedScalar *ug = (const CeedScalar *)fields.inputs[0]; const CeedScalar *qd = (const CeedScalar *)fields.inputs[1]; CeedScalar *vg = fields.outputs[0]; switch (bc->dim) { case 1: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { vg[i] = ug[i] * qd[i]; } break; case 2: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { const CeedScalar ug0 = ug[i+Q*0]; const CeedScalar ug1 = ug[i+Q*1]; vg[i+Q*0] = qd[i+Q*0]*ug0 + qd[i+Q*1]*ug1; vg[i+Q*1] = qd[i+Q*1]*ug0 + qd[i+Q*2]*ug1; } break; case 3: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { const CeedScalar ug0 = ug[i+Q*0]; const CeedScalar ug1 = ug[i+Q*1]; const CeedScalar ug2 = ug[i+Q*2]; vg[i+Q*0] = qd[i+Q*0]*ug0 + qd[i+Q*1]*ug1 + qd[i+Q*2]*ug2; vg[i+Q*1] = qd[i+Q*1]*ug0 + qd[i+Q*3]*ug1 + qd[i+Q*4]*ug2; vg[i+Q*2] = qd[i+Q*2]*ug0 + qd[i+Q*4]*ug1 + qd[i+Q*5]*ug2; } break; } }
26c17aaf3220b42824608543bff440b03667f698.cu
// Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at // the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights // reserved. See files LICENSE and NOTICE for details. // // This file is part of CEED, a collection of benchmarks, miniapps, software // libraries and APIs for efficient high-order finite element and spectral // element discretizations for exascale applications. For more information and // source code availability see http://github.com/ceed. // // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, // a collaborative effort of two U.S. Department of Energy organizations (Office // of Science and the National Nuclear Security Administration) responsible for // the planning and preparation of a capable exascale ecosystem, including // software, applications, hardware, advanced system engineering and early // testbed platforms, in support of the nation's exascale computing imperative. /// A structure used to pass additional data to f_build_diff and f_apply_diff struct BuildContext { CeedInt dim, space_dim; }; /// libCEED Q-function for building quadrature data for a diffusion operator extern "C" __global__ void f_build_diff(void *ctx, CeedInt Q, Fields_Cuda fields) { BuildContext *bc = (BuildContext*)ctx; // in[0] is Jacobians with shape [dim, nc=dim, Q] // in[1] is quadrature weights, size (Q) // // At every quadrature point, compute qw/det(J).adj(J).adj(J)^T and store // the symmetric part of the result. const CeedScalar *J = (const CeedScalar *)fields.inputs[0]; const CeedScalar *qw = (const CeedScalar *)fields.inputs[1]; CeedScalar *qd = fields.outputs[0]; switch (bc->dim + 10*bc->space_dim) { case 11: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { qd[i] = qw[i] / J[i]; } break; case 22: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { // J: 0 2 qd: 0 1 adj(J): J22 -J12 // 1 3 1 2 -J21 J11 const CeedScalar J11 = J[i+Q*0]; const CeedScalar J21 = J[i+Q*1]; const CeedScalar J12 = J[i+Q*2]; const CeedScalar J22 = J[i+Q*3]; const CeedScalar w = qw[i] / (J11*J22 - J21*J12); qd[i+Q*0] = w * (J12*J12 + J22*J22); qd[i+Q*1] = - w * (J11*J12 + J21*J22); qd[i+Q*2] = w * (J11*J11 + J21*J21); } break; case 33: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { // J: 0 3 6 qd: 0 1 2 // 1 4 7 1 3 4 // 2 5 8 2 4 5 const CeedScalar J11 = J[i+Q*0]; const CeedScalar J21 = J[i+Q*1]; const CeedScalar J31 = J[i+Q*2]; const CeedScalar J12 = J[i+Q*3]; const CeedScalar J22 = J[i+Q*4]; const CeedScalar J32 = J[i+Q*5]; const CeedScalar J13 = J[i+Q*6]; const CeedScalar J23 = J[i+Q*7]; const CeedScalar J33 = J[i+Q*8]; const CeedScalar A11 = J22*J33 - J23*J32; const CeedScalar A12 = J13*J32 - J12*J33; const CeedScalar A13 = J12*J23 - J13*J22; const CeedScalar A21 = J23*J31 - J21*J33; const CeedScalar A22 = J11*J33 - J13*J31; const CeedScalar A23 = J13*J21 - J11*J23; const CeedScalar A31 = J21*J32 - J22*J31; const CeedScalar A32 = J12*J31 - J11*J32; const CeedScalar A33 = J11*J22 - J12*J21; const CeedScalar w = qw[i] / (J11*A11 + J21*A12 + J31*A13); qd[i+Q*0] = w * (A11*A11 + A12*A12 + A13*A13); qd[i+Q*1] = w * (A11*A21 + A12*A22 + A13*A23); qd[i+Q*2] = w * (A11*A31 + A12*A32 + A13*A33); qd[i+Q*3] = w * (A21*A21 + A22*A22 + A23*A23); qd[i+Q*4] = w * (A21*A31 + A22*A32 + A23*A33); qd[i+Q*5] = w * (A31*A31 + A32*A32 + A33*A33); } break; } } /// libCEED Q-function for applying a diff operator extern "C" __global__ void 
f_apply_diff(void *ctx, CeedInt Q, Fields_Cuda fields) { BuildContext *bc = (BuildContext*)ctx; // in[0], out[0] have shape [dim, nc=1, Q] const CeedScalar *ug = (const CeedScalar *)fields.inputs[0]; const CeedScalar *qd = (const CeedScalar *)fields.inputs[1]; CeedScalar *vg = fields.outputs[0]; switch (bc->dim) { case 1: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { vg[i] = ug[i] * qd[i]; } break; case 2: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { const CeedScalar ug0 = ug[i+Q*0]; const CeedScalar ug1 = ug[i+Q*1]; vg[i+Q*0] = qd[i+Q*0]*ug0 + qd[i+Q*1]*ug1; vg[i+Q*1] = qd[i+Q*1]*ug0 + qd[i+Q*2]*ug1; } break; case 3: for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < Q; i += blockDim.x * gridDim.x) { const CeedScalar ug0 = ug[i+Q*0]; const CeedScalar ug1 = ug[i+Q*1]; const CeedScalar ug2 = ug[i+Q*2]; vg[i+Q*0] = qd[i+Q*0]*ug0 + qd[i+Q*1]*ug1 + qd[i+Q*2]*ug2; vg[i+Q*1] = qd[i+Q*1]*ug0 + qd[i+Q*3]*ug1 + qd[i+Q*4]*ug2; vg[i+Q*2] = qd[i+Q*2]*ug0 + qd[i+Q*4]*ug1 + qd[i+Q*5]*ug2; } break; } }
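For reference, below is a minimal standalone check of the 2D (case 22) arithmetic in f_build_diff above, assuming CeedScalar is double: the stored symmetric data qw/det(J) * adj(J) * adj(J)^T equals the usual diffusion quadrature weight qw * det(J) * J^{-1} * J^{-T}. The tiny kernel re-implements only the case-22 formula for a single quadrature point; it is an illustrative sketch, not part of libCEED or of the file above, and does not use the Fields_Cuda interface.

// Standalone sketch: verify the case-22 quadrature data against qw*det(J)*J^{-1}*J^{-T}.
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

typedef double Scalar;  // stand-in for CeedScalar (assumption)

__global__ void build_diff_2d(const Scalar *J, const Scalar *qw, Scalar *qd) {
  // Same layout as case 22 with Q = 1: J = {J11, J21, J12, J22}.
  const Scalar J11 = J[0], J21 = J[1], J12 = J[2], J22 = J[3];
  const Scalar w = qw[0] / (J11*J22 - J21*J12);
  qd[0] =  w * (J12*J12 + J22*J22);
  qd[1] = -w * (J11*J12 + J21*J22);
  qd[2] =  w * (J11*J11 + J21*J21);
}

int main() {
  const Scalar hJ[4] = {2.0, 0.5, 0.25, 1.5};  // arbitrary nonsingular Jacobian
  const Scalar hqw[1] = {0.7};
  Scalar hqd[3];

  Scalar *dJ, *dqw, *dqd;
  cudaMalloc((void**)&dJ, sizeof(hJ));
  cudaMalloc((void**)&dqw, sizeof(hqw));
  cudaMalloc((void**)&dqd, sizeof(hqd));
  cudaMemcpy(dJ, hJ, sizeof(hJ), cudaMemcpyHostToDevice);
  cudaMemcpy(dqw, hqw, sizeof(hqw), cudaMemcpyHostToDevice);
  build_diff_2d<<<1, 1>>>(dJ, dqw, dqd);
  cudaMemcpy(hqd, dqd, sizeof(hqd), cudaMemcpyDeviceToHost);

  // Reference: qw * det(J) * J^{-1} * J^{-T}, computed directly on the host.
  const Scalar J11 = hJ[0], J21 = hJ[1], J12 = hJ[2], J22 = hJ[3];
  const Scalar det = J11*J22 - J21*J12;
  // J^{-1} = adj(J)/det with adj(J) = [[J22, -J12], [-J21, J11]].
  const Scalar i11 = J22/det, i12 = -J12/det, i21 = -J21/det, i22 = J11/det;
  const Scalar ref00 = hqw[0] * det * (i11*i11 + i12*i12);
  const Scalar ref01 = hqw[0] * det * (i11*i21 + i12*i22);
  const Scalar ref11 = hqw[0] * det * (i21*i21 + i22*i22);

  printf("qd  = [%g %g %g]\n", hqd[0], hqd[1], hqd[2]);
  printf("ref = [%g %g %g]\n", ref00, ref01, ref11);
  printf("match: %d\n", std::fabs(hqd[0]-ref00) < 1e-12 &&
                        std::fabs(hqd[1]-ref01) < 1e-12 &&
                        std::fabs(hqd[2]-ref11) < 1e-12);

  cudaFree(dJ); cudaFree(dqw); cudaFree(dqd);
  return 0;
}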
f70624530cd0a5fc32837bba5876bc125e726f90.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Device-side code
__global__ void cvlUnit(const char *imgR, const char *imgG, const char *imgB, const char *core,
                        char *outR, char *outG, char *outB, int lenX, int lenY, int lenCore)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int index = y*lenX + x;
    if (x >= lenX || y >= lenY) return;
    if (x - lenCore/2 < 0 || x + lenCore/2 >= lenX || y - lenCore/2 < 0 || y + lenCore/2 >= lenY) {
        outR[index] = imgR[index];
        outG[index] = imgG[index];
        outB[index] = imgB[index];
        return;
    }
    int i, j, tmpX, tmpY;
    int sumR = 0;
    int sumG = 0;
    int sumB = 0;
    for (i = 0; i < lenCore; i++) {
        for (j = 0; j < lenCore; j++) {
            tmpX = x - lenCore/2 + i;
            tmpY = y - lenCore/2 + j;
            // if(x==8&&y==8){printf("tmpX=%d,tmpY=%d:\n",tmpX,tmpY);}
            sumR += imgR[tmpY*lenX + tmpX] * core[j*lenCore + i];
            // if(x==8&&y==8){
            //     printf("\tR:\t %d*%d,new=%d\n",imgR[tmpY*lenX+tmpX],core[j*lenCore+i],sumR);
            // }
            sumG += imgG[tmpY*lenX + tmpX] * core[j*lenCore + i];
            // if(x==8&&y==8){
            //     printf("\tG:\t %d*%d,new=%d\n",imgG[tmpY*lenX+tmpX],core[j*lenCore+i],sumG);
            // }
            sumB += imgB[tmpY*lenX + tmpX] * core[j*lenCore + i];
            // if(x==8&&y==8){
            //     printf("\tB:\t %d*%d,new=%d\n",imgB[tmpY*lenX+tmpX],core[j*lenCore+i],sumB);
            // }
        }
    }
    outR[index] = (char)(sumR*1.0/(lenCore*lenCore));
    outG[index] = (char)(sumG*1.0/(lenCore*lenCore));
    outB[index] = (char)(sumB*1.0/(lenCore*lenCore));
    return;
}
f70624530cd0a5fc32837bba5876bc125e726f90.cu
#include "includes.h" //设备端代码 __global__ void cvlUnit(const char *imgR,const char *imgG,const char *imgB,const char *core, char *outR,char *outG,char *outB,int lenX,int lenY,int lenCore) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int index=y*lenX+x; if(x>=lenX||y>=lenY)return; if(x-lenCore/2<0||x+lenCore/2>=lenX||y-lenCore/2<0||y+lenCore/2>=lenY){ outR[index]=imgR[index]; outG[index]=imgG[index]; outB[index]=imgB[index]; return ; } int i,j,tmpX,tmpY; int sumR=0; int sumG=0; int sumB=0; for(i=0;i<lenCore;i++){ for(j=0;j<lenCore;j++){ tmpX = x-lenCore/2+i; tmpY = y-lenCore/2+j; // if(x==8&&y==8){printf("tmpX=%d,tmpY=%d:\n",tmpX,tmpY);} sumR+=imgR[tmpY*lenX+tmpX]*core[j*lenCore+i]; // if(x==8&&y==8){ // printf("\tR:\t %d*%d,new=%d\n",imgR[tmpY*lenX+tmpX],core[j*lenCore+i],sumR); // } sumG+=imgG[tmpY*lenX+tmpX]*core[j*lenCore+i]; // if(x==8&&y==8){ // printf("\tG:\t %d*%d,new=%d\n",imgG[tmpY*lenX+tmpX],core[j*lenCore+i],sumG); // } sumB+=imgB[tmpY*lenX+tmpX]*core[j*lenCore+i]; // if(x==8&&y==8){ // printf("\tB:\t %d*%d,new=%d\n",imgB[tmpY*lenX+tmpX],core[j*lenCore+i],sumB); // } } } outR[index]=(char)(sumR*1.0/(lenCore*lenCore)); outG[index]=(char)(sumG*1.0/(lenCore*lenCore)); outB[index]=(char)(sumB*1.0/(lenCore*lenCore)); return; }
1953321ac8e1dce213f92e8e89fd1c1d010e9041.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP 1
#include <wb.h>

__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
    //@@ Insert code to implement vector addition here
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    if(i<len) out[i] = in1[i]+in2[i];
}

int main(int argc, char ** argv) {
    wbArg_t args;
    int inputLength;
    float * hostInput1;
    float * hostInput2;
    float * hostOutput;
    float * deviceInput1;
    float * deviceInput2;
    float * deviceOutput;

    args = wbArg_read(argc, argv);

    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
    hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
    hostOutput = (float *) malloc(inputLength * sizeof(float));
    wbTime_stop(Generic, "Importing data and creating memory on host");

    wbLog(TRACE, "The input length is ", inputLength);

    wbTime_start(GPU, "Allocating GPU memory.");
    //@@ Allocate GPU memory here
    float* d_A;
    float* d_B;
    float* d_C;
    int size = inputLength*sizeof(float);
    hipMalloc((void**) &d_A,size);
    hipMalloc((void**) &d_B,size);
    hipMalloc((void**) &d_C,size);
    wbTime_stop(GPU, "Allocating GPU memory.");

    wbTime_start(GPU, "Copying input memory to the GPU.");
    //@@ Copy memory to the GPU here
    hipMemcpy(d_A,hostInput1,size,hipMemcpyHostToDevice);
    hipMemcpy(d_B,hostInput2,size,hipMemcpyHostToDevice);
    wbTime_stop(GPU, "Copying input memory to the GPU.");

    //@@ Initialize the grid and block dimensions here
    dim3 DimGrid(((inputLength-1)/256 +1.0),1,1);
    dim3 DimBlock(256,1,1);

    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Launch the GPU Kernel here
    hipLaunchKernelGGL(( vecAdd), dim3(DimGrid),dim3(DimBlock), 0, 0, d_A,d_B,d_C,inputLength);
    hipDeviceSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");

    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU here
    hipMemcpy(hostOutput,d_C,size,hipMemcpyDeviceToHost);
    wbTime_stop(Copy, "Copying output memory to the CPU");

    wbTime_start(GPU, "Freeing GPU Memory");
    //@@ Free the GPU memory here
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    wbTime_stop(GPU, "Freeing GPU Memory");

    wbSolution(args, hostOutput, inputLength);

    free(hostInput1);
    free(hostInput2);
    free(hostOutput);

    return 0;
}
1953321ac8e1dce213f92e8e89fd1c1d010e9041.cu
// MP 1
#include <wb.h>

__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
    //@@ Insert code to implement vector addition here
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    if(i<len) out[i] = in1[i]+in2[i];
}

int main(int argc, char ** argv) {
    wbArg_t args;
    int inputLength;
    float * hostInput1;
    float * hostInput2;
    float * hostOutput;
    float * deviceInput1;
    float * deviceInput2;
    float * deviceOutput;

    args = wbArg_read(argc, argv);

    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
    hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
    hostOutput = (float *) malloc(inputLength * sizeof(float));
    wbTime_stop(Generic, "Importing data and creating memory on host");

    wbLog(TRACE, "The input length is ", inputLength);

    wbTime_start(GPU, "Allocating GPU memory.");
    //@@ Allocate GPU memory here
    float* d_A;
    float* d_B;
    float* d_C;
    int size = inputLength*sizeof(float);
    cudaMalloc((void**) &d_A,size);
    cudaMalloc((void**) &d_B,size);
    cudaMalloc((void**) &d_C,size);
    wbTime_stop(GPU, "Allocating GPU memory.");

    wbTime_start(GPU, "Copying input memory to the GPU.");
    //@@ Copy memory to the GPU here
    cudaMemcpy(d_A,hostInput1,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,hostInput2,size,cudaMemcpyHostToDevice);
    wbTime_stop(GPU, "Copying input memory to the GPU.");

    //@@ Initialize the grid and block dimensions here
    dim3 DimGrid(((inputLength-1)/256 +1.0),1,1);
    dim3 DimBlock(256,1,1);

    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Launch the GPU Kernel here
    vecAdd<<<DimGrid,DimBlock>>>(d_A,d_B,d_C,inputLength);
    cudaThreadSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");

    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU here
    cudaMemcpy(hostOutput,d_C,size,cudaMemcpyDeviceToHost);
    wbTime_stop(Copy, "Copying output memory to the CPU");

    wbTime_start(GPU, "Freeing GPU Memory");
    //@@ Free the GPU memory here
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    wbTime_stop(GPU, "Freeing GPU Memory");

    wbSolution(args, hostOutput, inputLength);

    free(hostInput1);
    free(hostInput2);
    free(hostOutput);

    return 0;
}
da67c23eadada394d1de6918f554b8ea9444fc6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _MUMMERGPU_KERNEL_H_ #define _MUMMERGPU_KERNEL_H_ #include <stdio.h> #include "common.hip" #ifdef n__DEVICE_EMULATION__ #define XPRINTF(...) printf(__VA_ARGS__) #define VERBOSE 0 #else #define XPRINTF(...) do{}while(0) #define VERBOSE 0 #endif #define WARP_SIZE 16 #if REORDER_TREE #define fNID "%d,%d" #define NID(addr) (addr & 0x0000FFFF), ((addr & 0xFFFF0000)>>16) #define GOROOT(addr) addr = 0x00010000 //#define GOROOT(addr) addr.x = 0; addr.y = 1 #else #define fNID "%d" #define NID(addr) addr #define GOROOT(addr) addr = 1 #endif #if COALESCED_QUERIES #define GETQCHAR(qrypos) ((queries[((qrypos) >> 2) << 4]) & ((0xFF) << (((qrypos) & 0x00000003)) << 3)) >> ((((qrypos) & 0x00000003 )) << 3) #elif QRYTEX #define GETQCHAR(qrypos) tex1Dfetch(qrytex, qryAddr + qrypos) #else #define GETQCHAR(qrypos) queries[qrypos] #endif #if COALESCED_QUERIES #define RESULT_SPAN WARP_SIZE #define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + coordAddrs[qryid] #else #define RESULT_SPAN 1 #define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + qryAddr - __umul24(qryid, min_match_len + 1) #endif #if REFTEX #define GETRCHAR(refpos) getRef(refpos) #else #define GETRCHAR(refpos) getRef(refpos, ref) #endif #if MERGETEX #if TREE_ACCESS_HISTOGRAM #if NODETEX #define GETNODE(addr, two_level) getMerged(addr, two_level, 0, NULL, NULL) #define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0, node_hist, child_hist) #define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1, NULL, NULL) #define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1, node_hist, child_hist) #else #define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, NULL, NULL) #define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, node_hist, child_hist) #define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, NULL, NULL) #define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, node_hist, child_hist) #endif #else #if NODETEX #define GETNODE(addr, two_level) getMerged(addr, two_level, 0) #define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0) #define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1) #define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1) #else #define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0) #define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0) #define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1) #define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1) #endif #endif #else #if NODETEX #if TREE_ACCESS_HISTOGRAM #define GETNODEHIST(addr, two_level) getNode(addr, two_level, node_hist) #define GETNODE(addr, two_level) getNode(addr, two_level, NULL) #else #define GETNODEHIST(addr, two_level) getNode(addr, two_level) #define GETNODE(addr, two_level) getNode(addr, two_level) #endif #else #if TREE_ACCESS_HISTOGRAM #define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes, node_hist) #define GETNODE(addr, two_level) getNode(addr, two_level, nodes, NULL) #else #define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes) #define GETNODE(addr, two_level) getNode(addr, two_level, nodes) #endif #endif #if CHILDTEX #if TREE_ACCESS_HISTOGRAM #define GETCHILDRENHIST(addr, two_level) getChildren(addr, 
two_level, child_hist) #define GETCHILDREN(addr, two_level) getChildren(addr, two_level, NULL) #else #define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level) #define GETCHILDREN(addr, two_level) getChildren(addr, two_level) #endif #else #if TREE_ACCESS_HISTOGRAM #define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr, child_hist) #define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr, NULL) #else #define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr) #define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr) #endif #endif #endif #if QRYTEX #define SHIFT_QUERIES(queries, qryAddr) #else #define SHIFT_QUERIES(queries, qryAddr) queries += qryAddr #endif #if REORDER_TREE texture<uint4, 2, hipReadModeElementType> nodetex; texture<uint4, 2, hipReadModeElementType> childrentex; #else texture<uint4, 1, hipReadModeElementType> nodetex; texture<uint4, 1, hipReadModeElementType> childrentex; #endif #if REORDER_REF texture<char, 2, hipReadModeElementType> reftex; #else texture<char, 1, hipReadModeElementType> reftex; #endif texture<char, 1, hipReadModeElementType> qrytex; struct __align__(8) _MatchCoord { union { int2 data; struct { int node; // match node int edge_match_length; // number of matching characters UP the parent edge }; }; }; // If leafchar is 0, store the ACGT$ links, else store the leafid struct _PixelOfChildren { union { uint4 data; union { struct { uchar3 a; uchar3 c; uchar3 g; uchar3 t; uchar3 d; char leafchar; }; struct { uchar3 leafid; unsigned char pad [12]; char leafchar0; }; }; }; }; // Store the start, end coordinate of node, and the parent, suffix links struct _PixelOfNode { union { uint4 data; struct { uchar3 parent; uchar3 suffix; uchar3 start; uchar3 end; uchar3 depth; unsigned char pad; }; }; }; #if TWO_LEVEL_CHILD_TREE #define CHILD_THRESH 128 __constant__ _PixelOfChildren child_tree_top[CHILD_THRESH]; #endif #if TWO_LEVEL_NODE_TREE #define NODE_THRESH 128 __constant__ _PixelOfNode node_tree_top[NODE_THRESH]; #endif //////////////////////////////////////////////////////////////////// ////////////////////////////////// /// addr2id ////////////////////////////////// __device__ int addr2id(unsigned int addr) { #if MERGETEX & REORDER_TREE addr |= (((addr & 0x800) << 1) << 16); addr &= 0xFFFF07FF; int blocky = (addr >> 16) & 0x1F; int bigy = (addr >> 16) >> 5; int bigx = ((addr & 0x0000FFFF) << 5) + blocky; return bigx + (bigy << 16); #elif REORDER_TREE int blocky = (addr >> 16) & 0x1F; int bigy = (addr >> 16) >> 5; int bigx = ((addr & 0x0000FFFF) << 5) + blocky; return bigx + (bigy << 17); #elif MERGETEX return addr; #else return addr; #endif } __device__ TextureAddress id2addr(int id) { TextureAddress retval; #if MERGETEX & REORDER_TREE // Half width is 2048 => 11 bits // TEXBLOCKSIZE is 32 => 5 bits int bigx = id & 0xFFFF; // 11 + 5 bits int bigy = id >> 16; retval.y = (bigy << 5) + (bigx & 0x1F); retval.x = bigx >> 5; // now stuff y's 13th bit into x's 12th bit retval.x |= (retval.y & 0x1000) >> 1; retval.y &= 0xFFF; #elif REORDER_TREE int bigx = id & 0x1FFFF; int bigy = id >> 17; retval.y = (bigy << 5) + (bigx & 0x1F); retval.x = bigx >> 5; #elif MERGETEX retval.x = id; #else retval.x = id; #endif return retval; } #define MKI(uc3) (uc3.x | (uc3.y << 8) | (uc3.z << 16)) ////////////////////////////////// /// arrayToAddress ////////////////////////////////// __device__ void arrayToAddress(uchar3 arr, unsigned int& addr) { #if REORDER_TREE addr = (arr.x | ((arr.z & 
0xF) << 8)) | ((arr.y | ((arr.z & 0xF0) << 4)) << 16); #else addr = MKI(arr); #endif } ////////////////////////////////// /// getRef ////////////////////////////////// __device__ char getRef(int refpos #if !REFTEX ,char* ref #endif ) { #if REORDER_REF int bigx = refpos & 0x3FFFF; int bigy = refpos >> 18; int y = (bigy << 2) + (bigx & 0x3); int x = bigx >> 2; #if REFTEX return tex2D(reftex, x, y); #else return *(ref + 65536 * y + x); #endif #else #if REFTEX return tex1Dfetch(reftex, refpos); #else return ref[refpos]; #endif #endif } ////////////////////////////////// /// RC ////////////////////////////////// __device__ char rc(char c) { switch(c) { case 'A': return 'T'; case 'C': return 'G'; case 'G': return 'C'; case 'T': return 'A'; case 'q': return '\0'; default: return c; }; } ////////////////////////////////// /// getNode ////////////////////////////////// __device__ uint4 getNode(unsigned int cur, bool use_two_level #if !NODETEX , _PixelOfNode* nodes #endif #if TREE_ACCESS_HISTOGRAM , int* node_hist #endif ) { #if TREE_ACCESS_HISTOGRAM int id = addr2id(cur); if (node_hist) { node_hist[id]++; } #endif #if TWO_LEVEL_NODE_TREE int id = addr2id(cur); if (use_two_level && id < NODE_THRESH) { return node_tree_top[id].data; } #endif #if NODETEX #if REORDER_TREE return tex2D(nodetex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16); #else return tex1Dfetch(nodetex, cur); #endif #else #if REORDER_TREE return (nodes + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data; #else return (nodes + cur)->data; #endif #endif } ////////////////////////////////// /// getChildren ////////////////////////////////// __device__ uint4 getChildren(unsigned int cur, bool use_two_level #if !CHILDTEX , _PixelOfChildren* childrenarr #endif #if TREE_ACCESS_HISTOGRAM , int* child_hist #endif ) { #if TREE_ACCESS_HISTOGRAM int id = addr2id(cur); if (child_hist) { child_hist[id]++; } #endif #if TWO_LEVEL_CHILD_TREE int id = addr2id(cur); if (id < CHILD_THRESH) { return child_tree_top[id].data; } #endif #if CHILDTEX #if REORDER_TREE return tex2D(childrentex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16); #else return tex1Dfetch(childrentex, cur); #endif #else #if REORDER_TREE return (childrenarr + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data; #else return (childrenarr + cur)->data; #endif #endif } #if MERGETEX ////////////////////////////////// /// getMerged ////////////////////////////////// __device__ uint4 getMerged( #if !NODETEX _PixelOfNode * nodes, _PixelOfChildren * childrenarr, #endif unsigned int cur, int use_two_level, int getChildrenData #if TREE_ACCESS_HISTOGRAM , int* node_hist , int* child_hist #endif ) { // TextureAddress cur = _cur; #if !REORDER_TREE //cur.x *= 2; unsigned int x = cur * 2; int useChildrenForData = 0; if (x >= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION) { x -= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION; useChildrenForData = 1; } #else unsigned short x = cur & 0x0000FFFF; unsigned short y = (cur & 0xFFFF0000) >> 16; int useChildrenForData = 0; // WARNING INSANE HACK TO WORK AROUND NVCC BUG goto TEST; MASK: x &= 0x7FF; x *= 2; goto INC; TEST: if (x >= 2048) { useChildrenForData = 1; } goto MASK; INC: #endif x += getChildrenData; #if !REORDER_TREE cur = x; #else cur = (y << 16) | x; #endif if (useChildrenForData) { return getChildren(cur, use_two_level #if !CHILDTEX , childrenarr #endif #if TREE_ACCESS_HISTOGRAM , child_hist #endif ); } else { return getNode(cur, use_two_level #if !NODETEX , nodes #endif #if TREE_ACCESS_HISTOGRAM , 
node_hist #endif ); } } #endif ////////////////////////////////// /// printNode, Emulator only ////////////////////////////////// #if VERBOSE #if CHILDTEX && NODETEX #define PRINTNODE(id) printNode(id) #define PRINTNODES(s,e) printNodes(s,e) #elif CHILDTEX #define PRINTNODE(id) printNode(id, nodes) #define PRINTNODES(s,e) printNodes(s, e, nodes) #elif NODETEX #define PRINTNODE(id) printNode(id, childarr) #define PRINTNODES(s,e) printNodes(s, e, childrenarr) #else #define PRINTNODE(id) printNode(id, nodes, childrenarr) #define PRINTNODES(s,e) printNodes(s, e, nodes, childrenarr) #endif __device__ void printNode(int nodeid #if !NODETEX , _PixelOfNode* nodes #endif #if !CHILDTEX , _PixelOfChildren* childrenarr #endif ) { TextureAddress addr = id2addr(nodeid); _PixelOfNode nd; nd.data = GETNODE(addr.data, false); _PixelOfChildren cd; cd.data = GETCHILDREN(addr.data, false); unsigned int a; arrayToAddress(cd.a, a); unsigned int c; arrayToAddress(cd.c, c); unsigned int g; arrayToAddress(cd.g, g); unsigned int t; arrayToAddress(cd.t, t); unsigned int d; arrayToAddress(cd.d, d); unsigned int p; arrayToAddress(nd.parent, p); unsigned int s; arrayToAddress(nd.suffix, s); int start = MKI(nd.start); int end = MKI(nd.end); int depth = MKI(nd.depth); char leafchar = cd.leafchar; XPRINTF("%d\t"fNID"\t%d\t%d\t%d\t%d\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\n", nodeid, NID(addr), start, end, depth, leafchar, NID(a), NID(c), NID(g), NID(t), NID(d), NID(p), NID(s)); } __device__ void printNodes(int start, int end #if !NODETEX , _PixelOfNode * nodes #endif #if !CHILDTEX ,_PixelOfChildren * childrenarr #endif ) { XPRINTF("id\taddr\tstart\tend\tdepth\tleaf\ta\tc\tg\tt\t$\tp\ts\n"); for (int i = start; i <= end; i++) { PRINTNODE(i); } } #else // !VERBOSE #define PRINTNODE(id) #define PRINTNODES(s,e) #endif #if VERBOSE #if NODETEX && CHILDTEX #define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc) #elif NODETEX #define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, childrenarr) #elif CHILDTEX #define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes) #else #define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes, childrenarr) #endif #else #define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc) #endif ////////////////////////////////// /// set_result ////////////////////////////////// __device__ void set_result(unsigned int cur, _MatchCoord* result, int edge_match_length, int qry_match_len, int min_match_len, int rc #if VERBOSE #if !NODETEX , _PixelOfNode * nodes #endif #if !CHILDTEX , _PixelOfChildren * childrenarr #endif #endif ) { if (qry_match_len > min_match_len) { edge_match_length |= rc; result->data = make_int2(cur, edge_match_length); #if VERBOSE _PixelOfNode nd; nd.data = GETNODE(cur, false); XPRINTF(" saving match cur=%d "fNID" len=%d edge_match=%d depth=%d\n", result->data.x, NID(cur), qry_match_len, edge_match_length, MKI(nd.depth)); #endif } else { XPRINTF(" match too short (%d < %d)\n", qry_match_len, min_match_len); } } ///////////////////////////////////// // Compute forward substring matches ///////////////////////////////////// /*! @todo This kernel uses no shared memory. Perhaps one or more of the strings could be read in a coalesced fashion? 
*/ __global__ void mummergpuKernel(void* match_coords, #if COALESCED_QUERIES int* coordAddrs, #endif #if !QRYTEX #if COALESCED_QUERIES int* queries, #else char* queries, #endif #endif #if !NODETEX _PixelOfNode* nodes, #endif #if !CHILDTEX _PixelOfChildren* childrenarr, #endif #if !REFTEX char* ref, #endif const int* queryAddrs, const int* queryLengths, const int numQueries, const int min_match_len #if TREE_ACCESS_HISTOGRAM ,int* node_hist, int* child_hist #endif ) { int qryid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (qryid >= numQueries) { return; } XPRINTF("> qryid: %d\n", qryid); if (qryid == 0) { PRINTNODES(0,200); } int qlen = queryLengths[qryid]; int qryAddr = queryAddrs[qryid]; //TextureAddress cur; unsigned int cur = 0; //cur.data = 0; int mustmatch = 0; int qry_match_len = 0; _MatchCoord * result = MATCH_BASE(match_coords, qryid); SHIFT_QUERIES(queries, qryAddr); int last = qlen - min_match_len; for (int qrystart = 0; qrystart <= last; qrystart++, result += RESULT_SPAN) { //_PixelOfNode node; unsigned int node_start; unsigned int prev; if ((cur == 0) || (qry_match_len < 1)) { // start at root of tree GOROOT(cur); qry_match_len = 1; mustmatch = 0; } char c = GETQCHAR(qrystart + qry_match_len); XPRINTF("In node ("fNID"): starting with %c [%d] => \n", NID(cur), c, qry_match_len); int refpos = 0; while ((c != '\0')) { XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len); _PixelOfChildren children; children.data = GETCHILDRENHIST(cur, false); prev = cur; uchar3 next; switch (c) { case 'A': next = children.a; break; case 'C': next = children.c; break; case 'G': next = children.g; break; case 'T': next = children.t; break; default: next = make_uchar3(0,0,0); break; }; arrayToAddress(next, cur); XPRINTF(" In node: ("fNID")\n", NID(cur)); // No edge to follow out of the node if (cur == 0) { XPRINTF(" no edge\n"); SET_RESULT(prev, result, 0, qry_match_len, min_match_len, FORWARD); qry_match_len -= 1; mustmatch = 0; goto NEXT_SUBSTRING; } _PixelOfNode node; node.data = GETNODEHIST(cur, true); node_start = MKI(node.start); unsigned int node_end = MKI(node.end); XPRINTF(" Edge coordinates: %d - %d\n", node_start, node_end); { int edgelen = node_end - node_start + 1; int edge_matchlen = node_start + mustmatch; int past_node_end = node_end + 1; int dist_to_edge_end = mustmatch - edgelen; if (mustmatch) { refpos = min(edge_matchlen, past_node_end); qry_match_len += min(edgelen, mustmatch); mustmatch = max(dist_to_edge_end, 0); } else { // Try to walk the edge, the first char definitely matches qry_match_len++; refpos = node_start + 1; } } c = GETQCHAR(qrystart + qry_match_len); while (refpos <= node_end && c != '\0') { char r = GETRCHAR(refpos); XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c); if (r != c) { // mismatch on edge XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len, refpos - (node_start)); goto RECORD_RESULT; } qry_match_len++; refpos++; c = GETQCHAR(qrystart + qry_match_len); } } // while(c) XPRINTF("end of string\n"); RECORD_RESULT: { //_PixelOfNode node; //node.data = getnodehist(cur, false); SET_RESULT(cur, result, refpos - node_start, qry_match_len, min_match_len, FORWARD); mustmatch = refpos - node_start; qry_match_len -= mustmatch + 1; } NEXT_SUBSTRING: { _PixelOfNode node; node.data = GETNODEHIST(prev, false); arrayToAddress(node.suffix, cur); } //XPRINTF(" following suffix link. 
mustmatch:%d qry_match_len:%d sl:("fNID")\n", // mustmatch, qry_match_len, NID(cur)); do {} while (0); } //for(int qrystart = 0; qrystart <= last; qrystart++, result += RESULT_SPAN) return; } /////////////////////////////////////// //// Compute reverse substring matches /////////////////////////////////////// __global__ void mummergpuRCKernel(MatchCoord* match_coords, char* queries, const int* queryAddrs, const int* queryLengths, const int numQueries, const int min_match_len) { return; } __global__ void printKernel(MatchInfo * matches, int totalMatches, Alignment * alignments, #if !QRYTEX #if COALESCED_QUERIES int * queries, #else char * queries, #endif #endif #if !NODETEX _PixelOfNode* nodes, #endif #if !CHILDTEX _PixelOfChildren* childrenarr, #endif const int * queryAddrs, const int * queryLengths, const int page_begin, const int page_end, const int page_shadow_left, const int page_shadow_right, const int min_match_length #if TREE_ACCESS_HISTOGRAM ,int* node_hist, int* child_hist #endif ) { int matchid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (matchid >= totalMatches) { return; } XPRINTF(">matchid: %d qry: %d\n", matchid, matches[matchid].queryid); int qryAddr = queryAddrs[matches[matchid].queryid]; SHIFT_QUERIES(queries, qryAddr); #if !QRYTEX XPRINTF("query: %s\n", queries); #endif char queryflankingbase = GETQCHAR(matches[matchid].qrystartpos); // Find the top node to start printing from unsigned int matchaddr = matches[matchid].matchnode.data; unsigned int cur = matchaddr; unsigned int printParent = cur; _PixelOfNode node; node.data = GETNODE(cur, true); XPRINTF("starting node: %d "fNID" depth: %d\n", matches[matchid].matchnode, NID(cur), MKI(node.depth)); while (MKI(node.depth) > min_match_length) { printParent = cur; arrayToAddress(node.parent, cur); node.data = GETNODE(cur, true); XPRINTF("par: "fNID" depth: %d\n", NID(cur), MKI(node.depth)); } // traverse the tree starting at printParent unsigned int badParent = cur; cur = printParent; XPRINTF(" printParent: "fNID"\n", NID(printParent)); char curchild = 'A'; bool forceToParent = false; node.data = GETNODE(printParent, true); int matchlen = MKI(node.depth) - 1; int depthToGoldenPath = 0; int matchnum = matches[matchid].resultsoffset; // If the printparent is the matchnode, then we are already off the golden path if (printParent == matchaddr) { if (matches[matchid].edgematch > 0) { node.data = GETNODE(badParent, true); matchlen = MKI(node.depth)-1+matches[matchid].edgematch; } depthToGoldenPath = 1; } // keep going until I hit the printParent's parent while (cur != badParent) { _PixelOfChildren children; children.data = GETCHILDREN(cur, true); char isLeaf = children.leafchar; XPRINTF(" cur: "fNID" curchild: %c isLeaf:%d forceToParent:%d\n", NID(cur), curchild, isLeaf, forceToParent); if (isLeaf || forceToParent) { // See if I am left maximal and print if (isLeaf && isLeaf != queryflankingbase) { int leafid = MKI(children.leafid); int left_in_ref = (leafid - 1) + page_begin; int right_in_ref = left_in_ref + matchlen; if ((left_in_ref != page_begin || page_shadow_left == -1) && (right_in_ref != page_end || page_shadow_right == -1)) { if (!(left_in_ref > page_begin && right_in_ref < page_shadow_left)) { //sprintf(buf, "%8d%10d%10d\n", left_in_ref, qrystartpos+1, matchlen); XPRINTF("%8d%10d%10d\n", left_in_ref, matches[matchid].qrystartpos+1, matchlen); alignments[matchnum].left_in_ref = left_in_ref; alignments[matchnum].matchlen = matchlen; matchnum++; } } } forceToParent = false; // now return to my parent and advance 
curchild node.data = GETNODE(cur, true); unsigned int myParent; arrayToAddress(node.parent, myParent); _PixelOfChildren pchildren; pchildren.data = GETCHILDREN(myParent, true); unsigned int pa, pc, pg, pt; arrayToAddress(pchildren.a, pa); arrayToAddress(pchildren.c, pc); arrayToAddress(pchildren.g, pg); arrayToAddress(pchildren.t, pt); if (pa == cur) { curchild = 'C'; } else if (pc == cur) { curchild = 'G'; } else if (pg == cur) { curchild = 'T'; } else if (pt == cur) { curchild = '$'; } else { // I must be the $ child, go up a level forceToParent = true; } cur = myParent; if (depthToGoldenPath) { depthToGoldenPath--; } if (depthToGoldenPath == 0) { node.data = GETNODE(cur, true); matchlen = MKI(node.depth)-1; } } else { // try to walk down the tree _PixelOfChildren children; children.data = GETCHILDREN(cur, true); char goldenChild = 0; if (depthToGoldenPath == 0) { // we are currently on the golden path // one of the children is also on the golden path goldenChild = GETQCHAR(matches[matchid].qrystartpos+matchlen+1); } do { if (curchild == 'A') { if (children.a.x || children.a.y || children.a.z) { XPRINTF(" -> A\n"); arrayToAddress(children.a, cur); break; } curchild = 'C'; } if (curchild == 'C') { if (children.c.x || children.c.y || children.c.z) { XPRINTF(" -> C\n"); arrayToAddress(children.c, cur); break; } curchild = 'G'; } if (curchild == 'G') { if (children.g.x || children.g.y || children.g.z) { XPRINTF(" -> G\n"); arrayToAddress(children.g, cur); break; } curchild = 'T'; } if (curchild == 'T') { if (children.t.x || children.t.y || children.t.z) { XPRINTF(" -> T\n"); arrayToAddress(children.t, cur); break; } curchild = '$'; } if (curchild == '$') { if (children.d.x || children.d.y || children.d.z) { XPRINTF(" -> $\n"); arrayToAddress(children.d, cur); break; } } // checked all of the children, go back to parent forceToParent = true; } while (0); if (!forceToParent) { if (depthToGoldenPath == 0) { if (curchild == goldenChild) { node.data = GETNODE(cur, true); matchlen = MKI(node.depth)-1; if (cur == matchaddr) { // we overextended the golden path depthToGoldenPath = 1; if (matches[matchid].edgematch > 0) { unsigned int par; arrayToAddress(node.parent, par); node.data = GETNODE(par, true); matchlen = MKI(node.depth) - 1 + matches[matchid].edgematch; } } } else { depthToGoldenPath = 1; } } else { depthToGoldenPath++; } curchild = 'A'; } } } } #endif // #ifndef _MUMMERGPU_HH_
da67c23eadada394d1de6918f554b8ea9444fc6f.cu
#ifndef _MUMMERGPU_KERNEL_H_ #define _MUMMERGPU_KERNEL_H_ #include <stdio.h> #include "common.cu" #ifdef n__DEVICE_EMULATION__ #define XPRINTF(...) printf(__VA_ARGS__) #define VERBOSE 0 #else #define XPRINTF(...) do{}while(0) #define VERBOSE 0 #endif #define WARP_SIZE 16 #if REORDER_TREE #define fNID "%d,%d" #define NID(addr) (addr & 0x0000FFFF), ((addr & 0xFFFF0000)>>16) #define GOROOT(addr) addr = 0x00010000 //#define GOROOT(addr) addr.x = 0; addr.y = 1 #else #define fNID "%d" #define NID(addr) addr #define GOROOT(addr) addr = 1 #endif #if COALESCED_QUERIES #define GETQCHAR(qrypos) ((queries[((qrypos) >> 2) << 4]) & ((0xFF) << (((qrypos) & 0x00000003)) << 3)) >> ((((qrypos) & 0x00000003 )) << 3) #elif QRYTEX #define GETQCHAR(qrypos) tex1Dfetch(qrytex, qryAddr + qrypos) #else #define GETQCHAR(qrypos) queries[qrypos] #endif #if COALESCED_QUERIES #define RESULT_SPAN WARP_SIZE #define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + coordAddrs[qryid] #else #define RESULT_SPAN 1 #define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + qryAddr - __umul24(qryid, min_match_len + 1) #endif #if REFTEX #define GETRCHAR(refpos) getRef(refpos) #else #define GETRCHAR(refpos) getRef(refpos, ref) #endif #if MERGETEX #if TREE_ACCESS_HISTOGRAM #if NODETEX #define GETNODE(addr, two_level) getMerged(addr, two_level, 0, NULL, NULL) #define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0, node_hist, child_hist) #define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1, NULL, NULL) #define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1, node_hist, child_hist) #else #define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, NULL, NULL) #define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, node_hist, child_hist) #define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, NULL, NULL) #define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, node_hist, child_hist) #endif #else #if NODETEX #define GETNODE(addr, two_level) getMerged(addr, two_level, 0) #define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0) #define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1) #define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1) #else #define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0) #define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0) #define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1) #define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1) #endif #endif #else #if NODETEX #if TREE_ACCESS_HISTOGRAM #define GETNODEHIST(addr, two_level) getNode(addr, two_level, node_hist) #define GETNODE(addr, two_level) getNode(addr, two_level, NULL) #else #define GETNODEHIST(addr, two_level) getNode(addr, two_level) #define GETNODE(addr, two_level) getNode(addr, two_level) #endif #else #if TREE_ACCESS_HISTOGRAM #define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes, node_hist) #define GETNODE(addr, two_level) getNode(addr, two_level, nodes, NULL) #else #define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes) #define GETNODE(addr, two_level) getNode(addr, two_level, nodes) #endif #endif #if CHILDTEX #if TREE_ACCESS_HISTOGRAM #define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, child_hist) #define GETCHILDREN(addr, two_level) getChildren(addr, two_level, NULL) 
#else #define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level) #define GETCHILDREN(addr, two_level) getChildren(addr, two_level) #endif #else #if TREE_ACCESS_HISTOGRAM #define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr, child_hist) #define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr, NULL) #else #define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr) #define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr) #endif #endif #endif #if QRYTEX #define SHIFT_QUERIES(queries, qryAddr) #else #define SHIFT_QUERIES(queries, qryAddr) queries += qryAddr #endif #if REORDER_TREE texture<uint4, 2, cudaReadModeElementType> nodetex; texture<uint4, 2, cudaReadModeElementType> childrentex; #else texture<uint4, 1, cudaReadModeElementType> nodetex; texture<uint4, 1, cudaReadModeElementType> childrentex; #endif #if REORDER_REF texture<char, 2, cudaReadModeElementType> reftex; #else texture<char, 1, cudaReadModeElementType> reftex; #endif texture<char, 1, cudaReadModeElementType> qrytex; struct __align__(8) _MatchCoord { union { int2 data; struct { int node; // match node int edge_match_length; // number of matching characters UP the parent edge }; }; }; // If leafchar is 0, store the ACGT$ links, else store the leafid struct _PixelOfChildren { union { uint4 data; union { struct { uchar3 a; uchar3 c; uchar3 g; uchar3 t; uchar3 d; char leafchar; }; struct { uchar3 leafid; unsigned char pad [12]; char leafchar0; }; }; }; }; // Store the start, end coordinate of node, and the parent, suffix links struct _PixelOfNode { union { uint4 data; struct { uchar3 parent; uchar3 suffix; uchar3 start; uchar3 end; uchar3 depth; unsigned char pad; }; }; }; #if TWO_LEVEL_CHILD_TREE #define CHILD_THRESH 128 __constant__ _PixelOfChildren child_tree_top[CHILD_THRESH]; #endif #if TWO_LEVEL_NODE_TREE #define NODE_THRESH 128 __constant__ _PixelOfNode node_tree_top[NODE_THRESH]; #endif //////////////////////////////////////////////////////////////////// ////////////////////////////////// /// addr2id ////////////////////////////////// __device__ int addr2id(unsigned int addr) { #if MERGETEX & REORDER_TREE addr |= (((addr & 0x800) << 1) << 16); addr &= 0xFFFF07FF; int blocky = (addr >> 16) & 0x1F; int bigy = (addr >> 16) >> 5; int bigx = ((addr & 0x0000FFFF) << 5) + blocky; return bigx + (bigy << 16); #elif REORDER_TREE int blocky = (addr >> 16) & 0x1F; int bigy = (addr >> 16) >> 5; int bigx = ((addr & 0x0000FFFF) << 5) + blocky; return bigx + (bigy << 17); #elif MERGETEX return addr; #else return addr; #endif } __device__ TextureAddress id2addr(int id) { TextureAddress retval; #if MERGETEX & REORDER_TREE // Half width is 2048 => 11 bits // TEXBLOCKSIZE is 32 => 5 bits int bigx = id & 0xFFFF; // 11 + 5 bits int bigy = id >> 16; retval.y = (bigy << 5) + (bigx & 0x1F); retval.x = bigx >> 5; // now stuff y's 13th bit into x's 12th bit retval.x |= (retval.y & 0x1000) >> 1; retval.y &= 0xFFF; #elif REORDER_TREE int bigx = id & 0x1FFFF; int bigy = id >> 17; retval.y = (bigy << 5) + (bigx & 0x1F); retval.x = bigx >> 5; #elif MERGETEX retval.x = id; #else retval.x = id; #endif return retval; } #define MKI(uc3) (uc3.x | (uc3.y << 8) | (uc3.z << 16)) ////////////////////////////////// /// arrayToAddress ////////////////////////////////// __device__ void arrayToAddress(uchar3 arr, unsigned int& addr) { #if REORDER_TREE addr = (arr.x | ((arr.z & 0xF) << 8)) | ((arr.y | ((arr.z & 0xF0) << 4)) << 16); #else addr = MKI(arr); #endif } 
////////////////////////////////// /// getRef ////////////////////////////////// __device__ char getRef(int refpos #if !REFTEX ,char* ref #endif ) { #if REORDER_REF int bigx = refpos & 0x3FFFF; int bigy = refpos >> 18; int y = (bigy << 2) + (bigx & 0x3); int x = bigx >> 2; #if REFTEX return tex2D(reftex, x, y); #else return *(ref + 65536 * y + x); #endif #else #if REFTEX return tex1Dfetch(reftex, refpos); #else return ref[refpos]; #endif #endif } ////////////////////////////////// /// RC ////////////////////////////////// __device__ char rc(char c) { switch(c) { case 'A': return 'T'; case 'C': return 'G'; case 'G': return 'C'; case 'T': return 'A'; case 'q': return '\0'; default: return c; }; } ////////////////////////////////// /// getNode ////////////////////////////////// __device__ uint4 getNode(unsigned int cur, bool use_two_level #if !NODETEX , _PixelOfNode* nodes #endif #if TREE_ACCESS_HISTOGRAM , int* node_hist #endif ) { #if TREE_ACCESS_HISTOGRAM int id = addr2id(cur); if (node_hist) { node_hist[id]++; } #endif #if TWO_LEVEL_NODE_TREE int id = addr2id(cur); if (use_two_level && id < NODE_THRESH) { return node_tree_top[id].data; } #endif #if NODETEX #if REORDER_TREE return tex2D(nodetex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16); #else return tex1Dfetch(nodetex, cur); #endif #else #if REORDER_TREE return (nodes + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data; #else return (nodes + cur)->data; #endif #endif } ////////////////////////////////// /// getChildren ////////////////////////////////// __device__ uint4 getChildren(unsigned int cur, bool use_two_level #if !CHILDTEX , _PixelOfChildren* childrenarr #endif #if TREE_ACCESS_HISTOGRAM , int* child_hist #endif ) { #if TREE_ACCESS_HISTOGRAM int id = addr2id(cur); if (child_hist) { child_hist[id]++; } #endif #if TWO_LEVEL_CHILD_TREE int id = addr2id(cur); if (id < CHILD_THRESH) { return child_tree_top[id].data; } #endif #if CHILDTEX #if REORDER_TREE return tex2D(childrentex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16); #else return tex1Dfetch(childrentex, cur); #endif #else #if REORDER_TREE return (childrenarr + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data; #else return (childrenarr + cur)->data; #endif #endif } #if MERGETEX ////////////////////////////////// /// getMerged ////////////////////////////////// __device__ uint4 getMerged( #if !NODETEX _PixelOfNode * nodes, _PixelOfChildren * childrenarr, #endif unsigned int cur, int use_two_level, int getChildrenData #if TREE_ACCESS_HISTOGRAM , int* node_hist , int* child_hist #endif ) { // TextureAddress cur = _cur; #if !REORDER_TREE //cur.x *= 2; unsigned int x = cur * 2; int useChildrenForData = 0; if (x >= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION) { x -= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION; useChildrenForData = 1; } #else unsigned short x = cur & 0x0000FFFF; unsigned short y = (cur & 0xFFFF0000) >> 16; int useChildrenForData = 0; // WARNING INSANE HACK TO WORK AROUND NVCC BUG goto TEST; MASK: x &= 0x7FF; x *= 2; goto INC; TEST: if (x >= 2048) { useChildrenForData = 1; } goto MASK; INC: #endif x += getChildrenData; #if !REORDER_TREE cur = x; #else cur = (y << 16) | x; #endif if (useChildrenForData) { return getChildren(cur, use_two_level #if !CHILDTEX , childrenarr #endif #if TREE_ACCESS_HISTOGRAM , child_hist #endif ); } else { return getNode(cur, use_two_level #if !NODETEX , nodes #endif #if TREE_ACCESS_HISTOGRAM , node_hist #endif ); } } #endif ////////////////////////////////// /// printNode, Emulator 
only ////////////////////////////////// #if VERBOSE #if CHILDTEX && NODETEX #define PRINTNODE(id) printNode(id) #define PRINTNODES(s,e) printNodes(s,e) #elif CHILDTEX #define PRINTNODE(id) printNode(id, nodes) #define PRINTNODES(s,e) printNodes(s, e, nodes) #elif NODETEX #define PRINTNODE(id) printNode(id, childarr) #define PRINTNODES(s,e) printNodes(s, e, childrenarr) #else #define PRINTNODE(id) printNode(id, nodes, childrenarr) #define PRINTNODES(s,e) printNodes(s, e, nodes, childrenarr) #endif __device__ void printNode(int nodeid #if !NODETEX , _PixelOfNode* nodes #endif #if !CHILDTEX , _PixelOfChildren* childrenarr #endif ) { TextureAddress addr = id2addr(nodeid); _PixelOfNode nd; nd.data = GETNODE(addr.data, false); _PixelOfChildren cd; cd.data = GETCHILDREN(addr.data, false); unsigned int a; arrayToAddress(cd.a, a); unsigned int c; arrayToAddress(cd.c, c); unsigned int g; arrayToAddress(cd.g, g); unsigned int t; arrayToAddress(cd.t, t); unsigned int d; arrayToAddress(cd.d, d); unsigned int p; arrayToAddress(nd.parent, p); unsigned int s; arrayToAddress(nd.suffix, s); int start = MKI(nd.start); int end = MKI(nd.end); int depth = MKI(nd.depth); char leafchar = cd.leafchar; XPRINTF("%d\t"fNID"\t%d\t%d\t%d\t%d\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\n", nodeid, NID(addr), start, end, depth, leafchar, NID(a), NID(c), NID(g), NID(t), NID(d), NID(p), NID(s)); } __device__ void printNodes(int start, int end #if !NODETEX , _PixelOfNode * nodes #endif #if !CHILDTEX ,_PixelOfChildren * childrenarr #endif ) { XPRINTF("id\taddr\tstart\tend\tdepth\tleaf\ta\tc\tg\tt\t$\tp\ts\n"); for (int i = start; i <= end; i++) { PRINTNODE(i); } } #else // !VERBOSE #define PRINTNODE(id) #define PRINTNODES(s,e) #endif #if VERBOSE #if NODETEX && CHILDTEX #define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc) #elif NODETEX #define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, childrenarr) #elif CHILDTEX #define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes) #else #define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes, childrenarr) #endif #else #define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc) #endif ////////////////////////////////// /// set_result ////////////////////////////////// __device__ void set_result(unsigned int cur, _MatchCoord* result, int edge_match_length, int qry_match_len, int min_match_len, int rc #if VERBOSE #if !NODETEX , _PixelOfNode * nodes #endif #if !CHILDTEX , _PixelOfChildren * childrenarr #endif #endif ) { if (qry_match_len > min_match_len) { edge_match_length |= rc; result->data = make_int2(cur, edge_match_length); #if VERBOSE _PixelOfNode nd; nd.data = GETNODE(cur, false); XPRINTF(" saving match cur=%d "fNID" len=%d edge_match=%d depth=%d\n", result->data.x, NID(cur), qry_match_len, edge_match_length, MKI(nd.depth)); #endif } else { XPRINTF(" match too short (%d < %d)\n", qry_match_len, min_match_len); } } ///////////////////////////////////// // Compute forward substring matches ///////////////////////////////////// /*! @todo This kernel uses no shared memory. Perhaps one or more of the strings could be read in a coalesced fashion? 
*/ __global__ void mummergpuKernel(void* match_coords, #if COALESCED_QUERIES int* coordAddrs, #endif #if !QRYTEX #if COALESCED_QUERIES int* queries, #else char* queries, #endif #endif #if !NODETEX _PixelOfNode* nodes, #endif #if !CHILDTEX _PixelOfChildren* childrenarr, #endif #if !REFTEX char* ref, #endif const int* queryAddrs, const int* queryLengths, const int numQueries, const int min_match_len #if TREE_ACCESS_HISTOGRAM ,int* node_hist, int* child_hist #endif ) { int qryid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (qryid >= numQueries) { return; } XPRINTF("> qryid: %d\n", qryid); if (qryid == 0) { PRINTNODES(0,200); } int qlen = queryLengths[qryid]; int qryAddr = queryAddrs[qryid]; //TextureAddress cur; unsigned int cur = 0; //cur.data = 0; int mustmatch = 0; int qry_match_len = 0; _MatchCoord * result = MATCH_BASE(match_coords, qryid); SHIFT_QUERIES(queries, qryAddr); int last = qlen - min_match_len; for (int qrystart = 0; qrystart <= last; qrystart++, result += RESULT_SPAN) { //_PixelOfNode node; unsigned int node_start; unsigned int prev; if ((cur == 0) || (qry_match_len < 1)) { // start at root of tree GOROOT(cur); qry_match_len = 1; mustmatch = 0; } char c = GETQCHAR(qrystart + qry_match_len); XPRINTF("In node ("fNID"): starting with %c [%d] => \n", NID(cur), c, qry_match_len); int refpos = 0; while ((c != '\0')) { XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len); _PixelOfChildren children; children.data = GETCHILDRENHIST(cur, false); prev = cur; uchar3 next; switch (c) { case 'A': next = children.a; break; case 'C': next = children.c; break; case 'G': next = children.g; break; case 'T': next = children.t; break; default: next = make_uchar3(0,0,0); break; }; arrayToAddress(next, cur); XPRINTF(" In node: ("fNID")\n", NID(cur)); // No edge to follow out of the node if (cur == 0) { XPRINTF(" no edge\n"); SET_RESULT(prev, result, 0, qry_match_len, min_match_len, FORWARD); qry_match_len -= 1; mustmatch = 0; goto NEXT_SUBSTRING; } _PixelOfNode node; node.data = GETNODEHIST(cur, true); node_start = MKI(node.start); unsigned int node_end = MKI(node.end); XPRINTF(" Edge coordinates: %d - %d\n", node_start, node_end); { int edgelen = node_end - node_start + 1; int edge_matchlen = node_start + mustmatch; int past_node_end = node_end + 1; int dist_to_edge_end = mustmatch - edgelen; if (mustmatch) { refpos = min(edge_matchlen, past_node_end); qry_match_len += min(edgelen, mustmatch); mustmatch = max(dist_to_edge_end, 0); } else { // Try to walk the edge, the first char definitely matches qry_match_len++; refpos = node_start + 1; } } c = GETQCHAR(qrystart + qry_match_len); while (refpos <= node_end && c != '\0') { char r = GETRCHAR(refpos); XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c); if (r != c) { // mismatch on edge XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len, refpos - (node_start)); goto RECORD_RESULT; } qry_match_len++; refpos++; c = GETQCHAR(qrystart + qry_match_len); } } // while(c) XPRINTF("end of string\n"); RECORD_RESULT: { //_PixelOfNode node; //node.data = getnodehist(cur, false); SET_RESULT(cur, result, refpos - node_start, qry_match_len, min_match_len, FORWARD); mustmatch = refpos - node_start; qry_match_len -= mustmatch + 1; } NEXT_SUBSTRING: { _PixelOfNode node; node.data = GETNODEHIST(prev, false); arrayToAddress(node.suffix, cur); } //XPRINTF(" following suffix link. 
mustmatch:%d qry_match_len:%d sl:("fNID")\n", // mustmatch, qry_match_len, NID(cur)); do {} while (0); } //for(int qrystart = 0; qrystart <= last; qrystart++, result += RESULT_SPAN) return; } /////////////////////////////////////// //// Compute reverse substring matches /////////////////////////////////////// __global__ void mummergpuRCKernel(MatchCoord* match_coords, char* queries, const int* queryAddrs, const int* queryLengths, const int numQueries, const int min_match_len) { return; } __global__ void printKernel(MatchInfo * matches, int totalMatches, Alignment * alignments, #if !QRYTEX #if COALESCED_QUERIES int * queries, #else char * queries, #endif #endif #if !NODETEX _PixelOfNode* nodes, #endif #if !CHILDTEX _PixelOfChildren* childrenarr, #endif const int * queryAddrs, const int * queryLengths, const int page_begin, const int page_end, const int page_shadow_left, const int page_shadow_right, const int min_match_length #if TREE_ACCESS_HISTOGRAM ,int* node_hist, int* child_hist #endif ) { int matchid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (matchid >= totalMatches) { return; } XPRINTF(">matchid: %d qry: %d\n", matchid, matches[matchid].queryid); int qryAddr = queryAddrs[matches[matchid].queryid]; SHIFT_QUERIES(queries, qryAddr); #if !QRYTEX XPRINTF("query: %s\n", queries); #endif char queryflankingbase = GETQCHAR(matches[matchid].qrystartpos); // Find the top node to start printing from unsigned int matchaddr = matches[matchid].matchnode.data; unsigned int cur = matchaddr; unsigned int printParent = cur; _PixelOfNode node; node.data = GETNODE(cur, true); XPRINTF("starting node: %d "fNID" depth: %d\n", matches[matchid].matchnode, NID(cur), MKI(node.depth)); while (MKI(node.depth) > min_match_length) { printParent = cur; arrayToAddress(node.parent, cur); node.data = GETNODE(cur, true); XPRINTF("par: "fNID" depth: %d\n", NID(cur), MKI(node.depth)); } // traverse the tree starting at printParent unsigned int badParent = cur; cur = printParent; XPRINTF(" printParent: "fNID"\n", NID(printParent)); char curchild = 'A'; bool forceToParent = false; node.data = GETNODE(printParent, true); int matchlen = MKI(node.depth) - 1; int depthToGoldenPath = 0; int matchnum = matches[matchid].resultsoffset; // If the printparent is the matchnode, then we are already off the golden path if (printParent == matchaddr) { if (matches[matchid].edgematch > 0) { node.data = GETNODE(badParent, true); matchlen = MKI(node.depth)-1+matches[matchid].edgematch; } depthToGoldenPath = 1; } // keep going until I hit the printParent's parent while (cur != badParent) { _PixelOfChildren children; children.data = GETCHILDREN(cur, true); char isLeaf = children.leafchar; XPRINTF(" cur: "fNID" curchild: %c isLeaf:%d forceToParent:%d\n", NID(cur), curchild, isLeaf, forceToParent); if (isLeaf || forceToParent) { // See if I am left maximal and print if (isLeaf && isLeaf != queryflankingbase) { int leafid = MKI(children.leafid); int left_in_ref = (leafid - 1) + page_begin; int right_in_ref = left_in_ref + matchlen; if ((left_in_ref != page_begin || page_shadow_left == -1) && (right_in_ref != page_end || page_shadow_right == -1)) { if (!(left_in_ref > page_begin && right_in_ref < page_shadow_left)) { //sprintf(buf, "%8d%10d%10d\n", left_in_ref, qrystartpos+1, matchlen); XPRINTF("%8d%10d%10d\n", left_in_ref, matches[matchid].qrystartpos+1, matchlen); alignments[matchnum].left_in_ref = left_in_ref; alignments[matchnum].matchlen = matchlen; matchnum++; } } } forceToParent = false; // now return to my parent and advance 
curchild node.data = GETNODE(cur, true); unsigned int myParent; arrayToAddress(node.parent, myParent); _PixelOfChildren pchildren; pchildren.data = GETCHILDREN(myParent, true); unsigned int pa, pc, pg, pt; arrayToAddress(pchildren.a, pa); arrayToAddress(pchildren.c, pc); arrayToAddress(pchildren.g, pg); arrayToAddress(pchildren.t, pt); if (pa == cur) { curchild = 'C'; } else if (pc == cur) { curchild = 'G'; } else if (pg == cur) { curchild = 'T'; } else if (pt == cur) { curchild = '$'; } else { // I must be the $ child, go up a level forceToParent = true; } cur = myParent; if (depthToGoldenPath) { depthToGoldenPath--; } if (depthToGoldenPath == 0) { node.data = GETNODE(cur, true); matchlen = MKI(node.depth)-1; } } else { // try to walk down the tree _PixelOfChildren children; children.data = GETCHILDREN(cur, true); char goldenChild = 0; if (depthToGoldenPath == 0) { // we are currently on the golden path // one of the children is also on the golden path goldenChild = GETQCHAR(matches[matchid].qrystartpos+matchlen+1); } do { if (curchild == 'A') { if (children.a.x || children.a.y || children.a.z) { XPRINTF(" -> A\n"); arrayToAddress(children.a, cur); break; } curchild = 'C'; } if (curchild == 'C') { if (children.c.x || children.c.y || children.c.z) { XPRINTF(" -> C\n"); arrayToAddress(children.c, cur); break; } curchild = 'G'; } if (curchild == 'G') { if (children.g.x || children.g.y || children.g.z) { XPRINTF(" -> G\n"); arrayToAddress(children.g, cur); break; } curchild = 'T'; } if (curchild == 'T') { if (children.t.x || children.t.y || children.t.z) { XPRINTF(" -> T\n"); arrayToAddress(children.t, cur); break; } curchild = '$'; } if (curchild == '$') { if (children.d.x || children.d.y || children.d.z) { XPRINTF(" -> $\n"); arrayToAddress(children.d, cur); break; } } // checked all of the children, go back to parent forceToParent = true; } while (0); if (!forceToParent) { if (depthToGoldenPath == 0) { if (curchild == goldenChild) { node.data = GETNODE(cur, true); matchlen = MKI(node.depth)-1; if (cur == matchaddr) { // we overextended the golden path depthToGoldenPath = 1; if (matches[matchid].edgematch > 0) { unsigned int par; arrayToAddress(node.parent, par); node.data = GETNODE(par, true); matchlen = MKI(node.depth) - 1 + matches[matchid].edgematch; } } } else { depthToGoldenPath = 1; } } else { depthToGoldenPath++; } curchild = 'A'; } } } } #endif // #ifndef _MUMMERGPU_HH_
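// A minimal sketch of the one-thread-per-work-item pattern the kernels above
// rely on: each thread derives its index from
//     __umul24(blockIdx.x, blockDim.x) + threadIdx.x
// and exits early when that index exceeds the work count, so the host only has
// to round the launch up to a whole number of blocks. The kernel name
// exampleKernel, its output array, and the 256-thread block size are
// illustrative assumptions, not part of MUMmerGPU.
// (__umul24 multiplies only the low 24 bits of its operands, which is safe as
// long as blockIdx.x * blockDim.x stays below 2^24.)
__global__ void exampleKernel(int numItems, int* out)
{
    int idx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
    if (idx >= numItems) return;   // threads in the padded last block do nothing
    out[idx] = idx;                // stand-in for the real per-query work
}

void launchExample(int numItems, int* d_out)
{
    const int threadsPerBlock = 256;
    const int numBlocks = (numItems + threadsPerBlock - 1) / threadsPerBlock;
    exampleKernel<<<numBlocks, threadsPerBlock>>>(numItems, d_out);
}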
external_source.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <utility> #include <memory> #include <list> #include "dali/pipeline/operator/builtin/external_source.h" namespace dali { template<> void ExternalSource<GPUBackend>::RunImpl(DeviceWorkspace &ws) { std::list<uptr_tl_type> tensor_list_elm; std::list<uptr_cuda_event_type> internal_copy_to_storage; ExternalSourceState state_info; { std::unique_lock<std::mutex> busy_lock(busy_m_); tensor_list_elm = tl_data_.PopFront(); state_info = state_.front(); state_.pop_front(); // even with no_copy we may have copied from TensorVector to TensorList and we // need to sync with that if (!no_copy_ || state_info.copied_shared_data) { internal_copy_to_storage = copy_to_storage_events_.PopFront(); } } auto &output = ws.Output<GPUBackend>(0); hipStream_t stream_used = ws.has_stream() ? ws.stream() : 0; if (!no_copy_ || state_info.copied_shared_data) { CUDA_CALL(hipStreamWaitEvent(stream_used, *internal_copy_to_storage.front(), 0)); } std::swap(output, *tensor_list_elm.front()); if (!no_copy_ || state_info.copied_shared_data) { RecycleBuffer(tensor_list_elm, &internal_copy_to_storage); } else { RecycleBuffer(tensor_list_elm); } } DALI_REGISTER_OPERATOR(_ExternalSource, ExternalSource<GPUBackend>, GPU); DALI_REGISTER_OPERATOR(ExternalSource, ExternalSource<GPUBackend>, GPU); } // namespace dali
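// RunImpl above avoids blocking the host: when an internal copy into storage
// was issued, the consumer stream is made to wait on the recorded event via
// hipStreamWaitEvent before the data is handed out. A minimal, self-contained
// sketch of that producer/consumer pattern; the function and stream names are
// illustrative assumptions, not DALI API.
#include <hip/hip_runtime.h>

void copy_then_consume(const float* h_src, float* d_dst, size_t n,
                       hipStream_t copy_stream, hipStream_t compute_stream) {
  // h_src is assumed to be pinned (hipHostMalloc) so the copy is truly async.
  hipEvent_t copy_done;
  hipEventCreateWithFlags(&copy_done, hipEventDisableTiming);

  // Producer: issue the copy on its own stream, then mark its completion.
  hipMemcpyAsync(d_dst, h_src, n * sizeof(float), hipMemcpyHostToDevice,
                 copy_stream);
  hipEventRecord(copy_done, copy_stream);

  // Consumer: order the compute stream after the copy without a host sync --
  // the same mechanism the operator uses before swapping the buffer out.
  hipStreamWaitEvent(compute_stream, copy_done, 0);
  // ... kernels launched on compute_stream may now safely read d_dst ...

  hipEventDestroy(copy_done);  // legal even if the event is still pending
}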
external_source.cu
// Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <utility> #include <memory> #include <list> #include "dali/pipeline/operator/builtin/external_source.h" namespace dali { template<> void ExternalSource<GPUBackend>::RunImpl(DeviceWorkspace &ws) { std::list<uptr_tl_type> tensor_list_elm; std::list<uptr_cuda_event_type> internal_copy_to_storage; ExternalSourceState state_info; { std::unique_lock<std::mutex> busy_lock(busy_m_); tensor_list_elm = tl_data_.PopFront(); state_info = state_.front(); state_.pop_front(); // even with no_copy we may have copied from TensorVector to TensorList and we // need to sync with that if (!no_copy_ || state_info.copied_shared_data) { internal_copy_to_storage = copy_to_storage_events_.PopFront(); } } auto &output = ws.Output<GPUBackend>(0); cudaStream_t stream_used = ws.has_stream() ? ws.stream() : 0; if (!no_copy_ || state_info.copied_shared_data) { CUDA_CALL(cudaStreamWaitEvent(stream_used, *internal_copy_to_storage.front(), 0)); } std::swap(output, *tensor_list_elm.front()); if (!no_copy_ || state_info.copied_shared_data) { RecycleBuffer(tensor_list_elm, &internal_copy_to_storage); } else { RecycleBuffer(tensor_list_elm); } } DALI_REGISTER_OPERATOR(_ExternalSource, ExternalSource<GPUBackend>, GPU); DALI_REGISTER_OPERATOR(ExternalSource, ExternalSource<GPUBackend>, GPU); } // namespace dali
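// The CUDA file above mirrors the HIP version. One detail worth a standalone
// illustration is why RecycleBuffer returns the event together with the
// staging buffer: a common reason to keep an event paired with a buffer is
// that the buffer may only be reused once all GPU work captured by that event
// has finished. A minimal sketch of that check; StagingSlot and SlotIsFree
// are illustrative assumptions, not DALI types.
#include <cuda_runtime.h>

struct StagingSlot {
  void* data = nullptr;     // device staging buffer
  cudaEvent_t last_use{};   // assumed created with cudaEventCreate and
                            // recorded after the last operation touching data
};

// True when all GPU work captured by last_use has completed, so the slot can
// be handed back to a producer; cudaErrorNotReady means it is still in use.
inline bool SlotIsFree(const StagingSlot& slot) {
  return cudaEventQuery(slot.last_use) == cudaSuccess;
}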
1ff467c555dd17ad08d07a008962d15bda6f9ab2.hip
// !!! This is a file automatically generated by hipify!!! //quotientFilter.cu /* * Copyright 2021 Regents of the University of California * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <limits.h> #include <assert.h> #include <hip/hip_runtime_api.h> #include "sqf.cuh" #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/scan.h> #include <thrust/unique.h> #include "hipcub/hipcub.hpp" #include "../moderngpu/src/moderngpu/kernel_merge.hxx" namespace sqf_filter { #ifndef LOW_BIT_MASK #define LOW_BIT_MASK(n) ((1U << n) - 1U) #endif #ifndef NOT_FOUND #define NOT_FOUND UINT_MAX #endif __device__ __host__ size_t calcNumSlotsGPU(unsigned int q, unsigned int r) { size_t tableBits = (1 << q) * (r + 3); size_t tableSlots = tableBits / 8; return tableSlots * 1.1; //allow an extra 10% for overflow } __host__ void initFilterGPU(struct quotient_filter* qf, unsigned int q, unsigned int r) { assert((q + r) <= 32); //need to be able to store fingerprints in unsigned int assert(((r + 3) % 8) == 0); //slot size is one or more full bytes qf->qbits = q; qf->rbits = r; qf->bytesPerElement = (r + 3) / 8; size_t slots = calcNumSlotsGPU(q, r); unsigned char* d_filterTable; hipMalloc((void**) &d_filterTable, slots * sizeof(unsigned char)); qf->table = d_filterTable; printf("SQF using %llu bytes\n",slots*sizeof(unsigned char)); } __device__ bool isOccupiedGPU(unsigned int element) { return element & 4; } __device__ bool isContinuationGPU(unsigned int element) { return element & 2; } __device__ bool isShiftedGPU(unsigned int element) { return element & 1; } __device__ bool isEmptyGPU(unsigned int element) { return ((element & 7) == 0); } __device__ unsigned int setOccupiedGPU(unsigned int element) { return element | 4; } __device__ unsigned int clearOccupiedGPU(unsigned int element) { return element & ~4; } __device__ unsigned int setContinuationGPU(unsigned int element) { return element | 2; } __device__ unsigned int clearContinuationGPU(unsigned int element) { return element & ~2; } __device__ unsigned int setShiftedGPU(unsigned int element) { return element | 1; } __device__ unsigned int clearShiftedGPU(unsigned int element) { return element & ~1; } __device__ __host__ unsigned int getRemainderGPU(unsigned int element) { return element >> 3; } __device__ unsigned int isolateOccupiedBit(unsigned int element) { return element & 4; } __device__ __host__ unsigned int FNVhashGPU(unsigned int value, unsigned int tableSize) { unsigned char p[4]; p[0] = (value >> 24) & 0xFF; p[1] = (value >> 16) & 0xFF; p[2] = (value >> 8) & 0xFF; p[3] = value & 0xFF; unsigned int h = 2166136261; for (int i = 0; i < 4; i++){ h = (h * 16777619) ^ p[i]; } return h % tableSize; } __device__ __host__ unsigned int Normal_APHash(unsigned int value, unsigned int tableSize) { unsigned char p[4]; p[0] = (value >> 24) & 0xFF; p[1] = (value >> 16) & 0xFF; p[2] = (value >> 8) & 0xFF; p[3] = value & 0xFF; unsigned int hash = 0xAAAAAAAA; for (int i = 0; i < 4; i++){ hash ^= ((i & 1) == 0) ? 
((hash << 7) ^ p[i] ^ (hash >> 3)) : (~((hash << 11) ^ p[i] ^ (hash >> 5))); } return hash % tableSize; } __device__ __host__ unsigned int getElementGPU(struct quotient_filter* qf, unsigned int index) { unsigned int startSlot = index * qf->bytesPerElement; unsigned int element = qf->table[startSlot]; for (int i = 1; i < qf->bytesPerElement; i++){ element = (element << 8) | qf->table[startSlot + i]; } return element; } __device__ void setElementGPU(struct quotient_filter* qf, unsigned int index, unsigned int value) { unsigned int startSlot = index * qf->bytesPerElement; for (int i = 0; i < qf->bytesPerElement; i++){ unsigned int shift = qf->bytesPerElement - 1 - i; qf->table[startSlot + i] = (value >> (8 * shift)) & LOW_BIT_MASK(8); } } __device__ unsigned int findRunStartGPU(struct quotient_filter* qf, unsigned int fq) { unsigned int numElements = (1 << qf->qbits) * 1.1; //start bucket is fq unsigned int b = fq; //find beginning of cluster: while(isShiftedGPU(getElementGPU(qf, b))){ b--; } //find start of run we're interested in: //slot counter starts at beginning of cluster unsigned int s = b; while(b != fq){ do{ s++; }while((isContinuationGPU(getElementGPU(qf, s))) && (s < numElements)); //find end of current run do{ b++; }while((!isOccupiedGPU(getElementGPU(qf, b))) && (b < numElements)); //count number of runs passed } //now s is first value in correct run return s; } __device__ void insertItemHereGPU(struct quotient_filter* qf, unsigned int index, unsigned int value) { unsigned int previousElement; unsigned int newElement = value; bool empty = false; while(!empty){ previousElement = getElementGPU(qf, index); empty = isEmptyGPU(previousElement); previousElement = setShiftedGPU(previousElement); if(isOccupiedGPU(previousElement)){ //Need to preserve correct is_occupied bits previousElement = clearOccupiedGPU(previousElement); newElement = setOccupiedGPU(newElement); } setElementGPU(qf, index, newElement); newElement = previousElement; index++; } } __global__ void lookUp(int numItems, struct quotient_filter qfilter, unsigned int* hashValues, unsigned int* slotValues) { //returns NOT_FOUND (UINT_MAX) in slotValues[idx] if value is not in the filter, and returns the location of the remainder if it is in the filter unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; unsigned int hashValue = hashValues[idx]; //separate into quotient and remainder unsigned int fq = (hashValue >> qfilter.rbits) & LOW_BIT_MASK(qfilter.qbits); unsigned int fr = hashValue & LOW_BIT_MASK(qfilter.rbits); unsigned int element = getElementGPU(&qfilter, fq); if(!isOccupiedGPU(element)){ slotValues[idx] = NOT_FOUND; return; } unsigned int s = findRunStartGPU(&qfilter, fq); //search through elements in run do{ unsigned int remainder = getRemainderGPU(getElementGPU(&qfilter, s)); if(remainder == fr){ slotValues[idx] = s; return; } else if(remainder > fr){ slotValues[idx] = NOT_FOUND; return; } s++; }while(isContinuationGPU(getElementGPU(&qfilter, s))); slotValues[idx] = NOT_FOUND; } __global__ void hashInputs(int numItems, quotient_filter qfilter, unsigned int* insertValues, unsigned int* fingerprints) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; //hash values to get fingerprints // unsigned int hashValue = FNVhashGPU(insertValues[idx], (1 << (qfilter.qbits + qfilter.rbits))); unsigned int hashValue = Normal_APHash(insertValues[idx], (1 << (qfilter.qbits + 
qfilter.rbits))); fingerprints[idx] = hashValue; } __host__ float launchSortedLookups(quotient_filter qfilter, int numValues, unsigned int* d_lookupValues, unsigned int* d_returnValuesArray) { //Allocate array for hash values thrust::device_vector<unsigned int> d_hashValues(numValues); thrust::fill(d_hashValues.begin(), d_hashValues.end(), 0); unsigned int* d_hashValuesArray = thrust::raw_pointer_cast(&d_hashValues[0]); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); //Hash input values int numBlocks = (numValues + 127) / 128; dim3 hashBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( hashInputs), dim3(hashBlockDims), dim3(128), 0, 0, numValues, qfilter, d_lookupValues, d_hashValuesArray); //Create index array to track inputs -> outputs thrust::device_vector<unsigned int> d_indices(numValues); thrust::fill(d_indices.begin(), d_indices.end(), 1); thrust::exclusive_scan(d_indices.begin(), d_indices.end(), d_indices.begin(), 0); //Sort by fingerprint thrust::sort_by_key(d_hashValues.begin(), d_hashValues.end(), d_indices.begin()); //Launch lookup kernel numBlocks = (numValues + 1023) / 1024; dim3 blockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( lookUp), dim3(blockDims), dim3(1024), 0, 0, numValues, qfilter, d_hashValuesArray, d_returnValuesArray); //Sort outputs thrust::device_ptr<unsigned int> d_returnValues(d_returnValuesArray); thrust::sort_by_key(d_indices.begin(), d_indices.end(), d_returnValues); hipEventRecord(stop); //Calculate timing results hipEventSynchronize(stop); float lookupTime = 0; hipEventElapsedTime(&lookupTime, start, stop); //Free Memory d_hashValues.~device_vector<unsigned int>(); hipEventDestroy(start); hipEventDestroy(stop); return lookupTime; } __global__ void hashAndLookUp(int numItems, struct quotient_filter qfilter, unsigned int* lookupValues, unsigned int* slotValues) { //returns NOT_FOUND (UINT_MAX) in slotValues[idx] if value is not in the filter, and returns the location of the remainder if it is in the filter unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; unsigned int hashValue = Normal_APHash(lookupValues[idx], (1 << (qfilter.qbits + qfilter.rbits))); //separate into quotient and remainder unsigned int fq = (hashValue >> qfilter.rbits) & LOW_BIT_MASK(qfilter.qbits); unsigned int fr = hashValue & LOW_BIT_MASK(qfilter.rbits); unsigned int element = getElementGPU(&qfilter, fq); if(!isOccupiedGPU(element)){ slotValues[idx] = NOT_FOUND; return; } unsigned int s = findRunStartGPU(&qfilter, fq); //search through elements in run do{ unsigned int remainder = getRemainderGPU(getElementGPU(&qfilter, s)); if(remainder == fr){ slotValues[idx] = s; return; } else if(remainder > fr){ slotValues[idx] = NOT_FOUND; return; } s++; }while(isContinuationGPU(getElementGPU(&qfilter, s))); slotValues[idx] = NOT_FOUND; } __host__ float launchUnsortedLookups(quotient_filter qfilter, int numValues, unsigned int* d_lookupValues, unsigned int* d_returnValuesArray) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); //Launch lookup kernel int numBlocks = (numValues + 1023) / 1024; dim3 blockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( hashAndLookUp), dim3(blockDims), dim3(1024), 0, 0, numValues, qfilter, d_lookupValues, d_returnValuesArray); hipEventRecord(stop); //Calculate timing results hipEventSynchronize(stop); float lookupTime = 0; hipEventElapsedTime(&lookupTime, start, stop); 
hipEventDestroy(start); hipEventDestroy(stop); return lookupTime; } __host__ void printQuotientFilterGPU(struct quotient_filter* qf) { unsigned char* h_filterTable = new unsigned char[calcNumSlotsGPU(qf->qbits, qf->rbits)]; hipMemcpy(h_filterTable, qf->table, calcNumSlotsGPU(qf->qbits, qf->rbits) * sizeof(unsigned char), hipMemcpyDeviceToHost); unsigned char* d_filterTable = qf->table; qf->table = h_filterTable; int filterSize = (1 << qf->qbits) * 1.1; printf("Printing metadata and remainders:\n"); for(int i = 0; i < filterSize/10; i++){ for(int j = 0; j < 10; j++){ int element = getElementGPU(qf, 10*i + j); printf("%u \t", element & 7); } printf("\n"); for(int j = 0; j < 10; j++){ int element = getElementGPU(qf, 10*i + j); printf("%u \t", getRemainderGPU(element)); } printf("\n --------------------------------------------------------------------- \n"); } printf("\n"); qf->table = d_filterTable; } __global__ void locateInsertSuperclusters(int numItems, quotient_filter qfilter, unsigned int* superclusterStarts) { //marks the beginning of each supercluster by looking for empty slots unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; superclusterStarts[idx] = 0; if(idx == 0) return; if(isEmptyGPU(getElementGPU(&qfilter, idx - 1))){ superclusterStarts[idx] = 1; } } __global__ void superclusterBidding(int numItems, quotient_filter qfilter, unsigned int* insertValues, unsigned int* superclusterIDs, bool* insertFlags, unsigned int* slotWinners) { //Outputs an array with one value per supercluster. These values can be inserted in parallel without collisions. unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; //initialize insert flags insertFlags[idx] = 1; if(insertValues[idx] == NOT_FOUND){ return; } //calculate fingerprint // unsigned int hashValue = FNVhashGPU(insertValues[idx], (1 << (qfilter.qbits + qfilter.rbits))); unsigned int hashValue = Normal_APHash(insertValues[idx], (1 << (qfilter.qbits + qfilter.rbits))); //separate out the quotient/canonical slot bits unsigned int fq = (hashValue >> qfilter.rbits) & LOW_BIT_MASK(qfilter.qbits); //determine which supercluster the item belongs in unsigned int superclusterNumber = superclusterIDs[fq]; //write the item's index to the supercluster slot to bid for insert slotWinners[superclusterNumber] = idx; } __global__ void insertItemGPU(int numItems, quotient_filter qfilter, unsigned int* insertValues, unsigned int* winnerIndices, unsigned int* finalLocationValues, bool* insertFlags) { //inserts items into the filter, returning their slot locations in slotValues[idx] //if the item is already in the filter, it still returns the item location, although no changes are made unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; //check that there is an item to insert for this supercluster if(winnerIndices[idx] == NOT_FOUND){ finalLocationValues[idx] = NOT_FOUND; return; } //determine which value is being added to the QF unsigned int originalIndex = winnerIndices[idx]; //reset winnerIndices for next bidding round winnerIndices[idx] = NOT_FOUND; insertFlags[originalIndex] = 0; //want to remove this item from insert queue unsigned int value = insertValues[originalIndex]; //calculate fingerprint // unsigned int hashValue = FNVhashGPU(value, (1 << (qfilter.qbits + qfilter.rbits))); unsigned int hashValue = Normal_APHash(value, (1 << (qfilter.qbits + 
qfilter.rbits))); //separate into quotient and remainder unsigned int fq = (hashValue >> qfilter.rbits) & LOW_BIT_MASK(qfilter.qbits); unsigned int fr = hashValue & LOW_BIT_MASK(qfilter.rbits); unsigned int canonElement = getElementGPU(&qfilter, fq); unsigned int newElement = fr << 3; if(isEmptyGPU(canonElement)){ setElementGPU(&qfilter, fq, setOccupiedGPU(newElement)); finalLocationValues[idx] = fq; return; } if(!isOccupiedGPU(canonElement)){ //set is_occupied to show that there is now a run for this slot setElementGPU(&qfilter, fq, setOccupiedGPU(canonElement)); } //Find beginning of item's run unsigned int runStart = findRunStartGPU(&qfilter, fq); unsigned int s = runStart; if(isOccupiedGPU(canonElement)){ //If slot already has a run, search through its elements. do{ unsigned int remainder = getRemainderGPU(getElementGPU(&qfilter, s)); if(remainder == fr){ //the item is already in the filter finalLocationValues[idx] = s; return; } else if(remainder > fr){ //s now points to where item goes break; } s++; }while(isContinuationGPU(getElementGPU(&qfilter, s))); if(s == runStart){ //The new element is now the start of the run, but we must move old start over, so it will be continuation unsigned int oldStartElement = getElementGPU(&qfilter, runStart); setElementGPU(&qfilter, runStart, setContinuationGPU(oldStartElement)); } else{ //New element is not the start, so set its continuation bit newElement = setContinuationGPU(newElement); } } if(s != fq){ //If it's not being inserted into the canonical slot, the element is shifted. newElement = setShiftedGPU(newElement); } insertItemHereGPU(&qfilter, s, newElement); finalLocationValues[idx] = s; return; } __host__ float insert(quotient_filter qfilter, int numValues, unsigned int* d_insertValues) { int filterSize = (1 << qfilter.qbits) * 1.1; //number of (r + 3)-bit slots in the filter //Allocate all necessary memory for inserts int* h_numItemsLeft = new int[1]; //counts number of elements in insert queue h_numItemsLeft[0] = numValues; int* d_numItemsLeft; hipMalloc((void**) &d_numItemsLeft, sizeof(int)); unsigned int* d_superclusterIndicators; //stores bits marking beginning of superclusters hipMalloc((void**) &d_superclusterIndicators, filterSize * sizeof(unsigned int)); //Variables for CUB function temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; unsigned int* d_superclusterLabels = NULL; //labels each slot with its supercluster number hipMalloc((void**) &d_superclusterLabels, filterSize * sizeof(unsigned int)); int* h_lastSuperclusterLabel = new int[1]; int maxNumSuperclusters = calcNumSlotsGPU(qfilter.qbits, qfilter.rbits) + 1; unsigned int* d_slotWinners; hipMalloc((void**) &d_slotWinners, maxNumSuperclusters * sizeof(unsigned int)); unsigned int* h_slotWinners = new unsigned int[maxNumSuperclusters]; for(int i = 0; i < maxNumSuperclusters; i++){ h_slotWinners[i] = NOT_FOUND; } hipMemcpy(d_slotWinners, h_slotWinners, maxNumSuperclusters * sizeof(unsigned int), hipMemcpyHostToDevice); unsigned int* d_insertLocations; //Output for actual locations where items are inserted hipMalloc((void**) &d_insertLocations, maxNumSuperclusters * sizeof(unsigned int)); bool* d_insertFlags; //Flags for removing items from insert queue hipMalloc((void**) &d_insertFlags, numValues * sizeof(bool)); unsigned int* d_insertItemsQueue; hipMalloc((void**) &d_insertItemsQueue, numValues * sizeof(unsigned int)); hipMemcpy(d_insertItemsQueue, d_insertValues, numValues * sizeof(unsigned int), hipMemcpyDeviceToDevice); hipEvent_t start, stop; 
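// Overview of the do/while loop below (a summary of the code in this file,
// using the hipified hipcub:: names it calls):
//   1. locateInsertSuperclusters marks every slot whose left neighbour is
//      empty, i.e. the start of a supercluster;
//   2. hipcub::DeviceScan::InclusiveSum turns those marks into a supercluster
//      label for every slot;
//   3. superclusterBidding lets the queued items race to write their index
//      into their supercluster's slot -- one winner survives per supercluster;
//   4. insertItemGPU inserts all winners in parallel (two winners never share
//      a supercluster, so their shifts cannot collide) and clears their
//      insert flags;
//   5. hipcub::DeviceSelect::Flagged compacts the queue down to the items that
//      were not inserted, and the loop repeats until the queue is empty.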
hipEventCreate(&start); hipEventCreate(&stop); //hipProfilerStart(); hipEventRecord(start); do{ //TODO: could consider marking superclusters from previous rounds with no items to insert so that we don't continue to launch threads for these superclusters to do no work //Find supercluster array: int numBlocks = (filterSize + 1023) / 1024; dim3 SCBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( locateInsertSuperclusters), dim3(SCBlockDims), dim3(1024), 0, 0, filterSize, qfilter, d_superclusterIndicators); //CUB Inclusive Prefix Sum d_temp_storage = NULL; temp_storage_bytes = 0; CubDebugExit(hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_superclusterIndicators, d_superclusterLabels, filterSize)); hipMalloc(&d_temp_storage, temp_storage_bytes); CubDebugExit(hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_superclusterIndicators, d_superclusterLabels, filterSize)); //Determine how many superclusters there are hipMemcpy(h_lastSuperclusterLabel, d_superclusterLabels + (filterSize - 1), sizeof(unsigned int), hipMemcpyDeviceToHost); int numSuperclusters = h_lastSuperclusterLabel[0] + 1; //Pick one element per supercluster to insert numBlocks = (h_numItemsLeft[0] + 127) / 128; dim3 biddingBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( superclusterBidding), dim3(biddingBlockDims), dim3(128), 0, 0, h_numItemsLeft[0], qfilter, d_insertItemsQueue, d_superclusterLabels, d_insertFlags, d_slotWinners); //Insert items into QF numBlocks = (numSuperclusters + 255) / 256; dim3 insertBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( insertItemGPU), dim3(insertBlockDims), dim3(256), 0, 0, numSuperclusters, qfilter, d_insertItemsQueue, d_slotWinners, d_insertLocations, d_insertFlags); //Remove successfully inserted items from the queue d_temp_storage = NULL; temp_storage_bytes = 0; CubDebugExit(hipcub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_insertItemsQueue, d_insertFlags, d_insertItemsQueue, d_numItemsLeft, h_numItemsLeft[0])); hipMalloc(&d_temp_storage, temp_storage_bytes); CubDebugExit(hipcub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_insertItemsQueue, d_insertFlags, d_insertItemsQueue, d_numItemsLeft, h_numItemsLeft[0])); hipMemcpy(h_numItemsLeft, d_numItemsLeft, sizeof(int), hipMemcpyDeviceToHost); }while(h_numItemsLeft[0] > 0); hipEventRecord(stop); //hipProfilerStop(); //Calculate timing results hipEventSynchronize(stop); float insertTime = 0; hipEventElapsedTime(&insertTime, start, stop); //Free memory delete[] h_numItemsLeft; hipFree(d_numItemsLeft); hipFree(d_superclusterIndicators); hipFree(d_temp_storage); hipFree(d_superclusterLabels); hipFree(d_slotWinners); delete[] h_slotWinners; hipFree(d_insertLocations); hipFree(d_insertFlags); hipFree(d_insertItemsQueue); hipEventDestroy(start); hipEventDestroy(stop); return insertTime; } __global__ void quotienting(int numItems, unsigned int qbits, unsigned int rbits, unsigned int* quotients, unsigned int* remainders) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; //return quotients and remainders unsigned int hashValue = quotients[idx]; //quotients array initially stores the fingerprint values unsigned int canonicalSlot = (hashValue >> rbits) & LOW_BIT_MASK(qbits); quotients[idx] = canonicalSlot; unsigned int remainderBits = hashValue & LOW_BIT_MASK(rbits); remainders[idx] = remainderBits; } __global__ void findSegmentHeads(int numItems, unsigned int* quotients, 
unsigned int* segStarts) { //locate the beginnings of segments unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; if(idx != 0){ if(quotients[idx] != quotients[idx - 1]){ segStarts[idx] = 1; } } } __global__ void calcOffsets(int numItems, unsigned int* locations, unsigned int* segLabels, int* offsets, int* credits, int* creditCarryover) { //compute the shift/credits for a group of elements when merging their segments unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems || idx ==0) return; unsigned int segmentIdx = segLabels[idx]; if((segmentIdx != segLabels[idx - 1]) && (segmentIdx % 2 == 1)){ offsets[segmentIdx] = locations[idx - 1] - locations[idx] + 1; creditCarryover[segmentIdx] = credits[idx - 1]; } } __global__ void shiftElements(int numItems, int* offsets, int* credits, unsigned int* locations, int* creditCarryover, unsigned int* segLabels) { //calculate the shifts for merging segments unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems || idx == 0) return; unsigned int segmentIdx = segLabels[idx]; int netShift = offsets[segmentIdx] - credits[idx]; int newCredits = 0; if(netShift > 0){ //merging the segments causes the items to shift. locations[idx] += netShift; newCredits = 0; } else{ //there are extra slots between segments. Track these with credits. newCredits = -netShift; } credits[idx] = newCredits + creditCarryover[segmentIdx]; segLabels[idx] /= 2; } __global__ void setMetadata(int numItems, unsigned int* remainders, unsigned int* quotients, unsigned int* locations) { //set is_continuation and is_shifted bits for each item in the filter unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; unsigned int element = remainders[idx]; element = element << 3; //shift remainder left to make room for metadata bits //is_continuation: check if quotient[i-1] = quotient[i] (this is already stored in segStarts) if(idx != 0 && quotients[idx] == quotients[idx - 1]) element = setContinuationGPU(element); //is_shifted: if location > quotient if(locations[idx] != quotients[idx]) element = setShiftedGPU(element); remainders[idx] = element; } __global__ void writeRemainders(int numItems, quotient_filter qfilter, unsigned int* remainders, unsigned int* locations) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; setElementGPU(&qfilter, locations[idx], remainders[idx]); } __global__ void setOccupiedBits(int numItems, quotient_filter qfilter, unsigned int* quotients) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; unsigned int element = getElementGPU(&qfilter, quotients[idx]); setElementGPU(&qfilter, quotients[idx], setOccupiedGPU(element)); } __host__ float bulkBuildParallelMerging(quotient_filter qfilter, int numValues, unsigned int* d_insertValues, bool NoDuplicates) { //build a quotient filter by inserting all (or a large batch of) items all at once /* 1. Compute all fingerprints 2. Sort list of fingerprints 3. Quotienting 4. Segmented scan of array of 1's 5. Add to quotients 6. Create array of credits[number items], initialized to 0 7. "Associative scan" with saturating operator to find end positions 8. 
Write values to filter */ //Memory Allocation thrust::device_vector<unsigned int> d_quotients(numValues); thrust::fill(d_quotients.begin(), d_quotients.end(), 0); unsigned int* d_quotientsArray = thrust::raw_pointer_cast(&d_quotients[0]); thrust::device_vector<unsigned int> d_locations(numValues); thrust::fill_n(d_locations.begin(), numValues, 1); unsigned int* d_locationsArray = thrust::raw_pointer_cast(&d_locations[0]); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //hipProfilerStart(); hipEventRecord(start); //Hash input values int numBlocks = (numValues + 127) / 128; dim3 hashBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( hashInputs), dim3(hashBlockDims), dim3(128), 0, 0, numValues, qfilter, d_insertValues, d_quotientsArray); //store fingerprints in quotients array //Sort by fingerprint thrust::sort(d_quotients.begin(), d_quotients.end()); //Remove duplicates, if desired if(NoDuplicates == true){ thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintEnd = thrust::unique(d_quotients.begin(), d_quotients.end()); d_quotients.erase(fingerprintEnd, d_quotients.end()); numValues = d_quotients.end() - d_quotients.begin(); } //Divide fingerprints into quotients and remainders unsigned int* d_remaindersArray; hipMalloc((void**) &d_remaindersArray, numValues * sizeof(unsigned int)); hipMemset(d_remaindersArray, 0, numValues * sizeof(unsigned int)); numBlocks = (numValues + 767) / 768; dim3 quotientingBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( quotienting), dim3(quotientingBlockDims), dim3(768), 0, 0, numValues, qfilter.qbits, qfilter.rbits, d_quotientsArray, d_remaindersArray); //Segmented scan of array of 1's thrust::exclusive_scan_by_key(d_quotients.begin(), d_quotients.end(), d_locations.begin(), d_locations.begin()); //Add scanned values to quotients to find initial locations before shifts thrust::transform(d_quotients.begin(), d_quotients.end(), d_locations.begin(), d_locations.begin(), thrust::plus<unsigned int>()); //Associative scans: //1. Each quotient is a segment //2. Pair up segments //3. offset = L_tail - R_head + 1 //4. net shift = offset - credit //5. 
for each element: //if (net shift > 0): shift = net shift; credit = 0 //if (net shift < 0): shift = 0; credit = -net shift //segmentLabel = segmentLabel/2 thrust::device_vector<unsigned int> d_segStarts(numValues); thrust::fill(d_segStarts.begin(), d_segStarts.end(), 0); unsigned int* d_segStartsArray = thrust::raw_pointer_cast(&d_segStarts[0]); thrust::device_vector<unsigned int> d_segLabels(numValues); unsigned int* d_segLabelsArray = thrust::raw_pointer_cast(&d_segLabels[0]); //Label segments for grouping items numBlocks = (numValues + 767) / 768; dim3 findSegHeadsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( findSegmentHeads), dim3(findSegHeadsBlockDims), dim3(768), 0, 0, numValues, d_quotientsArray, d_segStartsArray); thrust::inclusive_scan(d_segStarts.begin(), d_segStarts.end(), d_segLabels.begin()); d_segStarts.~device_vector<unsigned int>(); //Join segments, calculating shifts along the way int numSegments = d_segLabels[numValues - 1] + 1; int numLoops = (int) ceil(log2((float)numSegments)); int* d_offsets; hipMalloc((void**) &d_offsets, numSegments * sizeof(int)); int* d_creditCarryover; hipMalloc((void**) &d_creditCarryover, numSegments * sizeof(int)); int* d_credits; hipMalloc((void**) &d_credits, numValues * sizeof(int)); hipMemset(d_credits, 0, numValues * sizeof(int)); for(int i = 0; i < numLoops; i++){ hipMemset(d_offsets, 0, numSegments * sizeof(int)); hipMemset(d_creditCarryover, 0, numSegments * sizeof(int)); //Calculate offsets between segments numBlocks = (numValues + 255) / 256; dim3 findSegHeadsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( calcOffsets), dim3(findSegHeadsBlockDims), dim3(256), 0, 0, numValues, d_locationsArray, d_segLabelsArray, d_offsets, d_credits, d_creditCarryover); //Calculate the shifts/credits for each item in this round of merging //Relabel segments so that pairs have now merged numBlocks = (numValues + 767) / 768; dim3 shiftElementsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( shiftElements), dim3(shiftElementsBlockDims), dim3(768), 0, 0, numValues, d_offsets, d_credits, d_locationsArray, d_creditCarryover, d_segLabelsArray); } //Shift the remainder values to left to make room for metadata //Then determine metadata bits and set them numBlocks = (numValues + 1023) / 1024; dim3 setMetadataBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( setMetadata), dim3(setMetadataBlockDims), dim3(1024), 0, 0, numValues, d_remaindersArray, d_quotientsArray, d_locationsArray); //Scatter remainder values to the filter numBlocks = (numValues + 1023) / 1024; dim3 writeRemaindersBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( writeRemainders), dim3(writeRemaindersBlockDims), dim3(1024), 0, 0, numValues, qfilter, d_remaindersArray, d_locationsArray); //Set the is_occupied bits numBlocks = (numValues + 511) / 512; dim3 setOccupiedBitsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( setOccupiedBits), dim3(setOccupiedBitsBlockDims), dim3(512), 0, 0, numValues, qfilter, d_quotientsArray); //Calculate and print timing results hipEventRecord(stop); //hipProfilerStop(); hipEventSynchronize(stop); float filterBuildTime = 0; hipEventElapsedTime(&filterBuildTime, start, stop); //Free memory d_quotients.~device_vector<unsigned int>(); hipFree(d_remaindersArray); d_locations.~device_vector<unsigned int>(); d_segLabels.~device_vector<unsigned int>(); hipFree(d_credits); hipFree(d_offsets); hipFree(d_creditCarryover); hipEventDestroy(start); hipEventDestroy(stop); return filterBuildTime; } __global__ void 
segmentStartLocations(int numItems, unsigned int* segLabels, unsigned int* segStartLocations) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; if(idx == 0){ segStartLocations[0] = 0; return; } if(segLabels[idx] == segLabels[idx-1]) return; segStartLocations[segLabels[idx]] = idx; } __global__ void shiftSegments(int numItems, unsigned int* segStartLocations, unsigned int* locations, int numValues, bool* changesMade) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems || idx ==0) return; int arrayIndex = segStartLocations[idx]; int shift = locations[arrayIndex-1] - locations[arrayIndex] + 1; if(shift > 0){ int segmentLength; if(idx == (numItems - 1)){ segmentLength = numValues - segStartLocations[idx]; } else{ segmentLength = segStartLocations[idx+1] - segStartLocations[idx]; } for(int i = 0; i < segmentLength; i++){ locations[arrayIndex + i] += shift; } changesMade[0] = 1; } } void printArray(int numValues, int* array) { for(int i = 0; i < numValues/10; i++){ for(int j = 0; j < 10; j++){ printf("%i\t", array[i*10 + j]); } printf("\n"); } for(int i = 0; i < numValues % 10; i++){ printf("%i\t", array[((numValues/10)*10) + i]); } printf("\n"); } __host__ float bulkBuildSequentialShifts(quotient_filter qfilter, int numValues, unsigned int* d_insertValues, bool NoDuplicates) { //build a quotient filter by inserting all (or a large batch of) items all at once //compute locations by shifting one run at a time //exit when shifting stops /* 1. Compute all quotients & remainders List of pairs of (fq, fr) 2. Sort list by fq, then by fr within groups of same fq (or maybe in reverse order?) 3. Segmented scan of array of 1's 4. Add to quotients 5. Iterate: a. Compute shift at every boundary between runs and shift all items in run if needed b. Write to Bool to indicate shift happened b. Check if a shift happened; if not, done! 6. 
Write values to filter */ //Memory Allocation thrust::device_vector<unsigned int> d_quotients(numValues); thrust::fill(d_quotients.begin(), d_quotients.end(), 0); unsigned int* d_quotientsArray = thrust::raw_pointer_cast(&d_quotients[0]); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); //Hash input values int numBlocks = (numValues + 127) / 128; dim3 hashBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( hashInputs), dim3(hashBlockDims), dim3(128), 0, 0, numValues, qfilter, d_insertValues, d_quotientsArray); //store fingerprints in quotients array //Sort by fingerprint thrust::sort(d_quotients.begin(), d_quotients.end()); //Remove duplicates, if desired if(NoDuplicates == true){ thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintEnd = thrust::unique(d_quotients.begin(), d_quotients.end()); d_quotients.erase(fingerprintEnd, d_quotients.end()); numValues = d_quotients.end() - d_quotients.begin(); } //Divide fingerprints into quotients and remainders unsigned int* d_remaindersArray; hipMalloc((void**) &d_remaindersArray, numValues * sizeof(unsigned int)); hipMemset(d_remaindersArray, 0, numValues * sizeof(unsigned int)); thrust::device_vector<unsigned int> d_locations(numValues); thrust::fill_n(d_locations.begin(), numValues, 1); unsigned int* d_locationsArray = thrust::raw_pointer_cast(&d_locations[0]); numBlocks = (numValues + 767) / 768; dim3 quotientingBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( quotienting), dim3(quotientingBlockDims), dim3(768), 0, 0, numValues, qfilter.qbits, qfilter.rbits, d_quotientsArray, d_remaindersArray); //Segmented scan of array of 1's thrust::exclusive_scan_by_key(d_quotients.begin(), d_quotients.end(), d_locations.begin(), d_locations.begin()); //Add scanned values to quotients to find initial locations before shifts thrust::transform(d_quotients.begin(), d_quotients.end(), d_locations.begin(), d_locations.begin(), thrust::plus<unsigned int>()); //Label segments for grouping items thrust::device_vector<unsigned int> d_segStarts(numValues); thrust::fill(d_segStarts.begin(), d_segStarts.end(), 0); unsigned int* d_segStartsArray = thrust::raw_pointer_cast(&d_segStarts[0]); thrust::device_vector<unsigned int> d_segLabels(numValues); unsigned int* d_segLabelsArray = thrust::raw_pointer_cast(&d_segLabels[0]); numBlocks = (numValues + 767) / 768; dim3 findSegHeadsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( findSegmentHeads), dim3(findSegHeadsBlockDims), dim3(768), 0, 0, numValues, d_quotientsArray, d_segStartsArray); thrust::inclusive_scan(d_segStarts.begin(), d_segStarts.end(), d_segLabels.begin()); d_segStarts.~device_vector<unsigned int>(); int numSegments = d_segLabels[numValues - 1] + 1; unsigned int* d_segStartLocations; hipMalloc((void**) &d_segStartLocations, numSegments * sizeof(unsigned int)); //Create array with the location of first item in each run numBlocks = (numValues + 1023) / 1024; dim3 segStartLocationsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( segmentStartLocations), dim3(findSegHeadsBlockDims), dim3(1024), 0, 0, numValues, d_segLabelsArray, d_segStartLocations); bool* h_changesMade = new bool[1]; h_changesMade[0] = 1; bool* d_changesMade; hipMalloc((void**) &d_changesMade, sizeof(bool)); hipMemset(d_changesMade, 0, sizeof(bool)); while(h_changesMade[0] == 1){ h_changesMade[0] = 0; numBlocks = (numSegments + 191) / 192; dim3 shiftSegsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( shiftSegments), 
dim3(shiftSegsBlockDims), dim3(192), 0, 0, numSegments, d_segStartLocations, d_locationsArray, numValues, d_changesMade); hipMemcpy(h_changesMade, d_changesMade, sizeof(bool), hipMemcpyDeviceToHost); hipMemset(d_changesMade, 0, sizeof(bool)); } //Shift the remainder values to left to make room for metadata //Then determine metadata bits and set them numBlocks = (numValues + 1023) / 1024; dim3 setMetadataBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( setMetadata), dim3(setMetadataBlockDims), dim3(1024), 0, 0, numValues, d_remaindersArray, d_quotientsArray, d_locationsArray); //Scatter remainder values to the filter numBlocks = (numValues + 1023) / 1024; dim3 writeRemaindersBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( writeRemainders), dim3(writeRemaindersBlockDims), dim3(1024), 0, 0, numValues, qfilter, d_remaindersArray, d_locationsArray); //Set the is_occupied bits numBlocks = (numValues + 511) / 512; dim3 setOccupiedBitsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( setOccupiedBits), dim3(setOccupiedBitsBlockDims), dim3(512), 0, 0, numValues, qfilter, d_quotientsArray); //Calculate and print timing results hipEventRecord(stop); hipEventSynchronize(stop); float filterBuildTime = 0; hipEventElapsedTime(&filterBuildTime, start, stop); //Free memory d_quotients.~device_vector<unsigned int>(); hipFree(d_remaindersArray); d_locations.~device_vector<unsigned int>(); d_segLabels.~device_vector<unsigned int>(); hipFree(d_segStartLocations); delete[] h_changesMade; hipFree(d_changesMade); hipEventDestroy(start); hipEventDestroy(stop); return filterBuildTime; } __global__ void locateDeleteSuperclusters(int numItems, quotient_filter qfilter, unsigned int* superclusterStarts) { //marks the beginning of each "supercluster" -> really, for deletes this is same as a cluster unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; superclusterStarts[idx] = 0; if(idx == 0) return; if((!isEmptyGPU(getElementGPU(&qfilter, idx))) && (!isShiftedGPU(getElementGPU(&qfilter, idx)))){ superclusterStarts[idx] = 1; } } __global__ void deleteItemGPU(int numItems, quotient_filter qfilter, unsigned int* deleteValues, unsigned int* winnerIndices, bool* deleteFlags) { //deletes items from the quotient filter, shifting other items left if required unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; //check that there is an item assigned to be deleted for supercluster idx if(winnerIndices[idx] == NOT_FOUND){ return; } //determine which value is being added to the QF unsigned int originalIndex = winnerIndices[idx]; //reset winnerIndices for next bidding round winnerIndices[idx] = NOT_FOUND; deleteFlags[originalIndex] = 0; //want to remove this item from delete queue after this finishes unsigned int value = deleteValues[originalIndex]; //calculate fingerprint // unsigned int hashValue = FNVhashGPU(value, (1 << (qfilter.qbits + qfilter.rbits))); unsigned int hashValue = Normal_APHash(value, (1 << (qfilter.qbits + qfilter.rbits))); //separate into quotient and remainder unsigned int fq = (hashValue >> qfilter.rbits) & LOW_BIT_MASK(qfilter.qbits); unsigned int fr = hashValue & LOW_BIT_MASK(qfilter.rbits); unsigned int canonElement = getElementGPU(&qfilter, fq); if(!isOccupiedGPU(canonElement)){ //if canonical slot is not occupied, the item isn't in the filter; we're done. 
return; } //start bucket is fq unsigned int b = fq; //find beginning of cluster: while(isShiftedGPU(getElementGPU(&qfilter, b))){ b--; } //find start of run we're interested in: //slot counter starts at beginning of cluster unsigned int s = b; while(b != fq){ do{ s++; }while((isContinuationGPU(getElementGPU(&qfilter, s)))); //find end of current run do{ b++; }while((!isOccupiedGPU(getElementGPU(&qfilter, b)))); //count number of runs passed } //now s is first value in run of item to be deleted unsigned int runStart = s; //Search through the run's elements to find item needing to be deleted unsigned int remainder; do{ remainder = getRemainderGPU(getElementGPU(&qfilter, s)); if(remainder == fr){ //found it! break; } else if(remainder > fr){ //the item is not in the filter //nothing to delete here return; } s++; }while(isContinuationGPU(getElementGPU(&qfilter, s))); //If we searched entire run without finding it: if(remainder != fr){ return; //the item is not in the filter } if(!isContinuationGPU(getElementGPU(&qfilter, (s + 1)))){ do{ //if next item is a new run, add to run count b++; }while(!isOccupiedGPU(getElementGPU(&qfilter, b))); } //We have now located the item that needs to be deleted, stored in slot s. //Special conditions for deleted run starts if(s == runStart){ if(!isContinuationGPU(getElementGPU(&qfilter, (s + 1)))){ //the run is empty; clear the occupied bit setElementGPU(&qfilter, fq, clearOccupiedGPU(getElementGPU(&qfilter, fq))); } else{ //next item is now the first in the run setElementGPU(&qfilter, (s + 1), clearContinuationGPU(getElementGPU(&qfilter, (s + 1)))); } } //now check the item to the right to see whether it will need to be moved //if it was shifted, it is part of the same cluster and can be shifted left while(isShiftedGPU(getElementGPU(&qfilter, (s + 1)))){ //want to check if s = b for clearing shifted bit if(b == s){ //in this case, run about to be shifted into its correct slot -> unshifted setElementGPU(&qfilter, (s + 1), clearShiftedGPU(getElementGPU(&qfilter, (s + 1)))); } do{ unsigned int nextElement = getElementGPU(&qfilter, (s + 1)); if(isOccupiedGPU(getElementGPU(&qfilter, s))){ setElementGPU(&qfilter, s, setOccupiedGPU(nextElement)); } else{ setElementGPU(&qfilter, s, clearOccupiedGPU(nextElement)); } s++; }while((isContinuationGPU(getElementGPU(&qfilter, (s + 1))))); //shift the items in current run do{ b++; }while(!isOccupiedGPU(getElementGPU(&qfilter, b)) ); //keep track of current run } //Last item is always a new empty slot setElementGPU(&qfilter, s, 0); return; } __host__ float superclusterDeletes(quotient_filter qfilter, int numValues, unsigned int* d_deleteValues) { int filterSize = (1 << qfilter.qbits) * 1.1; //number of (r + 3)-bit slots in the filter //Allocate all necessary memory for deletes int* h_numItemsLeft = new int[1]; //counts number of elements in delete queue h_numItemsLeft[0] = numValues; int* d_numItemsLeft; hipMalloc((void**) &d_numItemsLeft, sizeof(int)); unsigned int* d_superclusterIndicators; //stores bits marking beginning of superclusters hipMalloc((void**) &d_superclusterIndicators, filterSize * sizeof(unsigned int)); //Variables for CUB function temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; unsigned int* d_superclusterLabels = NULL; //labels each slot with its supercluster number hipMalloc((void**) &d_superclusterLabels, filterSize * sizeof(unsigned int)); int* h_lastSuperclusterLabel = new int[1]; int maxNumSuperclusters = calcNumSlotsGPU(qfilter.qbits, qfilter.rbits) + 1; unsigned int* 
d_slotWinners; hipMalloc((void**) &d_slotWinners, maxNumSuperclusters * sizeof(unsigned int)); unsigned int* h_slotWinners = new unsigned int[maxNumSuperclusters]; for(int i = 0; i < maxNumSuperclusters; i++){ h_slotWinners[i] = NOT_FOUND; } hipMemcpy(d_slotWinners, h_slotWinners, maxNumSuperclusters * sizeof(unsigned int), hipMemcpyHostToDevice); bool* d_deleteFlags; //Flags for removing items from delete queue hipMalloc((void**) &d_deleteFlags, numValues * sizeof(bool)); unsigned int* d_deleteItemsQueue; hipMalloc((void**) &d_deleteItemsQueue, numValues * sizeof(unsigned int)); hipMemcpy(d_deleteItemsQueue, d_deleteValues, numValues * sizeof(unsigned int), hipMemcpyDeviceToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); do{ //Find supercluster array: int numBlocks = (filterSize + 1023) / 1024; dim3 deleteSCBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( locateDeleteSuperclusters), dim3(deleteSCBlockDims), dim3(1024), 0, 0, filterSize, qfilter, d_superclusterIndicators); //CUB Inclusive Prefix Sum d_temp_storage = NULL; temp_storage_bytes = 0; CubDebugExit(hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_superclusterIndicators, d_superclusterLabels, filterSize)); hipMalloc(&d_temp_storage, temp_storage_bytes); CubDebugExit(hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_superclusterIndicators, d_superclusterLabels, filterSize)); //Determine how many superclusters there are hipMemcpy(h_lastSuperclusterLabel, d_superclusterLabels + (filterSize - 1), sizeof(unsigned int), hipMemcpyDeviceToHost); int numSuperclusters = h_lastSuperclusterLabel[0] + 1; //Pick one element per supercluster to delete numBlocks = (h_numItemsLeft[0] + 127) / 128; dim3 biddingBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( superclusterBidding), dim3(biddingBlockDims), dim3(128), 0, 0, h_numItemsLeft[0], qfilter, d_deleteItemsQueue, d_superclusterLabels, d_deleteFlags, d_slotWinners); //Insert items into QF numBlocks = (numSuperclusters + 1023) / 1024; dim3 deleteBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( deleteItemGPU), dim3(deleteBlockDims), dim3(1024), 0, 0, numSuperclusters, qfilter, d_deleteItemsQueue, d_slotWinners, d_deleteFlags); //Remove successfully deleted items from the queue d_temp_storage = NULL; temp_storage_bytes = 0; CubDebugExit(hipcub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_deleteItemsQueue, d_deleteFlags, d_deleteItemsQueue, d_numItemsLeft, h_numItemsLeft[0])); hipMalloc(&d_temp_storage, temp_storage_bytes); CubDebugExit(hipcub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_deleteItemsQueue, d_deleteFlags, d_deleteItemsQueue, d_numItemsLeft, h_numItemsLeft[0])); hipMemcpy(h_numItemsLeft, d_numItemsLeft, sizeof(int), hipMemcpyDeviceToHost); }while(h_numItemsLeft[0] > 0); hipEventRecord(stop); //Calculate timing results hipEventSynchronize(stop); float deleteTime = 0; hipEventElapsedTime(&deleteTime, start, stop); //Free memory delete[] h_numItemsLeft; hipFree(d_numItemsLeft); hipFree(d_superclusterIndicators); hipFree(d_temp_storage); hipFree(d_superclusterLabels); hipFree(d_slotWinners); delete[] h_slotWinners; hipFree(d_deleteFlags); hipFree(d_deleteItemsQueue); hipEventDestroy(start); hipEventDestroy(stop); return deleteTime; } __global__ void findSegmentStarts(int numItems, unsigned int q, unsigned int* quotients, unsigned int* segmentStarts) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x 
* blockDim.x; if(idx >= numItems || idx ==0) return; unsigned int currentSegment = quotients[idx] / q; unsigned int previousSegment = quotients[idx - 1] / q; if(currentSegment != previousSegment){ segmentStarts[currentSegment] = idx; } } __global__ void layout(int numItems, unsigned int qbits, unsigned int* quotients, unsigned int* segmentAssignments, int* shift, int* overflow, bool* changesMade, int numInsertValues) { //computes layout for the idx-th segment of the filter unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; int firstNonemptySegment = quotients[0] / qbits; if(idx < firstNonemptySegment){ overflow[idx] = 0; return; } int segStart; int firstItemIdx; if(idx == 0){ segStart = 0; if(quotients[0] < qbits){ firstItemIdx = 0; } else{ //no items in segment 0 overflow[0] = 0; return; } } else{ segStart = idx * qbits + shift[idx-1]; //start the layout to right of shifted values from previous segment firstItemIdx = segmentAssignments[idx]; if(firstItemIdx == 0 && (quotients[0] < segStart)){ //the segment has no items overflow[idx] = 0; return; } } int lastItemIdx; if(idx == (numItems - 1)){ //last segment lastItemIdx = numInsertValues - 1; } else{ lastItemIdx = segmentAssignments[idx + 1] - 1; int j = idx + 1; while(lastItemIdx == -1 && j < numItems){ //in case of empty segments to the right if(j == numItems - 1){ lastItemIdx = numInsertValues - 1; } else{ lastItemIdx = segmentAssignments[j] - 1; } j++; } } int numSegItems = lastItemIdx - firstItemIdx + 1; if(numSegItems <= 0){ overflow[idx] = 0; return; } int maxSlot = segStart; //maxSlot = next open slot for(int i = firstItemIdx; i <= lastItemIdx; i++){ if(quotients[i] > maxSlot) maxSlot = quotients[i]; maxSlot++; } int segEnd = ((idx + 1) * qbits) - 1; int segmentOverflow = (maxSlot - 1) - segEnd; if(segmentOverflow > 0){ overflow[idx] = segmentOverflow; if(segmentOverflow > shift[idx]){ //check if there has been change from last iteration changesMade[0] = 1; } } else{ overflow[idx] = 0; } } __global__ void segmentedQFWrite(int numItems, quotient_filter qfilter, unsigned int* quotients, unsigned int* remainders, unsigned int* segmentAssignments, int* shift, int numInsertValues) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; int segStart; int firstItemIdx; if(idx == 0){ segStart = 0; if(quotients[0] < qfilter.qbits){ firstItemIdx = 0; } else{ //no items in segment 0 return; } } else{ segStart = idx * qfilter.qbits + shift[idx-1]; //start the layout to right of shifted values from previous segment firstItemIdx = segmentAssignments[idx]; if(firstItemIdx == 0 && (quotients[0] < segStart)){ //the segment has no items return; } } int lastItemIdx; if(idx == (numItems - 1)){ lastItemIdx = numInsertValues - 1; } else{ lastItemIdx = segmentAssignments[idx + 1] - 1; int j = idx + 1; while(lastItemIdx == -1 && j < numItems){ //in case of empty segments to the right if(j == numItems - 1){ lastItemIdx = numInsertValues - 1; } else{ lastItemIdx = segmentAssignments[j] - 1; } j++; } } int numSegItems = lastItemIdx - firstItemIdx + 1; if(numSegItems <= 0){ return; } int maxSlot = segStart; //maxSlot = location of last/currently inserted item for(int i = firstItemIdx; i <= lastItemIdx; i++){ unsigned int currentRemainder = remainders[i] << 3; if(quotients[i] >= maxSlot){ //item is not shifted maxSlot = quotients[i]; setElementGPU(&qfilter, maxSlot, currentRemainder); } else{ currentRemainder = 
setShiftedGPU(currentRemainder); if(quotients[i] == quotients[i - 1]) currentRemainder = setContinuationGPU(currentRemainder); setElementGPU(&qfilter, maxSlot, currentRemainder); } maxSlot++; } } __host__ float bulkBuildSegmentedLayouts(quotient_filter qfilter, int numValues, unsigned int* d_insertValues, bool NoDuplicates) { //build a quotient filter by partitioning into segments, inserting items into segments, then computing overflow /* 1. Compute all fingerprints 2. Sort list of fingerprints 3. Quotienting 4. Assign items to a segment 5. Compute layouts and overflow 6. Repeat until convergence 7. Write final values to filter */ //Memory Allocation thrust::device_vector<unsigned int> d_quotients(numValues); thrust::fill(d_quotients.begin(), d_quotients.end(), 0); unsigned int* d_quotientsArray = thrust::raw_pointer_cast(&d_quotients[0]); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //hipProfilerStart(); hipEventRecord(start); //Hash input values int numBlocks = (numValues + 127) / 128; dim3 hashBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( hashInputs), dim3(hashBlockDims), dim3(128), 0, 0, numValues, qfilter, d_insertValues, d_quotientsArray); //store fingerprints in quotients array //Sort by fingerprint thrust::sort(d_quotients.begin(), d_quotients.end()); //Remove duplicates, if desired if(NoDuplicates == true){ thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintEnd = thrust::unique(d_quotients.begin(), d_quotients.end()); d_quotients.erase(fingerprintEnd, d_quotients.end()); numValues = d_quotients.end() - d_quotients.begin(); } //Divide fingerprints into quotients and remainders unsigned int* d_remaindersArray; hipMalloc((void**) &d_remaindersArray, numValues * sizeof(unsigned int)); hipMemset(d_remaindersArray, 0, numValues * sizeof(unsigned int)); numBlocks = (numValues + 767) / 768; dim3 quotientingBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( quotienting), dim3(quotientingBlockDims), dim3(768), 0, 0, numValues, qfilter.qbits, qfilter.rbits, d_quotientsArray, d_remaindersArray); unsigned int q = qfilter.qbits; int numSegments = ((1 << q) / q) + 1; //Determine which items belong in each segment unsigned int* d_segmentStarts; hipMalloc((void**) &d_segmentStarts, numSegments * sizeof(unsigned int)); hipMemset(d_segmentStarts, 0, numSegments * sizeof(unsigned int)); numBlocks = (numValues + 255) / 256; dim3 findSegStartsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( findSegmentStarts), dim3(findSegStartsBlockDims), dim3(256), 0, 0, numValues, q, d_quotientsArray, d_segmentStarts); //Each segment has an input shift value and outputs overflow value int* d_shifts; hipMalloc((void**) &d_shifts, numSegments * sizeof(int)); hipMemset(d_shifts, 0, numSegments * sizeof(int)); int* d_overflows; hipMalloc((void**) &d_overflows, numSegments * sizeof(int)); hipMemset(d_overflows, 0, numSegments * sizeof(int)); bool* h_changesMade = new bool[1]; h_changesMade[0] = 1; bool* d_changesMade; hipMalloc((void**) &d_changesMade, sizeof(bool)); hipMemset(d_changesMade, 0, sizeof(bool)); while(h_changesMade[0] == 1){ h_changesMade[0] = 0; //since I set d_changesMade to 0 already might not need this //copy overflows into shifts //shifts[idx] represents the shift caused by segment idx, to be carried over into segment idx+1 hipMemcpy(d_shifts, d_overflows, sizeof(int) * numSegments, hipMemcpyDeviceToDevice); //Launch one thread per segment //Layout numBlocks = (numSegments + 255) / 256; dim3 layoutBlockDims((numBlocks 
+ 31) / 32, 32); hipLaunchKernelGGL(( layout), dim3(layoutBlockDims), dim3(256), 0, 0, numSegments, qfilter.qbits, d_quotientsArray, d_segmentStarts, d_shifts, d_overflows, d_changesMade, numValues); hipMemcpy(h_changesMade, d_changesMade, sizeof(bool), hipMemcpyDeviceToHost); hipMemset(d_changesMade, 0, sizeof(bool)); } //Write final values to filter numBlocks = (numSegments + 127) / 128; dim3 segmentedQFWriteBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( segmentedQFWrite), dim3(segmentedQFWriteBlockDims), dim3(128), 0, 0, numSegments, qfilter, d_quotientsArray, d_remaindersArray, d_segmentStarts, d_shifts, numValues); numBlocks = (numValues + 511) / 512; dim3 setOccupiedBitsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( setOccupiedBits), dim3(setOccupiedBitsBlockDims), dim3(512), 0, 0, numValues, qfilter, d_quotientsArray); hipEventRecord(stop); //Calculate timing results hipEventSynchronize(stop); float buildTime = 0; hipEventElapsedTime(&buildTime, start, stop); //Free memory d_quotients.~device_vector<unsigned int>(); hipFree(d_remaindersArray); hipFree(d_segmentStarts); hipFree(d_shifts); hipFree(d_overflows); delete[] h_changesMade; hipFree(d_changesMade); hipEventDestroy(start); hipEventDestroy(stop); return buildTime; } __global__ void extractQuotients(int numItems, quotient_filter qfilter, unsigned int* fingerprints, bool* emptySlotFlags) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; unsigned int element = getElementGPU(&qfilter, idx); //Empty slots: if(isEmptyGPU(element)){ emptySlotFlags[idx] = true; return; } //Unshifted elements(beginning of cluster): if(!isShiftedGPU(element)){ fingerprints[idx] = (idx << qfilter.rbits) | getRemainderGPU(element); return; } //Shifted elements: //Find beginning of cluster: unsigned int b = idx; do{ b--; }while(isShiftedGPU(getElementGPU(&qfilter, b))); //Step through cluster, counting the runs: unsigned int s = b; while(s <= idx){ do{ s++; }while((isContinuationGPU(getElementGPU(&qfilter, s)))); //find end of each run if(s > idx) break; do{ b++; }while((!isOccupiedGPU(getElementGPU(&qfilter, b)))); //keeping track of canonical slot } fingerprints[idx] = (b << qfilter.rbits) | getRemainderGPU(element); } __host__ float insertViaMerge(quotient_filter qfilter, unsigned int* d_insertedValues, int numOldValues, unsigned int* d_newValues, int numNewValues, bool NoDuplicates) { //d_insertedValues and numOldValues are just for checking results of fingerprint extraction. They are not needed for the merge operation. 
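//Overview of the merge-based insert below:
//  1. Recover the sorted fingerprint list already stored in the filter
//     (extractQuotients plus stream compaction of the empty-slot flags).
//  2. Hash the batch of new values and sort them into a second fingerprint list.
//  3. Merge the two sorted lists with mgpu::merge.
//  4. Clear the table and rebuild it from the merged list using the same
//     segmented-layouts procedure as bulkBuildSegmentedLayouts.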
// printQuotientFilterGPU(&qfilter); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); //Extract fingerprints from quotient filter int numSlots = calcNumSlotsGPU(qfilter.qbits, qfilter.rbits); thrust::device_vector<bool> d_emptySlotFlags(numSlots); thrust::fill(d_emptySlotFlags.begin(), d_emptySlotFlags.end(), 0); bool* d_emptySlotFlagsArray = thrust::raw_pointer_cast(&d_emptySlotFlags[0]); thrust::device_vector<unsigned int> d_fingerprintsBySlot(numSlots); thrust::fill(d_fingerprintsBySlot.begin(), d_fingerprintsBySlot.end(), 0); unsigned int* d_fingerprintsBySlotArray = thrust::raw_pointer_cast(&d_fingerprintsBySlot[0]); int numBlocks = (numSlots + 191) / 192; dim3 extractQuotientsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( extractQuotients), dim3(extractQuotientsBlockDims), dim3(192), 0, 0, numSlots, qfilter, d_fingerprintsBySlotArray, d_emptySlotFlagsArray); thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintsEnd = thrust::remove_if(d_fingerprintsBySlot.begin(), d_fingerprintsBySlot.end(), d_emptySlotFlags.begin(), thrust::identity<bool>()); d_fingerprintsBySlot.erase(fingerprintsEnd, d_fingerprintsBySlot.end()); int numExtractedValues = d_fingerprintsBySlot.end() - d_fingerprintsBySlot.begin(); //Merge with new array //Hash and quotientize new values to insert thrust::device_vector<unsigned int> d_newFingerprints(numNewValues); thrust::fill(d_newFingerprints.begin(), d_newFingerprints.end(), 0); unsigned int* d_newFingerprintsArray = thrust::raw_pointer_cast(&d_newFingerprints[0]); numBlocks = (numNewValues + 127) / 128; dim3 hashBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( hashInputs), dim3(hashBlockDims), dim3(128), 0, 0, numNewValues, qfilter, d_newValues, d_newFingerprintsArray); //Sort by fingerprint thrust::sort(d_newFingerprints.begin(), d_newFingerprints.end()); //Merge d_newValues with extracted quotients and remainders mgpu::standard_context_t context(false); int outputSize = numExtractedValues + numNewValues; mgpu::mem_t<unsigned int> d_fingerprintsOutput(outputSize, context); mgpu::mem_t<unsigned int> d_newFingerprintsMem = copy_to_mem(d_newFingerprintsArray, numNewValues, context); mgpu::mem_t<unsigned int> d_extractedFingerprintsMem = copy_to_mem(d_fingerprintsBySlotArray, numExtractedValues, context); mgpu::merge(d_extractedFingerprintsMem.data(), numExtractedValues, d_newFingerprintsMem.data(), numNewValues, d_fingerprintsOutput.data(), mgpu::less_t<unsigned int>(), context); unsigned int* d_combinedQuotients = d_fingerprintsOutput.data(); //Rebuild filter using segmented layouts method //Clear old filter hipMemset(qfilter.table, 0, numSlots * sizeof(unsigned char)); //Remove duplicates, if desired thrust::device_vector<unsigned int> d_thrustQuotients(d_combinedQuotients, d_combinedQuotients + outputSize); //must copy values to get them into thrust device_vector if(NoDuplicates == true){ thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintEnd = thrust::unique(d_thrustQuotients.begin(), d_thrustQuotients.end()); d_thrustQuotients.erase(fingerprintEnd, d_thrustQuotients.end()); outputSize = d_thrustQuotients.end() - d_thrustQuotients.begin(); } d_combinedQuotients = thrust::raw_pointer_cast(&d_thrustQuotients[0]); //Divide fingerprints into quotients and remainders unsigned int* d_combinedRemainders; hipMalloc((void**) &d_combinedRemainders, outputSize * sizeof(unsigned int)); hipMemset(d_combinedRemainders, 0, outputSize * sizeof(unsigned 
int)); numBlocks = (outputSize + 767) / 768; dim3 quotientingBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( quotienting), dim3(quotientingBlockDims), dim3(768), 0, 0, outputSize, qfilter.qbits, qfilter.rbits, d_combinedQuotients, d_combinedRemainders); unsigned int q = qfilter.qbits; int numSegments = ((1 << q) / q) + 1; //Determine which items belong in each segment unsigned int* d_segmentStarts; hipMalloc((void**) &d_segmentStarts, numSegments * sizeof(unsigned int)); hipMemset(d_segmentStarts, 0, numSegments * sizeof(unsigned int)); numBlocks = (outputSize + 255) / 256; dim3 findSegStartsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( findSegmentStarts), dim3(findSegStartsBlockDims), dim3(256), 0, 0, outputSize, q, d_combinedQuotients, d_segmentStarts); //Each segment has an input shift value and outputs overflow value int* d_shifts; hipMalloc((void**) &d_shifts, numSegments * sizeof(int)); hipMemset(d_shifts, 0, numSegments * sizeof(int)); int* d_overflows; hipMalloc((void**) &d_overflows, numSegments * sizeof(int)); hipMemset(d_overflows, 0, numSegments * sizeof(int)); bool* h_changesMade = new bool[1]; h_changesMade[0] = 1; bool* d_changesMade; hipMalloc((void**) &d_changesMade, sizeof(bool)); hipMemset(d_changesMade, 0, sizeof(bool)); while(h_changesMade[0] == 1){ //copy overflows into shifts hipMemcpy(d_shifts, d_overflows, sizeof(int) * numSegments, hipMemcpyDeviceToDevice); //Layout numBlocks = (numSegments + 255) / 256; dim3 layoutBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( layout), dim3(layoutBlockDims), dim3(256), 0, 0, numSegments, qfilter.qbits, d_combinedQuotients, d_segmentStarts, d_shifts, d_overflows, d_changesMade, outputSize); hipMemcpy(h_changesMade, d_changesMade, sizeof(bool), hipMemcpyDeviceToHost); hipMemset(d_changesMade, 0, sizeof(bool)); } //Write final values to filter numBlocks = (numSegments + 127) / 128; dim3 segmentedQFWriteBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( segmentedQFWrite), dim3(segmentedQFWriteBlockDims), dim3(128), 0, 0, numSegments, qfilter, d_combinedQuotients, d_combinedRemainders, d_segmentStarts, d_shifts, outputSize); numBlocks = (outputSize + 511) / 512; dim3 setOccupiedBitsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( setOccupiedBits), dim3(setOccupiedBitsBlockDims), dim3(512), 0, 0, outputSize, qfilter, d_combinedQuotients); //Timing hipEventRecord(stop); hipEventSynchronize(stop); float rebuildTime = 0; hipEventElapsedTime(&rebuildTime, start, stop); //Free Memory d_emptySlotFlags.~device_vector<bool>(); d_fingerprintsBySlot.~device_vector<unsigned int>(); d_newFingerprints.~device_vector<unsigned int>(); d_fingerprintsOutput.~mem_t<unsigned int>(); d_newFingerprintsMem.~mem_t<unsigned int>(); d_extractedFingerprintsMem.~mem_t<unsigned int>(); hipFree(d_combinedRemainders); hipFree(d_segmentStarts); hipFree(d_shifts); hipFree(d_overflows); delete[] h_changesMade; hipFree(d_changesMade); return rebuildTime; } __host__ float mergeTwoFilters(quotient_filter qfilter1, quotient_filter qfilter2, bool NoDuplicates) { //merges filters qfilter1 and qfilter2 and outputs the result to qfilter1 //Check that filters are the same size if(qfilter1.qbits != qfilter2.qbits || qfilter1.rbits != qfilter2.rbits){ printf("Error: two filters to be merged must have same number of quotient and remainder bits\n"); return 0.0f; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); //Extract fingerprints from first quotient filter int numSlots = 
calcNumSlotsGPU(qfilter1.qbits, qfilter1.rbits); thrust::device_vector<bool> d_emptySlotFlags(numSlots); thrust::fill(d_emptySlotFlags.begin(), d_emptySlotFlags.end(), 0); bool* d_emptySlotFlagsArray = thrust::raw_pointer_cast(&d_emptySlotFlags[0]); thrust::device_vector<unsigned int> d_fingerprintsBySlot1(numSlots); thrust::fill(d_fingerprintsBySlot1.begin(), d_fingerprintsBySlot1.end(), 0); unsigned int* d_fingerprintsBySlotArray1 = thrust::raw_pointer_cast(&d_fingerprintsBySlot1[0]); int numBlocks = (numSlots + 191) / 192; dim3 extractQuotientsBlockDims1((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( extractQuotients), dim3(extractQuotientsBlockDims1), dim3(192), 0, 0, numSlots, qfilter1, d_fingerprintsBySlotArray1, d_emptySlotFlagsArray); thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintsEnd = thrust::remove_if(d_fingerprintsBySlot1.begin(), d_fingerprintsBySlot1.end(), d_emptySlotFlags.begin(), thrust::identity<bool>()); d_fingerprintsBySlot1.erase(fingerprintsEnd, d_fingerprintsBySlot1.end()); int numExtractedValues1 = d_fingerprintsBySlot1.end() - d_fingerprintsBySlot1.begin(); //Extract fingerprints from second quotient filter thrust::fill(d_emptySlotFlags.begin(), d_emptySlotFlags.end(), 0); thrust::device_vector<unsigned int> d_fingerprintsBySlot2(numSlots); thrust::fill(d_fingerprintsBySlot2.begin(), d_fingerprintsBySlot2.end(), 0); unsigned int* d_fingerprintsBySlotArray2 = thrust::raw_pointer_cast(&d_fingerprintsBySlot2[0]); numBlocks = (numSlots + 191) / 192; dim3 extractQuotientsBlockDims2((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( extractQuotients), dim3(extractQuotientsBlockDims2), dim3(192), 0, 0, numSlots, qfilter2, d_fingerprintsBySlotArray2, d_emptySlotFlagsArray); fingerprintsEnd = thrust::remove_if(d_fingerprintsBySlot2.begin(), d_fingerprintsBySlot2.end(), d_emptySlotFlags.begin(), thrust::identity<bool>()); d_fingerprintsBySlot2.erase(fingerprintsEnd, d_fingerprintsBySlot2.end()); int numExtractedValues2 = d_fingerprintsBySlot2.end() - d_fingerprintsBySlot2.begin(); //Merge arrays of extracted values mgpu::standard_context_t context(false); int outputSize = numExtractedValues1 + numExtractedValues2; mgpu::mem_t<unsigned int> d_fingerprintsOutput(outputSize, context); mgpu::mem_t<unsigned int> d_extractedFingerprintsMem1 = copy_to_mem(d_fingerprintsBySlotArray1, numExtractedValues1, context); mgpu::mem_t<unsigned int> d_extractedFingerprintsMem2 = copy_to_mem(d_fingerprintsBySlotArray2, numExtractedValues2, context); mgpu::merge(d_extractedFingerprintsMem1.data(), numExtractedValues1, d_extractedFingerprintsMem2.data(), numExtractedValues2, d_fingerprintsOutput.data(), mgpu::less_t<unsigned int>(), context); unsigned int* d_combinedQuotients = d_fingerprintsOutput.data(); //Rebuild filter using segmented layouts method //Clear old filter hipMemset(qfilter1.table, 0, numSlots * sizeof(unsigned char)); //Remove duplicates, if desired thrust::device_vector<unsigned int> d_thrustQuotients(d_combinedQuotients, d_combinedQuotients + outputSize); //must copy values to get them into thrust device_vector if(NoDuplicates == true){ thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintEnd = thrust::unique(d_thrustQuotients.begin(), d_thrustQuotients.end()); d_thrustQuotients.erase(fingerprintEnd, d_thrustQuotients.end()); outputSize = d_thrustQuotients.end() - d_thrustQuotients.begin(); } d_combinedQuotients = thrust::raw_pointer_cast(&d_thrustQuotients[0]); //Divide fingerprints into quotients and remainders 
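//Each (qbits + rbits)-bit fingerprint f is split by the quotienting kernel as
//  quotient  fq = (f >> rbits) & LOW_BIT_MASK(qbits)   -> canonical slot index
//  remainder fr =  f & LOW_BIT_MASK(rbits)             -> value stored in that slot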
unsigned int* d_combinedRemainders; hipMalloc((void**) &d_combinedRemainders, outputSize * sizeof(unsigned int)); hipMemset(d_combinedRemainders, 0, outputSize * sizeof(unsigned int)); numBlocks = (outputSize + 767) / 768; dim3 quotientingBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( quotienting), dim3(quotientingBlockDims), dim3(768), 0, 0, outputSize, qfilter1.qbits, qfilter1.rbits, d_combinedQuotients, d_combinedRemainders); unsigned int q = qfilter1.qbits; int numSegments = ((1 << q) / q) + 1; //Determine which items belong in each segment unsigned int* d_segmentStarts; hipMalloc((void**) &d_segmentStarts, numSegments * sizeof(unsigned int)); hipMemset(d_segmentStarts, 0, numSegments * sizeof(unsigned int)); numBlocks = (outputSize + 255) / 256; dim3 findSegStartsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( findSegmentStarts), dim3(findSegStartsBlockDims), dim3(256), 0, 0, outputSize, q, d_combinedQuotients, d_segmentStarts); //Each segment has an input shift value and outputs overflow value int* d_shifts; hipMalloc((void**) &d_shifts, numSegments * sizeof(int)); hipMemset(d_shifts, 0, numSegments * sizeof(int)); int* d_overflows; hipMalloc((void**) &d_overflows, numSegments * sizeof(int)); hipMemset(d_overflows, 0, numSegments * sizeof(int)); bool* h_changesMade = new bool[1]; h_changesMade[0] = 1; bool* d_changesMade; hipMalloc((void**) &d_changesMade, sizeof(bool)); hipMemset(d_changesMade, 0, sizeof(bool)); while(h_changesMade[0] == 1){ //copy overflows into shifts hipMemcpy(d_shifts, d_overflows, sizeof(int) * numSegments, hipMemcpyDeviceToDevice); //Layout numBlocks = (numSegments + 255) / 256; dim3 layoutBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( layout), dim3(layoutBlockDims), dim3(256), 0, 0, numSegments, qfilter1.qbits, d_combinedQuotients, d_segmentStarts, d_shifts, d_overflows, d_changesMade, outputSize); hipMemcpy(h_changesMade, d_changesMade, sizeof(bool), hipMemcpyDeviceToHost); hipMemset(d_changesMade, 0, sizeof(bool)); } //Write final values to filter numBlocks = (numSegments + 127) / 128; dim3 segmentedQFWriteBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( segmentedQFWrite), dim3(segmentedQFWriteBlockDims), dim3(128), 0, 0, numSegments, qfilter1, d_combinedQuotients, d_combinedRemainders, d_segmentStarts, d_shifts, outputSize); numBlocks = (outputSize + 511) / 512; dim3 setOccupiedBitsBlockDims((numBlocks + 31) / 32, 32); hipLaunchKernelGGL(( setOccupiedBits), dim3(setOccupiedBitsBlockDims), dim3(512), 0, 0, outputSize, qfilter1, d_combinedQuotients); //Timing hipEventRecord(stop); hipEventSynchronize(stop); float rebuildTime = 0; hipEventElapsedTime(&rebuildTime, start, stop); //Free Memory d_emptySlotFlags.~device_vector<bool>(); d_fingerprintsBySlot1.~device_vector<unsigned int>(); d_fingerprintsBySlot2.~device_vector<unsigned int>(); d_fingerprintsOutput.~mem_t<unsigned int>(); d_extractedFingerprintsMem1.~mem_t<unsigned int>(); d_extractedFingerprintsMem2.~mem_t<unsigned int>(); hipFree(d_combinedRemainders); hipFree(d_segmentStarts); hipFree(d_shifts); hipFree(d_overflows); delete[] h_changesMade; hipFree(d_changesMade); return rebuildTime; } }
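//Illustrative usage sketch (not part of the filter library): one way a caller
//might build a filter from device-resident keys and then run sorted lookups.
//Array names, sizes, and the header name are assumptions, not taken from this file.
/*
#include "sqf.cuh"   //header name assumed; adjust to the hipified header if it differs
using namespace sqf_filter;

void exampleUsage(unsigned int* d_keys, unsigned int* d_queries,
                  unsigned int* d_results, int n)
{
    quotient_filter qf;
    initFilterGPU(&qf, 20, 5);                        //2^20 canonical slots, 5-bit remainders (8-bit slots)
    bulkBuildSegmentedLayouts(qf, n, d_keys, true);   //bulk build, dropping duplicate fingerprints
    launchSortedLookups(qf, n, d_queries, d_results); //d_results[i] == NOT_FOUND if the query is absent
    hipFree(qf.table);
}
*/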
// 1ff467c555dd17ad08d07a008962d15bda6f9ab2.cu
//quotientFilter.cu /* * Copyright 2021 Regents of the University of California * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <limits.h> #include <assert.h> #include <cuda_profiler_api.h> #include "sqf.cuh" #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/scan.h> #include <thrust/unique.h> #include "cub/cub.cuh" #include "../moderngpu/src/moderngpu/kernel_merge.hxx" namespace sqf_filter { #ifndef LOW_BIT_MASK #define LOW_BIT_MASK(n) ((1U << n) - 1U) #endif #ifndef NOT_FOUND #define NOT_FOUND UINT_MAX #endif __device__ __host__ size_t calcNumSlotsGPU(unsigned int q, unsigned int r) { size_t tableBits = (1 << q) * (r + 3); size_t tableSlots = tableBits / 8; return tableSlots * 1.1; //allow an extra 10% for overflow } __host__ void initFilterGPU(struct quotient_filter* qf, unsigned int q, unsigned int r) { assert((q + r) <= 32); //need to be able to store fingerprints in unsigned int assert(((r + 3) % 8) == 0); //slot size is one or more full bytes qf->qbits = q; qf->rbits = r; qf->bytesPerElement = (r + 3) / 8; size_t slots = calcNumSlotsGPU(q, r); unsigned char* d_filterTable; cudaMalloc((void**) &d_filterTable, slots * sizeof(unsigned char)); qf->table = d_filterTable; printf("SQF using %llu bytes\n",slots*sizeof(unsigned char)); } __device__ bool isOccupiedGPU(unsigned int element) { return element & 4; } __device__ bool isContinuationGPU(unsigned int element) { return element & 2; } __device__ bool isShiftedGPU(unsigned int element) { return element & 1; } __device__ bool isEmptyGPU(unsigned int element) { return ((element & 7) == 0); } __device__ unsigned int setOccupiedGPU(unsigned int element) { return element | 4; } __device__ unsigned int clearOccupiedGPU(unsigned int element) { return element & ~4; } __device__ unsigned int setContinuationGPU(unsigned int element) { return element | 2; } __device__ unsigned int clearContinuationGPU(unsigned int element) { return element & ~2; } __device__ unsigned int setShiftedGPU(unsigned int element) { return element | 1; } __device__ unsigned int clearShiftedGPU(unsigned int element) { return element & ~1; } __device__ __host__ unsigned int getRemainderGPU(unsigned int element) { return element >> 3; } __device__ unsigned int isolateOccupiedBit(unsigned int element) { return element & 4; } __device__ __host__ unsigned int FNVhashGPU(unsigned int value, unsigned int tableSize) { unsigned char p[4]; p[0] = (value >> 24) & 0xFF; p[1] = (value >> 16) & 0xFF; p[2] = (value >> 8) & 0xFF; p[3] = value & 0xFF; unsigned int h = 2166136261; for (int i = 0; i < 4; i++){ h = (h * 16777619) ^ p[i]; } return h % tableSize; } __device__ __host__ unsigned int Normal_APHash(unsigned int value, unsigned int tableSize) { unsigned char p[4]; p[0] = (value >> 24) & 0xFF; p[1] = (value >> 16) & 0xFF; p[2] = (value >> 8) & 0xFF; p[3] = value & 0xFF; unsigned int hash = 0xAAAAAAAA; for (int i = 0; i < 4; i++){ hash ^= ((i & 1) == 0) ? 
((hash << 7) ^ p[i] ^ (hash >> 3)) : (~((hash << 11) ^ p[i] ^ (hash >> 5))); } return hash % tableSize; } __device__ __host__ unsigned int getElementGPU(struct quotient_filter* qf, unsigned int index) { unsigned int startSlot = index * qf->bytesPerElement; unsigned int element = qf->table[startSlot]; for (int i = 1; i < qf->bytesPerElement; i++){ element = (element << 8) | qf->table[startSlot + i]; } return element; } __device__ void setElementGPU(struct quotient_filter* qf, unsigned int index, unsigned int value) { unsigned int startSlot = index * qf->bytesPerElement; for (int i = 0; i < qf->bytesPerElement; i++){ unsigned int shift = qf->bytesPerElement - 1 - i; qf->table[startSlot + i] = (value >> (8 * shift)) & LOW_BIT_MASK(8); } } __device__ unsigned int findRunStartGPU(struct quotient_filter* qf, unsigned int fq) { unsigned int numElements = (1 << qf->qbits) * 1.1; //start bucket is fq unsigned int b = fq; //find beginning of cluster: while(isShiftedGPU(getElementGPU(qf, b))){ b--; } //find start of run we're interested in: //slot counter starts at beginning of cluster unsigned int s = b; while(b != fq){ do{ s++; }while((isContinuationGPU(getElementGPU(qf, s))) && (s < numElements)); //find end of current run do{ b++; }while((!isOccupiedGPU(getElementGPU(qf, b))) && (b < numElements)); //count number of runs passed } //now s is first value in correct run return s; } __device__ void insertItemHereGPU(struct quotient_filter* qf, unsigned int index, unsigned int value) { unsigned int previousElement; unsigned int newElement = value; bool empty = false; while(!empty){ previousElement = getElementGPU(qf, index); empty = isEmptyGPU(previousElement); previousElement = setShiftedGPU(previousElement); if(isOccupiedGPU(previousElement)){ //Need to preserve correct is_occupied bits previousElement = clearOccupiedGPU(previousElement); newElement = setOccupiedGPU(newElement); } setElementGPU(qf, index, newElement); newElement = previousElement; index++; } } __global__ void lookUp(int numItems, struct quotient_filter qfilter, unsigned int* hashValues, unsigned int* slotValues) { //returns NOT_FOUND (UINT_MAX) in slotValues[idx] if value is not in the filter, and returns the location of the remainder if it is in the filter unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; unsigned int hashValue = hashValues[idx]; //separate into quotient and remainder unsigned int fq = (hashValue >> qfilter.rbits) & LOW_BIT_MASK(qfilter.qbits); unsigned int fr = hashValue & LOW_BIT_MASK(qfilter.rbits); unsigned int element = getElementGPU(&qfilter, fq); if(!isOccupiedGPU(element)){ slotValues[idx] = NOT_FOUND; return; } unsigned int s = findRunStartGPU(&qfilter, fq); //search through elements in run do{ unsigned int remainder = getRemainderGPU(getElementGPU(&qfilter, s)); if(remainder == fr){ slotValues[idx] = s; return; } else if(remainder > fr){ slotValues[idx] = NOT_FOUND; return; } s++; }while(isContinuationGPU(getElementGPU(&qfilter, s))); slotValues[idx] = NOT_FOUND; } __global__ void hashInputs(int numItems, quotient_filter qfilter, unsigned int* insertValues, unsigned int* fingerprints) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; //hash values to get fingerprints // unsigned int hashValue = FNVhashGPU(insertValues[idx], (1 << (qfilter.qbits + qfilter.rbits))); unsigned int hashValue = Normal_APHash(insertValues[idx], (1 << (qfilter.qbits + 
qfilter.rbits))); fingerprints[idx] = hashValue; } __host__ float launchSortedLookups(quotient_filter qfilter, int numValues, unsigned int* d_lookupValues, unsigned int* d_returnValuesArray) { //Allocate array for hash values thrust::device_vector<unsigned int> d_hashValues(numValues); thrust::fill(d_hashValues.begin(), d_hashValues.end(), 0); unsigned int* d_hashValuesArray = thrust::raw_pointer_cast(&d_hashValues[0]); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); //Hash input values int numBlocks = (numValues + 127) / 128; dim3 hashBlockDims((numBlocks + 31) / 32, 32); hashInputs<<<hashBlockDims, 128>>>(numValues, qfilter, d_lookupValues, d_hashValuesArray); //Create index array to track inputs -> outputs thrust::device_vector<unsigned int> d_indices(numValues); thrust::fill(d_indices.begin(), d_indices.end(), 1); thrust::exclusive_scan(d_indices.begin(), d_indices.end(), d_indices.begin(), 0); //Sort by fingerprint thrust::sort_by_key(d_hashValues.begin(), d_hashValues.end(), d_indices.begin()); //Launch lookup kernel numBlocks = (numValues + 1023) / 1024; dim3 blockDims((numBlocks + 31) / 32, 32); lookUp<<<blockDims, 1024>>>(numValues, qfilter, d_hashValuesArray, d_returnValuesArray); //Sort outputs thrust::device_ptr<unsigned int> d_returnValues(d_returnValuesArray); thrust::sort_by_key(d_indices.begin(), d_indices.end(), d_returnValues); cudaEventRecord(stop); //Calculate timing results cudaEventSynchronize(stop); float lookupTime = 0; cudaEventElapsedTime(&lookupTime, start, stop); //Free Memory d_hashValues.~device_vector<unsigned int>(); cudaEventDestroy(start); cudaEventDestroy(stop); return lookupTime; } __global__ void hashAndLookUp(int numItems, struct quotient_filter qfilter, unsigned int* lookupValues, unsigned int* slotValues) { //returns NOT_FOUND (UINT_MAX) in slotValues[idx] if value is not in the filter, and returns the location of the remainder if it is in the filter unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; unsigned int hashValue = Normal_APHash(lookupValues[idx], (1 << (qfilter.qbits + qfilter.rbits))); //separate into quotient and remainder unsigned int fq = (hashValue >> qfilter.rbits) & LOW_BIT_MASK(qfilter.qbits); unsigned int fr = hashValue & LOW_BIT_MASK(qfilter.rbits); unsigned int element = getElementGPU(&qfilter, fq); if(!isOccupiedGPU(element)){ slotValues[idx] = NOT_FOUND; return; } unsigned int s = findRunStartGPU(&qfilter, fq); //search through elements in run do{ unsigned int remainder = getRemainderGPU(getElementGPU(&qfilter, s)); if(remainder == fr){ slotValues[idx] = s; return; } else if(remainder > fr){ slotValues[idx] = NOT_FOUND; return; } s++; }while(isContinuationGPU(getElementGPU(&qfilter, s))); slotValues[idx] = NOT_FOUND; } __host__ float launchUnsortedLookups(quotient_filter qfilter, int numValues, unsigned int* d_lookupValues, unsigned int* d_returnValuesArray) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); //Launch lookup kernel int numBlocks = (numValues + 1023) / 1024; dim3 blockDims((numBlocks + 31) / 32, 32); hashAndLookUp<<<blockDims, 1024>>>(numValues, qfilter, d_lookupValues, d_returnValuesArray); cudaEventRecord(stop); //Calculate timing results cudaEventSynchronize(stop); float lookupTime = 0; cudaEventElapsedTime(&lookupTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); return lookupTime; } __host__ void 
printQuotientFilterGPU(struct quotient_filter* qf) { unsigned char* h_filterTable = new unsigned char[calcNumSlotsGPU(qf->qbits, qf->rbits)]; cudaMemcpy(h_filterTable, qf->table, calcNumSlotsGPU(qf->qbits, qf->rbits) * sizeof(unsigned char), cudaMemcpyDeviceToHost); unsigned char* d_filterTable = qf->table; qf->table = h_filterTable; int filterSize = (1 << qf->qbits) * 1.1; printf("Printing metadata and remainders:\n"); for(int i = 0; i < filterSize/10; i++){ for(int j = 0; j < 10; j++){ int element = getElementGPU(qf, 10*i + j); printf("%u \t", element & 7); } printf("\n"); for(int j = 0; j < 10; j++){ int element = getElementGPU(qf, 10*i + j); printf("%u \t", getRemainderGPU(element)); } printf("\n --------------------------------------------------------------------- \n"); } printf("\n"); qf->table = d_filterTable; } __global__ void locateInsertSuperclusters(int numItems, quotient_filter qfilter, unsigned int* superclusterStarts) { //marks the beginning of each supercluster by looking for empty slots unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; superclusterStarts[idx] = 0; if(idx == 0) return; if(isEmptyGPU(getElementGPU(&qfilter, idx - 1))){ superclusterStarts[idx] = 1; } } __global__ void superclusterBidding(int numItems, quotient_filter qfilter, unsigned int* insertValues, unsigned int* superclusterIDs, bool* insertFlags, unsigned int* slotWinners) { //Outputs an array with one value per supercluster. These values can be inserted in parallel without collisions. unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; //initialize insert flags insertFlags[idx] = 1; if(insertValues[idx] == NOT_FOUND){ return; } //calculate fingerprint // unsigned int hashValue = FNVhashGPU(insertValues[idx], (1 << (qfilter.qbits + qfilter.rbits))); unsigned int hashValue = Normal_APHash(insertValues[idx], (1 << (qfilter.qbits + qfilter.rbits))); //separate out the quotient/canonical slot bits unsigned int fq = (hashValue >> qfilter.rbits) & LOW_BIT_MASK(qfilter.qbits); //determine which supercluster the item belongs in unsigned int superclusterNumber = superclusterIDs[fq]; //write the item's index to the supercluster slot to bid for insert slotWinners[superclusterNumber] = idx; } __global__ void insertItemGPU(int numItems, quotient_filter qfilter, unsigned int* insertValues, unsigned int* winnerIndices, unsigned int* finalLocationValues, bool* insertFlags) { //inserts items into the filter, returning their slot locations in slotValues[idx] //if the item is already in the filter, it still returns the item location, although no changes are made unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; //check that there is an item to insert for this supercluster if(winnerIndices[idx] == NOT_FOUND){ finalLocationValues[idx] = NOT_FOUND; return; } //determine which value is being added to the QF unsigned int originalIndex = winnerIndices[idx]; //reset winnerIndices for next bidding round winnerIndices[idx] = NOT_FOUND; insertFlags[originalIndex] = 0; //want to remove this item from insert queue unsigned int value = insertValues[originalIndex]; //calculate fingerprint // unsigned int hashValue = FNVhashGPU(value, (1 << (qfilter.qbits + qfilter.rbits))); unsigned int hashValue = Normal_APHash(value, (1 << (qfilter.qbits + qfilter.rbits))); //separate into quotient and remainder unsigned int fq = 
(hashValue >> qfilter.rbits) & LOW_BIT_MASK(qfilter.qbits); unsigned int fr = hashValue & LOW_BIT_MASK(qfilter.rbits); unsigned int canonElement = getElementGPU(&qfilter, fq); unsigned int newElement = fr << 3; if(isEmptyGPU(canonElement)){ setElementGPU(&qfilter, fq, setOccupiedGPU(newElement)); finalLocationValues[idx] = fq; return; } if(!isOccupiedGPU(canonElement)){ //set is_occupied to show that there is now a run for this slot setElementGPU(&qfilter, fq, setOccupiedGPU(canonElement)); } //Find beginning of item's run unsigned int runStart = findRunStartGPU(&qfilter, fq); unsigned int s = runStart; if(isOccupiedGPU(canonElement)){ //If slot already has a run, search through its elements. do{ unsigned int remainder = getRemainderGPU(getElementGPU(&qfilter, s)); if(remainder == fr){ //the item is already in the filter finalLocationValues[idx] = s; return; } else if(remainder > fr){ //s now points to where item goes break; } s++; }while(isContinuationGPU(getElementGPU(&qfilter, s))); if(s == runStart){ //The new element is now the start of the run, but we must move old start over, so it will be continuation unsigned int oldStartElement = getElementGPU(&qfilter, runStart); setElementGPU(&qfilter, runStart, setContinuationGPU(oldStartElement)); } else{ //New element is not the start, so set its continuation bit newElement = setContinuationGPU(newElement); } } if(s != fq){ //If it's not being inserted into the canonical slot, the element is shifted. newElement = setShiftedGPU(newElement); } insertItemHereGPU(&qfilter, s, newElement); finalLocationValues[idx] = s; return; } __host__ float insert(quotient_filter qfilter, int numValues, unsigned int* d_insertValues) { int filterSize = (1 << qfilter.qbits) * 1.1; //number of (r + 3)-bit slots in the filter //Allocate all necessary memory for inserts int* h_numItemsLeft = new int[1]; //counts number of elements in insert queue h_numItemsLeft[0] = numValues; int* d_numItemsLeft; cudaMalloc((void**) &d_numItemsLeft, sizeof(int)); unsigned int* d_superclusterIndicators; //stores bits marking beginning of superclusters cudaMalloc((void**) &d_superclusterIndicators, filterSize * sizeof(unsigned int)); //Variables for CUB function temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; unsigned int* d_superclusterLabels = NULL; //labels each slot with its supercluster number cudaMalloc((void**) &d_superclusterLabels, filterSize * sizeof(unsigned int)); int* h_lastSuperclusterLabel = new int[1]; int maxNumSuperclusters = calcNumSlotsGPU(qfilter.qbits, qfilter.rbits) + 1; unsigned int* d_slotWinners; cudaMalloc((void**) &d_slotWinners, maxNumSuperclusters * sizeof(unsigned int)); unsigned int* h_slotWinners = new unsigned int[maxNumSuperclusters]; for(int i = 0; i < maxNumSuperclusters; i++){ h_slotWinners[i] = NOT_FOUND; } cudaMemcpy(d_slotWinners, h_slotWinners, maxNumSuperclusters * sizeof(unsigned int), cudaMemcpyHostToDevice); unsigned int* d_insertLocations; //Output for actual locations where items are inserted cudaMalloc((void**) &d_insertLocations, maxNumSuperclusters * sizeof(unsigned int)); bool* d_insertFlags; //Flags for removing items from insert queue cudaMalloc((void**) &d_insertFlags, numValues * sizeof(bool)); unsigned int* d_insertItemsQueue; cudaMalloc((void**) &d_insertItemsQueue, numValues * sizeof(unsigned int)); cudaMemcpy(d_insertItemsQueue, d_insertValues, numValues * sizeof(unsigned int), cudaMemcpyDeviceToDevice); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); 
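//Insert loop below: each pass (1) marks supercluster boundaries, (2) labels every
//slot with its supercluster id via a CUB inclusive prefix sum, (3) lets the pending
//items bid so that at most one item per supercluster wins, (4) inserts the winners
//in parallel (no two winners share a supercluster, so their shifts cannot collide),
//and (5) compacts the inserted items out of the queue with cub::DeviceSelect::Flagged.
//Passes repeat until the insert queue is empty.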
//cudaProfilerStart(); cudaEventRecord(start); do{ //TODO: could consider marking superclusters from previous rounds with no items to insert so that we don't continue to launch threads for these superclusters to do no work //Find supercluster array: int numBlocks = (filterSize + 1023) / 1024; dim3 SCBlockDims((numBlocks + 31) / 32, 32); locateInsertSuperclusters<<<SCBlockDims, 1024>>>(filterSize, qfilter, d_superclusterIndicators); //CUB Inclusive Prefix Sum d_temp_storage = NULL; temp_storage_bytes = 0; CubDebugExit(cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_superclusterIndicators, d_superclusterLabels, filterSize)); cudaMalloc(&d_temp_storage, temp_storage_bytes); CubDebugExit(cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_superclusterIndicators, d_superclusterLabels, filterSize)); //Determine how many superclusters there are cudaMemcpy(h_lastSuperclusterLabel, d_superclusterLabels + (filterSize - 1), sizeof(unsigned int), cudaMemcpyDeviceToHost); int numSuperclusters = h_lastSuperclusterLabel[0] + 1; //Pick one element per supercluster to insert numBlocks = (h_numItemsLeft[0] + 127) / 128; dim3 biddingBlockDims((numBlocks + 31) / 32, 32); superclusterBidding<<<biddingBlockDims, 128>>>(h_numItemsLeft[0], qfilter, d_insertItemsQueue, d_superclusterLabels, d_insertFlags, d_slotWinners); //Insert items into QF numBlocks = (numSuperclusters + 255) / 256; dim3 insertBlockDims((numBlocks + 31) / 32, 32); insertItemGPU<<<insertBlockDims, 256>>>(numSuperclusters, qfilter, d_insertItemsQueue, d_slotWinners, d_insertLocations, d_insertFlags); //Remove successfully inserted items from the queue d_temp_storage = NULL; temp_storage_bytes = 0; CubDebugExit(cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_insertItemsQueue, d_insertFlags, d_insertItemsQueue, d_numItemsLeft, h_numItemsLeft[0])); cudaMalloc(&d_temp_storage, temp_storage_bytes); CubDebugExit(cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_insertItemsQueue, d_insertFlags, d_insertItemsQueue, d_numItemsLeft, h_numItemsLeft[0])); cudaMemcpy(h_numItemsLeft, d_numItemsLeft, sizeof(int), cudaMemcpyDeviceToHost); }while(h_numItemsLeft[0] > 0); cudaEventRecord(stop); //cudaProfilerStop(); //Calculate timing results cudaEventSynchronize(stop); float insertTime = 0; cudaEventElapsedTime(&insertTime, start, stop); //Free memory delete[] h_numItemsLeft; cudaFree(d_numItemsLeft); cudaFree(d_superclusterIndicators); cudaFree(d_temp_storage); cudaFree(d_superclusterLabels); cudaFree(d_slotWinners); delete[] h_slotWinners; cudaFree(d_insertLocations); cudaFree(d_insertFlags); cudaFree(d_insertItemsQueue); cudaEventDestroy(start); cudaEventDestroy(stop); return insertTime; } __global__ void quotienting(int numItems, unsigned int qbits, unsigned int rbits, unsigned int* quotients, unsigned int* remainders) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; //return quotients and remainders unsigned int hashValue = quotients[idx]; //quotients array initially stores the fingerprint values unsigned int canonicalSlot = (hashValue >> rbits) & LOW_BIT_MASK(qbits); quotients[idx] = canonicalSlot; unsigned int remainderBits = hashValue & LOW_BIT_MASK(rbits); remainders[idx] = remainderBits; } __global__ void findSegmentHeads(int numItems, unsigned int* quotients, unsigned int* segStarts) { //locate the beginnings of segments unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * 
blockDim.x; if(idx >= numItems) return; if(idx != 0){ if(quotients[idx] != quotients[idx - 1]){ segStarts[idx] = 1; } } } __global__ void calcOffsets(int numItems, unsigned int* locations, unsigned int* segLabels, int* offsets, int* credits, int* creditCarryover) { //compute the shift/credits for a group of elements when merging their segments unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems || idx ==0) return; unsigned int segmentIdx = segLabels[idx]; if((segmentIdx != segLabels[idx - 1]) && (segmentIdx % 2 == 1)){ offsets[segmentIdx] = locations[idx - 1] - locations[idx] + 1; creditCarryover[segmentIdx] = credits[idx - 1]; } } __global__ void shiftElements(int numItems, int* offsets, int* credits, unsigned int* locations, int* creditCarryover, unsigned int* segLabels) { //calculate the shifts for merging segments unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems || idx == 0) return; unsigned int segmentIdx = segLabels[idx]; int netShift = offsets[segmentIdx] - credits[idx]; int newCredits = 0; if(netShift > 0){ //merging the segments causes the items to shift. locations[idx] += netShift; newCredits = 0; } else{ //there are extra slots between segments. Track these with credits. newCredits = -netShift; } credits[idx] = newCredits + creditCarryover[segmentIdx]; segLabels[idx] /= 2; } __global__ void setMetadata(int numItems, unsigned int* remainders, unsigned int* quotients, unsigned int* locations) { //set is_continuation and is_shifted bits for each item in the filter unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; unsigned int element = remainders[idx]; element = element << 3; //shift remainder left to make room for metadata bits //is_continuation: check if quotient[i-1] = quotient[i] (this is already stored in segStarts) if(idx != 0 && quotients[idx] == quotients[idx - 1]) element = setContinuationGPU(element); //is_shifted: if location > quotient if(locations[idx] != quotients[idx]) element = setShiftedGPU(element); remainders[idx] = element; } __global__ void writeRemainders(int numItems, quotient_filter qfilter, unsigned int* remainders, unsigned int* locations) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; setElementGPU(&qfilter, locations[idx], remainders[idx]); } __global__ void setOccupiedBits(int numItems, quotient_filter qfilter, unsigned int* quotients) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; unsigned int element = getElementGPU(&qfilter, quotients[idx]); setElementGPU(&qfilter, quotients[idx], setOccupiedGPU(element)); } __host__ float bulkBuildParallelMerging(quotient_filter qfilter, int numValues, unsigned int* d_insertValues, bool NoDuplicates) { //build a quotient filter by inserting all (or a large batch of) items all at once /* 1. Compute all fingerprints 2. Sort list of fingerprints 3. Quotienting 4. Segmented scan of array of 1's 5. Add to quotients 6. Create array of credits[number items], initialized to 0 7. "Associative scan" with saturating operator to find end positions 8. 
Write values to filter */ //Memory Allocation thrust::device_vector<unsigned int> d_quotients(numValues); thrust::fill(d_quotients.begin(), d_quotients.end(), 0); unsigned int* d_quotientsArray = thrust::raw_pointer_cast(&d_quotients[0]); thrust::device_vector<unsigned int> d_locations(numValues); thrust::fill_n(d_locations.begin(), numValues, 1); unsigned int* d_locationsArray = thrust::raw_pointer_cast(&d_locations[0]); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //cudaProfilerStart(); cudaEventRecord(start); //Hash input values int numBlocks = (numValues + 127) / 128; dim3 hashBlockDims((numBlocks + 31) / 32, 32); hashInputs<<<hashBlockDims, 128>>>(numValues, qfilter, d_insertValues, d_quotientsArray); //store fingerprints in quotients array //Sort by fingerprint thrust::sort(d_quotients.begin(), d_quotients.end()); //Remove duplicates, if desired if(NoDuplicates == true){ thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintEnd = thrust::unique(d_quotients.begin(), d_quotients.end()); d_quotients.erase(fingerprintEnd, d_quotients.end()); numValues = d_quotients.end() - d_quotients.begin(); } //Divide fingerprints into quotients and remainders unsigned int* d_remaindersArray; cudaMalloc((void**) &d_remaindersArray, numValues * sizeof(unsigned int)); cudaMemset(d_remaindersArray, 0, numValues * sizeof(unsigned int)); numBlocks = (numValues + 767) / 768; dim3 quotientingBlockDims((numBlocks + 31) / 32, 32); quotienting<<<quotientingBlockDims, 768>>>(numValues, qfilter.qbits, qfilter.rbits, d_quotientsArray, d_remaindersArray); //Segmented scan of array of 1's thrust::exclusive_scan_by_key(d_quotients.begin(), d_quotients.end(), d_locations.begin(), d_locations.begin()); //Add scanned values to quotients to find initial locations before shifts thrust::transform(d_quotients.begin(), d_quotients.end(), d_locations.begin(), d_locations.begin(), thrust::plus<unsigned int>()); //Associative scans: //1. Each quotient is a segment //2. Pair up segments //3. offset = L_tail - R_head + 1 //4. net shift = offset - credit //5. 
for each element: //if (net shift > 0): shift = net shift; credit = 0 //if (net shift < 0): shift = 0; credit = -net shift //segmentLabel = segmentLabel/2 thrust::device_vector<unsigned int> d_segStarts(numValues); thrust::fill(d_segStarts.begin(), d_segStarts.end(), 0); unsigned int* d_segStartsArray = thrust::raw_pointer_cast(&d_segStarts[0]); thrust::device_vector<unsigned int> d_segLabels(numValues); unsigned int* d_segLabelsArray = thrust::raw_pointer_cast(&d_segLabels[0]); //Label segments for grouping items numBlocks = (numValues + 767) / 768; dim3 findSegHeadsBlockDims((numBlocks + 31) / 32, 32); findSegmentHeads<<<findSegHeadsBlockDims, 768>>>(numValues, d_quotientsArray, d_segStartsArray); thrust::inclusive_scan(d_segStarts.begin(), d_segStarts.end(), d_segLabels.begin()); d_segStarts.~device_vector<unsigned int>(); //Join segments, calculating shifts along the way int numSegments = d_segLabels[numValues - 1] + 1; int numLoops = (int) ceil(log2((float)numSegments)); int* d_offsets; cudaMalloc((void**) &d_offsets, numSegments * sizeof(int)); int* d_creditCarryover; cudaMalloc((void**) &d_creditCarryover, numSegments * sizeof(int)); int* d_credits; cudaMalloc((void**) &d_credits, numValues * sizeof(int)); cudaMemset(d_credits, 0, numValues * sizeof(int)); for(int i = 0; i < numLoops; i++){ cudaMemset(d_offsets, 0, numSegments * sizeof(int)); cudaMemset(d_creditCarryover, 0, numSegments * sizeof(int)); //Calculate offsets between segments numBlocks = (numValues + 255) / 256; dim3 findSegHeadsBlockDims((numBlocks + 31) / 32, 32); calcOffsets<<<findSegHeadsBlockDims, 256>>>(numValues, d_locationsArray, d_segLabelsArray, d_offsets, d_credits, d_creditCarryover); //Calculate the shifts/credits for each item in this round of merging //Relabel segments so that pairs have now merged numBlocks = (numValues + 767) / 768; dim3 shiftElementsBlockDims((numBlocks + 31) / 32, 32); shiftElements<<<shiftElementsBlockDims, 768>>>(numValues, d_offsets, d_credits, d_locationsArray, d_creditCarryover, d_segLabelsArray); } //Shift the remainder values to left to make room for metadata //Then determine metadata bits and set them numBlocks = (numValues + 1023) / 1024; dim3 setMetadataBlockDims((numBlocks + 31) / 32, 32); setMetadata<<<setMetadataBlockDims, 1024>>>(numValues, d_remaindersArray, d_quotientsArray, d_locationsArray); //Scatter remainder values to the filter numBlocks = (numValues + 1023) / 1024; dim3 writeRemaindersBlockDims((numBlocks + 31) / 32, 32); writeRemainders<<<writeRemaindersBlockDims, 1024>>>(numValues, qfilter, d_remaindersArray, d_locationsArray); //Set the is_occupied bits numBlocks = (numValues + 511) / 512; dim3 setOccupiedBitsBlockDims((numBlocks + 31) / 32, 32); setOccupiedBits<<<setOccupiedBitsBlockDims, 512>>>(numValues, qfilter, d_quotientsArray); //Calculate and print timing results cudaEventRecord(stop); //cudaProfilerStop(); cudaEventSynchronize(stop); float filterBuildTime = 0; cudaEventElapsedTime(&filterBuildTime, start, stop); //Free memory d_quotients.~device_vector<unsigned int>(); cudaFree(d_remaindersArray); d_locations.~device_vector<unsigned int>(); d_segLabels.~device_vector<unsigned int>(); cudaFree(d_credits); cudaFree(d_offsets); cudaFree(d_creditCarryover); cudaEventDestroy(start); cudaEventDestroy(stop); return filterBuildTime; } __global__ void segmentStartLocations(int numItems, unsigned int* segLabels, unsigned int* segStartLocations) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) 
return; if(idx == 0){ segStartLocations[0] = 0; return; } if(segLabels[idx] == segLabels[idx-1]) return; segStartLocations[segLabels[idx]] = idx; } __global__ void shiftSegments(int numItems, unsigned int* segStartLocations, unsigned int* locations, int numValues, bool* changesMade) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems || idx ==0) return; int arrayIndex = segStartLocations[idx]; int shift = locations[arrayIndex-1] - locations[arrayIndex] + 1; if(shift > 0){ int segmentLength; if(idx == (numItems - 1)){ segmentLength = numValues - segStartLocations[idx]; } else{ segmentLength = segStartLocations[idx+1] - segStartLocations[idx]; } for(int i = 0; i < segmentLength; i++){ locations[arrayIndex + i] += shift; } changesMade[0] = 1; } } void printArray(int numValues, int* array) { for(int i = 0; i < numValues/10; i++){ for(int j = 0; j < 10; j++){ printf("%i\t", array[i*10 + j]); } printf("\n"); } for(int i = 0; i < numValues % 10; i++){ printf("%i\t", array[((numValues/10)*10) + i]); } printf("\n"); } __host__ float bulkBuildSequentialShifts(quotient_filter qfilter, int numValues, unsigned int* d_insertValues, bool NoDuplicates) { //build a quotient filter by inserting all (or a large batch of) items all at once //compute locations by shifting one run at a time //exit when shifting stops /* 1. Compute all quotients & remainders List of pairs of (fq, fr) 2. Sort list by fq, then by fr within groups of same fq (or maybe in reverse order?) 3. Segmented scan of array of 1's 4. Add to quotients 5. Iterate: a. Compute shift at every boundary between runs and shift all items in run if needed b. Write to Bool to indicate shift happened b. Check if a shift happened; if not, done! 6. Write values to filter */ //Memory Allocation thrust::device_vector<unsigned int> d_quotients(numValues); thrust::fill(d_quotients.begin(), d_quotients.end(), 0); unsigned int* d_quotientsArray = thrust::raw_pointer_cast(&d_quotients[0]); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); //Hash input values int numBlocks = (numValues + 127) / 128; dim3 hashBlockDims((numBlocks + 31) / 32, 32); hashInputs<<<hashBlockDims, 128>>>(numValues, qfilter, d_insertValues, d_quotientsArray); //store fingerprints in quotients array //Sort by fingerprint thrust::sort(d_quotients.begin(), d_quotients.end()); //Remove duplicates, if desired if(NoDuplicates == true){ thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintEnd = thrust::unique(d_quotients.begin(), d_quotients.end()); d_quotients.erase(fingerprintEnd, d_quotients.end()); numValues = d_quotients.end() - d_quotients.begin(); } //Divide fingerprints into quotients and remainders unsigned int* d_remaindersArray; cudaMalloc((void**) &d_remaindersArray, numValues * sizeof(unsigned int)); cudaMemset(d_remaindersArray, 0, numValues * sizeof(unsigned int)); thrust::device_vector<unsigned int> d_locations(numValues); thrust::fill_n(d_locations.begin(), numValues, 1); unsigned int* d_locationsArray = thrust::raw_pointer_cast(&d_locations[0]); numBlocks = (numValues + 767) / 768; dim3 quotientingBlockDims((numBlocks + 31) / 32, 32); quotienting<<<quotientingBlockDims, 768>>>(numValues, qfilter.qbits, qfilter.rbits, d_quotientsArray, d_remaindersArray); //Segmented scan of array of 1's thrust::exclusive_scan_by_key(d_quotients.begin(), d_quotients.end(), d_locations.begin(), d_locations.begin()); //Add scanned values to quotients to find 
initial locations before shifts thrust::transform(d_quotients.begin(), d_quotients.end(), d_locations.begin(), d_locations.begin(), thrust::plus<unsigned int>()); //Label segments for grouping items thrust::device_vector<unsigned int> d_segStarts(numValues); thrust::fill(d_segStarts.begin(), d_segStarts.end(), 0); unsigned int* d_segStartsArray = thrust::raw_pointer_cast(&d_segStarts[0]); thrust::device_vector<unsigned int> d_segLabels(numValues); unsigned int* d_segLabelsArray = thrust::raw_pointer_cast(&d_segLabels[0]); numBlocks = (numValues + 767) / 768; dim3 findSegHeadsBlockDims((numBlocks + 31) / 32, 32); findSegmentHeads<<<findSegHeadsBlockDims, 768>>>(numValues, d_quotientsArray, d_segStartsArray); thrust::inclusive_scan(d_segStarts.begin(), d_segStarts.end(), d_segLabels.begin()); d_segStarts.~device_vector<unsigned int>(); int numSegments = d_segLabels[numValues - 1] + 1; unsigned int* d_segStartLocations; cudaMalloc((void**) &d_segStartLocations, numSegments * sizeof(unsigned int)); //Create array with the location of first item in each run numBlocks = (numValues + 1023) / 1024; dim3 segStartLocationsBlockDims((numBlocks + 31) / 32, 32); segmentStartLocations<<<findSegHeadsBlockDims, 1024>>>(numValues, d_segLabelsArray, d_segStartLocations); bool* h_changesMade = new bool[1]; h_changesMade[0] = 1; bool* d_changesMade; cudaMalloc((void**) &d_changesMade, sizeof(bool)); cudaMemset(d_changesMade, 0, sizeof(bool)); while(h_changesMade[0] == 1){ h_changesMade[0] = 0; numBlocks = (numSegments + 191) / 192; dim3 shiftSegsBlockDims((numBlocks + 31) / 32, 32); shiftSegments<<<shiftSegsBlockDims, 192>>>(numSegments, d_segStartLocations, d_locationsArray, numValues, d_changesMade); cudaMemcpy(h_changesMade, d_changesMade, sizeof(bool), cudaMemcpyDeviceToHost); cudaMemset(d_changesMade, 0, sizeof(bool)); } //Shift the remainder values to left to make room for metadata //Then determine metadata bits and set them numBlocks = (numValues + 1023) / 1024; dim3 setMetadataBlockDims((numBlocks + 31) / 32, 32); setMetadata<<<setMetadataBlockDims, 1024>>>(numValues, d_remaindersArray, d_quotientsArray, d_locationsArray); //Scatter remainder values to the filter numBlocks = (numValues + 1023) / 1024; dim3 writeRemaindersBlockDims((numBlocks + 31) / 32, 32); writeRemainders<<<writeRemaindersBlockDims, 1024>>>(numValues, qfilter, d_remaindersArray, d_locationsArray); //Set the is_occupied bits numBlocks = (numValues + 511) / 512; dim3 setOccupiedBitsBlockDims((numBlocks + 31) / 32, 32); setOccupiedBits<<<setOccupiedBitsBlockDims, 512>>>(numValues, qfilter, d_quotientsArray); //Calculate and print timing results cudaEventRecord(stop); cudaEventSynchronize(stop); float filterBuildTime = 0; cudaEventElapsedTime(&filterBuildTime, start, stop); //Free memory d_quotients.~device_vector<unsigned int>(); cudaFree(d_remaindersArray); d_locations.~device_vector<unsigned int>(); d_segLabels.~device_vector<unsigned int>(); cudaFree(d_segStartLocations); delete[] h_changesMade; cudaFree(d_changesMade); cudaEventDestroy(start); cudaEventDestroy(stop); return filterBuildTime; } __global__ void locateDeleteSuperclusters(int numItems, quotient_filter qfilter, unsigned int* superclusterStarts) { //marks the beginning of each "supercluster" -> really, for deletes this is same as a cluster unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; superclusterStarts[idx] = 0; if(idx == 0) return; if((!isEmptyGPU(getElementGPU(&qfilter, idx))) && 
(!isShiftedGPU(getElementGPU(&qfilter, idx)))){ superclusterStarts[idx] = 1; } } __global__ void deleteItemGPU(int numItems, quotient_filter qfilter, unsigned int* deleteValues, unsigned int* winnerIndices, bool* deleteFlags) { //deletes items from the quotient filter, shifting other items left if required unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; //check that there is an item assigned to be deleted for supercluster idx if(winnerIndices[idx] == NOT_FOUND){ return; } //determine which value is being added to the QF unsigned int originalIndex = winnerIndices[idx]; //reset winnerIndices for next bidding round winnerIndices[idx] = NOT_FOUND; deleteFlags[originalIndex] = 0; //want to remove this item from delete queue after this finishes unsigned int value = deleteValues[originalIndex]; //calculate fingerprint // unsigned int hashValue = FNVhashGPU(value, (1 << (qfilter.qbits + qfilter.rbits))); unsigned int hashValue = Normal_APHash(value, (1 << (qfilter.qbits + qfilter.rbits))); //separate into quotient and remainder unsigned int fq = (hashValue >> qfilter.rbits) & LOW_BIT_MASK(qfilter.qbits); unsigned int fr = hashValue & LOW_BIT_MASK(qfilter.rbits); unsigned int canonElement = getElementGPU(&qfilter, fq); if(!isOccupiedGPU(canonElement)){ //if canonical slot is not occupied, the item isn't in the filter; we're done. return; } //start bucket is fq unsigned int b = fq; //find beginning of cluster: while(isShiftedGPU(getElementGPU(&qfilter, b))){ b--; } //find start of run we're interested in: //slot counter starts at beginning of cluster unsigned int s = b; while(b != fq){ do{ s++; }while((isContinuationGPU(getElementGPU(&qfilter, s)))); //find end of current run do{ b++; }while((!isOccupiedGPU(getElementGPU(&qfilter, b)))); //count number of runs passed } //now s is first value in run of item to be deleted unsigned int runStart = s; //Search through the run's elements to find item needing to be deleted unsigned int remainder; do{ remainder = getRemainderGPU(getElementGPU(&qfilter, s)); if(remainder == fr){ //found it! break; } else if(remainder > fr){ //the item is not in the filter //nothing to delete here return; } s++; }while(isContinuationGPU(getElementGPU(&qfilter, s))); //If we searched entire run without finding it: if(remainder != fr){ return; //the item is not in the filter } if(!isContinuationGPU(getElementGPU(&qfilter, (s + 1)))){ do{ //if next item is a new run, add to run count b++; }while(!isOccupiedGPU(getElementGPU(&qfilter, b))); } //We have now located the item that needs to be deleted, stored in slot s. 
//Special conditions for deleted run starts if(s == runStart){ if(!isContinuationGPU(getElementGPU(&qfilter, (s + 1)))){ //the run is empty; clear the occupied bit setElementGPU(&qfilter, fq, clearOccupiedGPU(getElementGPU(&qfilter, fq))); } else{ //next item is now the first in the run setElementGPU(&qfilter, (s + 1), clearContinuationGPU(getElementGPU(&qfilter, (s + 1)))); } } //now check the item to the right to see whether it will need to be moved //if it was shifted, it is part of the same cluster and can be shifted left while(isShiftedGPU(getElementGPU(&qfilter, (s + 1)))){ //want to check if s = b for clearing shifted bit if(b == s){ //in this case, run about to be shifted into its correct slot -> unshifted setElementGPU(&qfilter, (s + 1), clearShiftedGPU(getElementGPU(&qfilter, (s + 1)))); } do{ unsigned int nextElement = getElementGPU(&qfilter, (s + 1)); if(isOccupiedGPU(getElementGPU(&qfilter, s))){ setElementGPU(&qfilter, s, setOccupiedGPU(nextElement)); } else{ setElementGPU(&qfilter, s, clearOccupiedGPU(nextElement)); } s++; }while((isContinuationGPU(getElementGPU(&qfilter, (s + 1))))); //shift the items in current run do{ b++; }while(!isOccupiedGPU(getElementGPU(&qfilter, b)) ); //keep track of current run } //Last item is always a new empty slot setElementGPU(&qfilter, s, 0); return; } __host__ float superclusterDeletes(quotient_filter qfilter, int numValues, unsigned int* d_deleteValues) { int filterSize = (1 << qfilter.qbits) * 1.1; //number of (r + 3)-bit slots in the filter //Allocate all necessary memory for deletes int* h_numItemsLeft = new int[1]; //counts number of elements in delete queue h_numItemsLeft[0] = numValues; int* d_numItemsLeft; cudaMalloc((void**) &d_numItemsLeft, sizeof(int)); unsigned int* d_superclusterIndicators; //stores bits marking beginning of superclusters cudaMalloc((void**) &d_superclusterIndicators, filterSize * sizeof(unsigned int)); //Variables for CUB function temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; unsigned int* d_superclusterLabels = NULL; //labels each slot with its supercluster number cudaMalloc((void**) &d_superclusterLabels, filterSize * sizeof(unsigned int)); int* h_lastSuperclusterLabel = new int[1]; int maxNumSuperclusters = calcNumSlotsGPU(qfilter.qbits, qfilter.rbits) + 1; unsigned int* d_slotWinners; cudaMalloc((void**) &d_slotWinners, maxNumSuperclusters * sizeof(unsigned int)); unsigned int* h_slotWinners = new unsigned int[maxNumSuperclusters]; for(int i = 0; i < maxNumSuperclusters; i++){ h_slotWinners[i] = NOT_FOUND; } cudaMemcpy(d_slotWinners, h_slotWinners, maxNumSuperclusters * sizeof(unsigned int), cudaMemcpyHostToDevice); bool* d_deleteFlags; //Flags for removing items from delete queue cudaMalloc((void**) &d_deleteFlags, numValues * sizeof(bool)); unsigned int* d_deleteItemsQueue; cudaMalloc((void**) &d_deleteItemsQueue, numValues * sizeof(unsigned int)); cudaMemcpy(d_deleteItemsQueue, d_deleteValues, numValues * sizeof(unsigned int), cudaMemcpyDeviceToDevice); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); do{ //Find supercluster array: int numBlocks = (filterSize + 1023) / 1024; dim3 deleteSCBlockDims((numBlocks + 31) / 32, 32); locateDeleteSuperclusters<<<deleteSCBlockDims, 1024>>>(filterSize, qfilter, d_superclusterIndicators); //CUB Inclusive Prefix Sum d_temp_storage = NULL; temp_storage_bytes = 0; CubDebugExit(cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_superclusterIndicators, d_superclusterLabels, 
filterSize)); cudaMalloc(&d_temp_storage, temp_storage_bytes); CubDebugExit(cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_superclusterIndicators, d_superclusterLabels, filterSize)); //Determine how many superclusters there are cudaMemcpy(h_lastSuperclusterLabel, d_superclusterLabels + (filterSize - 1), sizeof(unsigned int), cudaMemcpyDeviceToHost); int numSuperclusters = h_lastSuperclusterLabel[0] + 1; //Pick one element per supercluster to delete numBlocks = (h_numItemsLeft[0] + 127) / 128; dim3 biddingBlockDims((numBlocks + 31) / 32, 32); superclusterBidding<<<biddingBlockDims, 128>>>(h_numItemsLeft[0], qfilter, d_deleteItemsQueue, d_superclusterLabels, d_deleteFlags, d_slotWinners); //Insert items into QF numBlocks = (numSuperclusters + 1023) / 1024; dim3 deleteBlockDims((numBlocks + 31) / 32, 32); deleteItemGPU<<<deleteBlockDims, 1024>>>(numSuperclusters, qfilter, d_deleteItemsQueue, d_slotWinners, d_deleteFlags); //Remove successfully deleted items from the queue d_temp_storage = NULL; temp_storage_bytes = 0; CubDebugExit(cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_deleteItemsQueue, d_deleteFlags, d_deleteItemsQueue, d_numItemsLeft, h_numItemsLeft[0])); cudaMalloc(&d_temp_storage, temp_storage_bytes); CubDebugExit(cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_deleteItemsQueue, d_deleteFlags, d_deleteItemsQueue, d_numItemsLeft, h_numItemsLeft[0])); cudaMemcpy(h_numItemsLeft, d_numItemsLeft, sizeof(int), cudaMemcpyDeviceToHost); }while(h_numItemsLeft[0] > 0); cudaEventRecord(stop); //Calculate timing results cudaEventSynchronize(stop); float deleteTime = 0; cudaEventElapsedTime(&deleteTime, start, stop); //Free memory delete[] h_numItemsLeft; cudaFree(d_numItemsLeft); cudaFree(d_superclusterIndicators); cudaFree(d_temp_storage); cudaFree(d_superclusterLabels); cudaFree(d_slotWinners); delete[] h_slotWinners; cudaFree(d_deleteFlags); cudaFree(d_deleteItemsQueue); cudaEventDestroy(start); cudaEventDestroy(stop); return deleteTime; } __global__ void findSegmentStarts(int numItems, unsigned int q, unsigned int* quotients, unsigned int* segmentStarts) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems || idx ==0) return; unsigned int currentSegment = quotients[idx] / q; unsigned int previousSegment = quotients[idx - 1] / q; if(currentSegment != previousSegment){ segmentStarts[currentSegment] = idx; } } __global__ void layout(int numItems, unsigned int qbits, unsigned int* quotients, unsigned int* segmentAssignments, int* shift, int* overflow, bool* changesMade, int numInsertValues) { //computes layout for the idx-th segment of the filter unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; int firstNonemptySegment = quotients[0] / qbits; if(idx < firstNonemptySegment){ overflow[idx] = 0; return; } int segStart; int firstItemIdx; if(idx == 0){ segStart = 0; if(quotients[0] < qbits){ firstItemIdx = 0; } else{ //no items in segment 0 overflow[0] = 0; return; } } else{ segStart = idx * qbits + shift[idx-1]; //start the layout to right of shifted values from previous segment firstItemIdx = segmentAssignments[idx]; if(firstItemIdx == 0 && (quotients[0] < segStart)){ //the segment has no items overflow[idx] = 0; return; } } int lastItemIdx; if(idx == (numItems - 1)){ //last segment lastItemIdx = numInsertValues - 1; } else{ lastItemIdx = segmentAssignments[idx + 1] - 1; int j = idx + 1; 
while(lastItemIdx == -1 && j < numItems){ //in case of empty segments to the right if(j == numItems - 1){ lastItemIdx = numInsertValues - 1; } else{ lastItemIdx = segmentAssignments[j] - 1; } j++; } } int numSegItems = lastItemIdx - firstItemIdx + 1; if(numSegItems <= 0){ overflow[idx] = 0; return; } int maxSlot = segStart; //maxSlot = next open slot for(int i = firstItemIdx; i <= lastItemIdx; i++){ if(quotients[i] > maxSlot) maxSlot = quotients[i]; maxSlot++; } int segEnd = ((idx + 1) * qbits) - 1; int segmentOverflow = (maxSlot - 1) - segEnd; if(segmentOverflow > 0){ overflow[idx] = segmentOverflow; if(segmentOverflow > shift[idx]){ //check if there has been change from last iteration changesMade[0] = 1; } } else{ overflow[idx] = 0; } } __global__ void segmentedQFWrite(int numItems, quotient_filter qfilter, unsigned int* quotients, unsigned int* remainders, unsigned int* segmentAssignments, int* shift, int numInsertValues) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; int segStart; int firstItemIdx; if(idx == 0){ segStart = 0; if(quotients[0] < qfilter.qbits){ firstItemIdx = 0; } else{ //no items in segment 0 return; } } else{ segStart = idx * qfilter.qbits + shift[idx-1]; //start the layout to right of shifted values from previous segment firstItemIdx = segmentAssignments[idx]; if(firstItemIdx == 0 && (quotients[0] < segStart)){ //the segment has no items return; } } int lastItemIdx; if(idx == (numItems - 1)){ lastItemIdx = numInsertValues - 1; } else{ lastItemIdx = segmentAssignments[idx + 1] - 1; int j = idx + 1; while(lastItemIdx == -1 && j < numItems){ //in case of empty segments to the right if(j == numItems - 1){ lastItemIdx = numInsertValues - 1; } else{ lastItemIdx = segmentAssignments[j] - 1; } j++; } } int numSegItems = lastItemIdx - firstItemIdx + 1; if(numSegItems <= 0){ return; } int maxSlot = segStart; //maxSlot = location of last/currently inserted item for(int i = firstItemIdx; i <= lastItemIdx; i++){ unsigned int currentRemainder = remainders[i] << 3; if(quotients[i] >= maxSlot){ //item is not shifted maxSlot = quotients[i]; setElementGPU(&qfilter, maxSlot, currentRemainder); } else{ currentRemainder = setShiftedGPU(currentRemainder); if(quotients[i] == quotients[i - 1]) currentRemainder = setContinuationGPU(currentRemainder); setElementGPU(&qfilter, maxSlot, currentRemainder); } maxSlot++; } } __host__ float bulkBuildSegmentedLayouts(quotient_filter qfilter, int numValues, unsigned int* d_insertValues, bool NoDuplicates) { //build a quotient filter by partitioning into segments, inserting items into segments, then computing overflow /* 1. Compute all fingerprints 2. Sort list of fingerprints 3. Quotienting 4. Assign items to a segment 5. Compute layouts and overflow 6. Repeat until convergence 7. 
Write final values to filter */ //Memory Allocation thrust::device_vector<unsigned int> d_quotients(numValues); thrust::fill(d_quotients.begin(), d_quotients.end(), 0); unsigned int* d_quotientsArray = thrust::raw_pointer_cast(&d_quotients[0]); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //cudaProfilerStart(); cudaEventRecord(start); //Hash input values int numBlocks = (numValues + 127) / 128; dim3 hashBlockDims((numBlocks + 31) / 32, 32); hashInputs<<<hashBlockDims, 128>>>(numValues, qfilter, d_insertValues, d_quotientsArray); //store fingerprints in quotients array //Sort by fingerprint thrust::sort(d_quotients.begin(), d_quotients.end()); //Remove duplicates, if desired if(NoDuplicates == true){ thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintEnd = thrust::unique(d_quotients.begin(), d_quotients.end()); d_quotients.erase(fingerprintEnd, d_quotients.end()); numValues = d_quotients.end() - d_quotients.begin(); } //Divide fingerprints into quotients and remainders unsigned int* d_remaindersArray; cudaMalloc((void**) &d_remaindersArray, numValues * sizeof(unsigned int)); cudaMemset(d_remaindersArray, 0, numValues * sizeof(unsigned int)); numBlocks = (numValues + 767) / 768; dim3 quotientingBlockDims((numBlocks + 31) / 32, 32); quotienting<<<quotientingBlockDims, 768>>>(numValues, qfilter.qbits, qfilter.rbits, d_quotientsArray, d_remaindersArray); unsigned int q = qfilter.qbits; int numSegments = ((1 << q) / q) + 1; //Determine which items belong in each segment unsigned int* d_segmentStarts; cudaMalloc((void**) &d_segmentStarts, numSegments * sizeof(unsigned int)); cudaMemset(d_segmentStarts, 0, numSegments * sizeof(unsigned int)); numBlocks = (numValues + 255) / 256; dim3 findSegStartsBlockDims((numBlocks + 31) / 32, 32); findSegmentStarts<<<findSegStartsBlockDims, 256>>>(numValues, q, d_quotientsArray, d_segmentStarts); //Each segment has an input shift value and outputs overflow value int* d_shifts; cudaMalloc((void**) &d_shifts, numSegments * sizeof(int)); cudaMemset(d_shifts, 0, numSegments * sizeof(int)); int* d_overflows; cudaMalloc((void**) &d_overflows, numSegments * sizeof(int)); cudaMemset(d_overflows, 0, numSegments * sizeof(int)); bool* h_changesMade = new bool[1]; h_changesMade[0] = 1; bool* d_changesMade; cudaMalloc((void**) &d_changesMade, sizeof(bool)); cudaMemset(d_changesMade, 0, sizeof(bool)); while(h_changesMade[0] == 1){ h_changesMade[0] = 0; //since I set d_changesMade to 0 already might not need this //copy overflows into shifts //shifts[idx] represents the shift caused by segment idx, to be carried over into segment idx+1 cudaMemcpy(d_shifts, d_overflows, sizeof(int) * numSegments, cudaMemcpyDeviceToDevice); //Launch one thread per segment //Layout numBlocks = (numSegments + 255) / 256; dim3 layoutBlockDims((numBlocks + 31) / 32, 32); layout<<<layoutBlockDims, 256>>>(numSegments, qfilter.qbits, d_quotientsArray, d_segmentStarts, d_shifts, d_overflows, d_changesMade, numValues); cudaMemcpy(h_changesMade, d_changesMade, sizeof(bool), cudaMemcpyDeviceToHost); cudaMemset(d_changesMade, 0, sizeof(bool)); } //Write final values to filter numBlocks = (numSegments + 127) / 128; dim3 segmentedQFWriteBlockDims((numBlocks + 31) / 32, 32); segmentedQFWrite<<<segmentedQFWriteBlockDims, 128>>>(numSegments, qfilter, d_quotientsArray, d_remaindersArray, d_segmentStarts, d_shifts, numValues); numBlocks = (numValues + 511) / 512; dim3 setOccupiedBitsBlockDims((numBlocks + 31) / 32, 32); 
setOccupiedBits<<<setOccupiedBitsBlockDims, 512>>>(numValues, qfilter, d_quotientsArray); cudaEventRecord(stop); //Calculate timing results cudaEventSynchronize(stop); float buildTime = 0; cudaEventElapsedTime(&buildTime, start, stop); //Free memory d_quotients.~device_vector<unsigned int>(); cudaFree(d_remaindersArray); cudaFree(d_segmentStarts); cudaFree(d_shifts); cudaFree(d_overflows); delete[] h_changesMade; cudaFree(d_changesMade); cudaEventDestroy(start); cudaEventDestroy(stop); return buildTime; } __global__ void extractQuotients(int numItems, quotient_filter qfilter, unsigned int* fingerprints, bool* emptySlotFlags) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; if(idx >= numItems) return; unsigned int element = getElementGPU(&qfilter, idx); //Empty slots: if(isEmptyGPU(element)){ emptySlotFlags[idx] = true; return; } //Unshifted elements(beginning of cluster): if(!isShiftedGPU(element)){ fingerprints[idx] = (idx << qfilter.rbits) | getRemainderGPU(element); return; } //Shifted elements: //Find beginning of cluster: unsigned int b = idx; do{ b--; }while(isShiftedGPU(getElementGPU(&qfilter, b))); //Step through cluster, counting the runs: unsigned int s = b; while(s <= idx){ do{ s++; }while((isContinuationGPU(getElementGPU(&qfilter, s)))); //find end of each run if(s > idx) break; do{ b++; }while((!isOccupiedGPU(getElementGPU(&qfilter, b)))); //keeping track of canonical slot } fingerprints[idx] = (b << qfilter.rbits) | getRemainderGPU(element); } __host__ float insertViaMerge(quotient_filter qfilter, unsigned int* d_insertedValues, int numOldValues, unsigned int* d_newValues, int numNewValues, bool NoDuplicates) { //d_insertedValues and numOldValues are just for checking results of fingerprint extraction. They are not needed for the merge operation. 
// printQuotientFilterGPU(&qfilter); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); //Extract fingerprints from quotient filter int numSlots = calcNumSlotsGPU(qfilter.qbits, qfilter.rbits); thrust::device_vector<bool> d_emptySlotFlags(numSlots); thrust::fill(d_emptySlotFlags.begin(), d_emptySlotFlags.end(), 0); bool* d_emptySlotFlagsArray = thrust::raw_pointer_cast(&d_emptySlotFlags[0]); thrust::device_vector<unsigned int> d_fingerprintsBySlot(numSlots); thrust::fill(d_fingerprintsBySlot.begin(), d_fingerprintsBySlot.end(), 0); unsigned int* d_fingerprintsBySlotArray = thrust::raw_pointer_cast(&d_fingerprintsBySlot[0]); int numBlocks = (numSlots + 191) / 192; dim3 extractQuotientsBlockDims((numBlocks + 31) / 32, 32); extractQuotients<<<extractQuotientsBlockDims, 192>>>(numSlots, qfilter, d_fingerprintsBySlotArray, d_emptySlotFlagsArray); thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintsEnd = thrust::remove_if(d_fingerprintsBySlot.begin(), d_fingerprintsBySlot.end(), d_emptySlotFlags.begin(), thrust::identity<bool>()); d_fingerprintsBySlot.erase(fingerprintsEnd, d_fingerprintsBySlot.end()); int numExtractedValues = d_fingerprintsBySlot.end() - d_fingerprintsBySlot.begin(); //Merge with new array //Hash and quotientize new values to insert thrust::device_vector<unsigned int> d_newFingerprints(numNewValues); thrust::fill(d_newFingerprints.begin(), d_newFingerprints.end(), 0); unsigned int* d_newFingerprintsArray = thrust::raw_pointer_cast(&d_newFingerprints[0]); numBlocks = (numNewValues + 127) / 128; dim3 hashBlockDims((numBlocks + 31) / 32, 32); hashInputs<<<hashBlockDims, 128>>>(numNewValues, qfilter, d_newValues, d_newFingerprintsArray); //Sort by fingerprint thrust::sort(d_newFingerprints.begin(), d_newFingerprints.end()); //Merge d_newValues with extracted quotients and remainders mgpu::standard_context_t context(false); int outputSize = numExtractedValues + numNewValues; mgpu::mem_t<unsigned int> d_fingerprintsOutput(outputSize, context); mgpu::mem_t<unsigned int> d_newFingerprintsMem = copy_to_mem(d_newFingerprintsArray, numNewValues, context); mgpu::mem_t<unsigned int> d_extractedFingerprintsMem = copy_to_mem(d_fingerprintsBySlotArray, numExtractedValues, context); mgpu::merge(d_extractedFingerprintsMem.data(), numExtractedValues, d_newFingerprintsMem.data(), numNewValues, d_fingerprintsOutput.data(), mgpu::less_t<unsigned int>(), context); unsigned int* d_combinedQuotients = d_fingerprintsOutput.data(); //Rebuild filter using segmented layouts method //Clear old filter cudaMemset(qfilter.table, 0, numSlots * sizeof(unsigned char)); //Remove duplicates, if desired thrust::device_vector<unsigned int> d_thrustQuotients(d_combinedQuotients, d_combinedQuotients + outputSize); //must copy values to get them into thrust device_vector if(NoDuplicates == true){ thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintEnd = thrust::unique(d_thrustQuotients.begin(), d_thrustQuotients.end()); d_thrustQuotients.erase(fingerprintEnd, d_thrustQuotients.end()); outputSize = d_thrustQuotients.end() - d_thrustQuotients.begin(); } d_combinedQuotients = thrust::raw_pointer_cast(&d_thrustQuotients[0]); //Divide fingerprints into quotients and remainders unsigned int* d_combinedRemainders; cudaMalloc((void**) &d_combinedRemainders, outputSize * sizeof(unsigned int)); cudaMemset(d_combinedRemainders, 0, outputSize * sizeof(unsigned int)); numBlocks = (outputSize + 767) / 768; dim3 
quotientingBlockDims((numBlocks + 31) / 32, 32); quotienting<<<quotientingBlockDims, 768>>>(outputSize, qfilter.qbits, qfilter.rbits, d_combinedQuotients, d_combinedRemainders); unsigned int q = qfilter.qbits; int numSegments = ((1 << q) / q) + 1; //Determine which items belong in each segment unsigned int* d_segmentStarts; cudaMalloc((void**) &d_segmentStarts, numSegments * sizeof(unsigned int)); cudaMemset(d_segmentStarts, 0, numSegments * sizeof(unsigned int)); numBlocks = (outputSize + 255) / 256; dim3 findSegStartsBlockDims((numBlocks + 31) / 32, 32); findSegmentStarts<<<findSegStartsBlockDims, 256>>>(outputSize, q, d_combinedQuotients, d_segmentStarts); //Each segment has an input shift value and outputs overflow value int* d_shifts; cudaMalloc((void**) &d_shifts, numSegments * sizeof(int)); cudaMemset(d_shifts, 0, numSegments * sizeof(int)); int* d_overflows; cudaMalloc((void**) &d_overflows, numSegments * sizeof(int)); cudaMemset(d_overflows, 0, numSegments * sizeof(int)); bool* h_changesMade = new bool[1]; h_changesMade[0] = 1; bool* d_changesMade; cudaMalloc((void**) &d_changesMade, sizeof(bool)); cudaMemset(d_changesMade, 0, sizeof(bool)); while(h_changesMade[0] == 1){ //copy overflows into shifts cudaMemcpy(d_shifts, d_overflows, sizeof(int) * numSegments, cudaMemcpyDeviceToDevice); //Layout numBlocks = (numSegments + 255) / 256; dim3 layoutBlockDims((numBlocks + 31) / 32, 32); layout<<<layoutBlockDims, 256>>>(numSegments, qfilter.qbits, d_combinedQuotients, d_segmentStarts, d_shifts, d_overflows, d_changesMade, outputSize); cudaMemcpy(h_changesMade, d_changesMade, sizeof(bool), cudaMemcpyDeviceToHost); cudaMemset(d_changesMade, 0, sizeof(bool)); } //Write final values to filter numBlocks = (numSegments + 127) / 128; dim3 segmentedQFWriteBlockDims((numBlocks + 31) / 32, 32); segmentedQFWrite<<<segmentedQFWriteBlockDims, 128>>>(numSegments, qfilter, d_combinedQuotients, d_combinedRemainders, d_segmentStarts, d_shifts, outputSize); numBlocks = (outputSize + 511) / 512; dim3 setOccupiedBitsBlockDims((numBlocks + 31) / 32, 32); setOccupiedBits<<<setOccupiedBitsBlockDims, 512>>>(outputSize, qfilter, d_combinedQuotients); //Timing cudaEventRecord(stop); cudaEventSynchronize(stop); float rebuildTime = 0; cudaEventElapsedTime(&rebuildTime, start, stop); //Free Memory d_emptySlotFlags.~device_vector<bool>(); d_fingerprintsBySlot.~device_vector<unsigned int>(); d_newFingerprints.~device_vector<unsigned int>(); d_fingerprintsOutput.~mem_t<unsigned int>(); d_newFingerprintsMem.~mem_t<unsigned int>(); d_extractedFingerprintsMem.~mem_t<unsigned int>(); cudaFree(d_combinedRemainders); cudaFree(d_segmentStarts); cudaFree(d_shifts); cudaFree(d_overflows); delete[] h_changesMade; cudaFree(d_changesMade); return rebuildTime; } __host__ float mergeTwoFilters(quotient_filter qfilter1, quotient_filter qfilter2, bool NoDuplicates) { //merges filters qfilter1 and qfilter2 and outputs the result to qfilter1 //Check that filters are the same size if(qfilter1.qbits != qfilter2.qbits || qfilter1.rbits != qfilter2.rbits){ printf("Error: two filters to be merged must have same number of quotient and remainder bits\n"); return 0.0f; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); //Extract fingerprints from first quotient filter int numSlots = calcNumSlotsGPU(qfilter1.qbits, qfilter1.rbits); thrust::device_vector<bool> d_emptySlotFlags(numSlots); thrust::fill(d_emptySlotFlags.begin(), d_emptySlotFlags.end(), 0); bool* d_emptySlotFlagsArray = 
thrust::raw_pointer_cast(&d_emptySlotFlags[0]); thrust::device_vector<unsigned int> d_fingerprintsBySlot1(numSlots); thrust::fill(d_fingerprintsBySlot1.begin(), d_fingerprintsBySlot1.end(), 0); unsigned int* d_fingerprintsBySlotArray1 = thrust::raw_pointer_cast(&d_fingerprintsBySlot1[0]); int numBlocks = (numSlots + 191) / 192; dim3 extractQuotientsBlockDims1((numBlocks + 31) / 32, 32); extractQuotients<<<extractQuotientsBlockDims1, 192>>>(numSlots, qfilter1, d_fingerprintsBySlotArray1, d_emptySlotFlagsArray); thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintsEnd = thrust::remove_if(d_fingerprintsBySlot1.begin(), d_fingerprintsBySlot1.end(), d_emptySlotFlags.begin(), thrust::identity<bool>()); d_fingerprintsBySlot1.erase(fingerprintsEnd, d_fingerprintsBySlot1.end()); int numExtractedValues1 = d_fingerprintsBySlot1.end() - d_fingerprintsBySlot1.begin(); //Extract fingerprints from second quotient filter thrust::fill(d_emptySlotFlags.begin(), d_emptySlotFlags.end(), 0); thrust::device_vector<unsigned int> d_fingerprintsBySlot2(numSlots); thrust::fill(d_fingerprintsBySlot2.begin(), d_fingerprintsBySlot2.end(), 0); unsigned int* d_fingerprintsBySlotArray2 = thrust::raw_pointer_cast(&d_fingerprintsBySlot2[0]); numBlocks = (numSlots + 191) / 192; dim3 extractQuotientsBlockDims2((numBlocks + 31) / 32, 32); extractQuotients<<<extractQuotientsBlockDims2, 192>>>(numSlots, qfilter2, d_fingerprintsBySlotArray2, d_emptySlotFlagsArray); fingerprintsEnd = thrust::remove_if(d_fingerprintsBySlot2.begin(), d_fingerprintsBySlot2.end(), d_emptySlotFlags.begin(), thrust::identity<bool>()); d_fingerprintsBySlot2.erase(fingerprintsEnd, d_fingerprintsBySlot2.end()); int numExtractedValues2 = d_fingerprintsBySlot2.end() - d_fingerprintsBySlot2.begin(); //Merge arrays of extracted values mgpu::standard_context_t context(false); int outputSize = numExtractedValues1 + numExtractedValues2; mgpu::mem_t<unsigned int> d_fingerprintsOutput(outputSize, context); mgpu::mem_t<unsigned int> d_extractedFingerprintsMem1 = copy_to_mem(d_fingerprintsBySlotArray1, numExtractedValues1, context); mgpu::mem_t<unsigned int> d_extractedFingerprintsMem2 = copy_to_mem(d_fingerprintsBySlotArray2, numExtractedValues2, context); mgpu::merge(d_extractedFingerprintsMem1.data(), numExtractedValues1, d_extractedFingerprintsMem2.data(), numExtractedValues2, d_fingerprintsOutput.data(), mgpu::less_t<unsigned int>(), context); unsigned int* d_combinedQuotients = d_fingerprintsOutput.data(); //Rebuild filter using segmented layouts method //Clear old filter cudaMemset(qfilter1.table, 0, numSlots * sizeof(unsigned char)); //Remove duplicates, if desired thrust::device_vector<unsigned int> d_thrustQuotients(d_combinedQuotients, d_combinedQuotients + outputSize); //must copy values to get them into thrust device_vector if(NoDuplicates == true){ thrust::detail::normal_iterator< thrust::device_ptr<unsigned int> > fingerprintEnd = thrust::unique(d_thrustQuotients.begin(), d_thrustQuotients.end()); d_thrustQuotients.erase(fingerprintEnd, d_thrustQuotients.end()); outputSize = d_thrustQuotients.end() - d_thrustQuotients.begin(); } d_combinedQuotients = thrust::raw_pointer_cast(&d_thrustQuotients[0]); //Divide fingerprints into quotients and remainders unsigned int* d_combinedRemainders; cudaMalloc((void**) &d_combinedRemainders, outputSize * sizeof(unsigned int)); cudaMemset(d_combinedRemainders, 0, outputSize * sizeof(unsigned int)); numBlocks = (outputSize + 767) / 768; dim3 quotientingBlockDims((numBlocks + 31) / 32, 32); 
quotienting<<<quotientingBlockDims, 768>>>(outputSize, qfilter1.qbits, qfilter1.rbits, d_combinedQuotients, d_combinedRemainders); unsigned int q = qfilter1.qbits; int numSegments = ((1 << q) / q) + 1; //Determine which items belong in each segment unsigned int* d_segmentStarts; cudaMalloc((void**) &d_segmentStarts, numSegments * sizeof(unsigned int)); cudaMemset(d_segmentStarts, 0, numSegments * sizeof(unsigned int)); numBlocks = (outputSize + 255) / 256; dim3 findSegStartsBlockDims((numBlocks + 31) / 32, 32); findSegmentStarts<<<findSegStartsBlockDims, 256>>>(outputSize, q, d_combinedQuotients, d_segmentStarts); //Each segment has an input shift value and outputs overflow value int* d_shifts; cudaMalloc((void**) &d_shifts, numSegments * sizeof(int)); cudaMemset(d_shifts, 0, numSegments * sizeof(int)); int* d_overflows; cudaMalloc((void**) &d_overflows, numSegments * sizeof(int)); cudaMemset(d_overflows, 0, numSegments * sizeof(int)); bool* h_changesMade = new bool[1]; h_changesMade[0] = 1; bool* d_changesMade; cudaMalloc((void**) &d_changesMade, sizeof(bool)); cudaMemset(d_changesMade, 0, sizeof(bool)); while(h_changesMade[0] == 1){ //copy overflows into shifts cudaMemcpy(d_shifts, d_overflows, sizeof(int) * numSegments, cudaMemcpyDeviceToDevice); //Layout numBlocks = (numSegments + 255) / 256; dim3 layoutBlockDims((numBlocks + 31) / 32, 32); layout<<<layoutBlockDims, 256>>>(numSegments, qfilter1.qbits, d_combinedQuotients, d_segmentStarts, d_shifts, d_overflows, d_changesMade, outputSize); cudaMemcpy(h_changesMade, d_changesMade, sizeof(bool), cudaMemcpyDeviceToHost); cudaMemset(d_changesMade, 0, sizeof(bool)); } //Write final values to filter numBlocks = (numSegments + 127) / 128; dim3 segmentedQFWriteBlockDims((numBlocks + 31) / 32, 32); segmentedQFWrite<<<segmentedQFWriteBlockDims, 128>>>(numSegments, qfilter1, d_combinedQuotients, d_combinedRemainders, d_segmentStarts, d_shifts, outputSize); numBlocks = (outputSize + 511) / 512; dim3 setOccupiedBitsBlockDims((numBlocks + 31) / 32, 32); setOccupiedBits<<<setOccupiedBitsBlockDims, 512>>>(outputSize, qfilter1, d_combinedQuotients); //Timing cudaEventRecord(stop); cudaEventSynchronize(stop); float rebuildTime = 0; cudaEventElapsedTime(&rebuildTime, start, stop); //Free Memory d_emptySlotFlags.~device_vector<bool>(); d_fingerprintsBySlot1.~device_vector<unsigned int>(); d_fingerprintsBySlot2.~device_vector<unsigned int>(); d_fingerprintsOutput.~mem_t<unsigned int>(); d_extractedFingerprintsMem1.~mem_t<unsigned int>(); d_extractedFingerprintsMem2.~mem_t<unsigned int>(); cudaFree(d_combinedRemainders); cudaFree(d_segmentStarts); cudaFree(d_shifts); cudaFree(d_overflows); delete[] h_changesMade; cudaFree(d_changesMade); return rebuildTime; } }
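// ---------------------------------------------------------------------------
// Minimal sketch (not part of the original file above): every build and merge
// path in this quotient filter splits a hashed fingerprint into a quotient
// (the high qbits, i.e. the canonical slot) and a remainder (the low rbits),
// and extractQuotients reverses it as (slot << rbits) | remainder. The round
// trip below only illustrates that layout; LOW_BIT_MASK_SKETCH is a local
// stand-in assumed to match the source's LOW_BIT_MASK, and the qbits/rbits
// values are arbitrary examples, not values taken from the source.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdio>

#define LOW_BIT_MASK_SKETCH(n) ((1u << (n)) - 1u)  // assumed: mask of the n lowest bits

void fingerprintRoundTripDemo()
{
    const unsigned int qbits = 10, rbits = 5;
    const unsigned int fingerprint = 0x1ABCDu & LOW_BIT_MASK_SKETCH(qbits + rbits);

    // split: quotient = canonical slot, remainder = value stored in that slot
    const unsigned int fq = (fingerprint >> rbits) & LOW_BIT_MASK_SKETCH(qbits);
    const unsigned int fr = fingerprint & LOW_BIT_MASK_SKETCH(rbits);

    // reassemble the way extractQuotients does
    const unsigned int rebuilt = (fq << rbits) | fr;
    assert(rebuilt == fingerprint);
    printf("slot %u, remainder %u\n", fq, fr);
}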
3b1035a4c8fde7b0354750049b9e911b54c666d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <stdlib.h> #include <algorithm> #include <limits> #include <random/rng.cuh> #include <selection/kselection.cuh> namespace MLCommon { namespace Selection { template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> __global__ void sortTestKernel(TypeK *key) { KVArray<TypeV, TypeK, N, Greater> arr; #pragma unroll for (int i = 0; i < N; ++i) { arr.arr[i].val = (TypeV)raft::laneId(); arr.arr[i].key = (TypeK)raft::laneId(); } raft::warpFence(); arr.sort(); raft::warpFence(); #pragma unroll for (int i = 0; i < N; ++i) arr.arr[i].store(nullptr, key + threadIdx.x + i * TPB); } template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> void sortTest(TypeK *key) { TypeK *dkey; CUDA_CHECK(hipMalloc((void **)&dkey, sizeof(TypeK) * TPB * N)); hipLaunchKernelGGL(( sortTestKernel<TypeV, TypeK, N, TPB, Greater>), dim3(1), dim3(TPB), 0, 0, dkey); CUDA_CHECK(hipPeekAtLastError()); raft::update_host<TypeK>(key, dkey, TPB * N, 0); CUDA_CHECK(hipFree(dkey)); } /************************************************************************/ /********************** Add the function for CPU test *******************/ /************************************************************************/ template <typename TypeV, typename TypeK, bool Greater> int cmp(KVPair<TypeV, TypeK> a, KVPair<TypeV, TypeK> b) { if (Greater == 0) { return a.val > b.val; } else { return a.val < b.val; } } template <typename TypeV, typename TypeK, bool Greater> void partSortKVPair(KVPair<TypeV, TypeK> *arr, int N, int k) { std::partial_sort(arr, arr + k, arr + N, cmp<TypeV, TypeK, Greater>); } template <typename TypeV, typename TypeK, int N, bool Greater> void sortKVArray(KVArray<TypeV, TypeK, N, Greater> &arr) { std::sort(arr.arr, arr.arr + N, cmp<TypeV, TypeK, Greater>); } template <typename TypeV, typename TypeK, bool Greater> ::testing::AssertionResult checkResult(TypeV *d_arr, TypeV *d_outv, TypeK *d_outk, int rows, int N, int k, TypeV tolerance) { for (int rIndex = 0; rIndex < rows; rIndex++) { // input data TypeV *h_arr = new TypeV[N]; raft::update_host(h_arr, d_arr + rIndex * N, N, 0); KVPair<TypeV, TypeK> *topk = new KVPair<TypeV, TypeK>[N]; for (int j = 0; j < N; j++) { topk[j].val = h_arr[j]; topk[j].key = j; } // result reference TypeV *h_outv = new TypeV[k]; raft::update_host(h_outv, d_outv + rIndex * k, k, 0); TypeK *h_outk = new TypeK[k]; raft::update_host(h_outk, d_outk + rIndex * k, k, 0); // calculate the result partSortKVPair<TypeV, TypeK, Greater>(topk, N, k); // check result for (int j = 0; j < k; j++) { // std::cout<<"Get value at ("<<rIndex<<" "<<j<<") Cpu " // <<topk[j].val<<" "<<topk[j].key<<" Gpu "<<h_outv[j]<<" " //<<h_outk[j] <<std::endl<<std::endl; if (abs(h_outv[j] - topk[j].val) > tolerance) { return ::testing::AssertionFailure() << "actual=" << topk[j].val 
<< " != expected=" << h_outv[j]; } } // delete resource delete[] h_arr; delete[] h_outv; delete[] h_outk; delete[] topk; } return ::testing::AssertionSuccess(); } // Structure WarpTopKInputs template <typename T> struct WarpTopKInputs { T tolerance; int rows; // batch size int cols; // N the length of variables int k; // the top-k value unsigned long long int seed; // seed to generate data }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const WarpTopKInputs<T> &dims) { return os; } // Define functions WarpTopKTest template <typename T> class WarpTopKTest : public ::testing::TestWithParam<WarpTopKInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<WarpTopKInputs<T>>::GetParam(); raft::random::Rng r(params.seed); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); raft::allocate(arr, params.rows * params.cols); raft::allocate(outk, params.rows * params.k); raft::allocate(outv, params.rows * params.k); r.uniform(arr, params.rows * params.cols, T(-1.0), T(1.0), stream); static const bool Sort = false; static const bool Greater = true; warpTopK<T, int, Greater, Sort>(outv, outk, arr, params.k, params.rows, params.cols, stream); CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(outv)); CUDA_CHECK(hipFree(outk)); CUDA_CHECK(hipFree(arr)); } protected: WarpTopKInputs<T> params; T *arr, *outv; int *outk; }; // Parameters // Milestone 1: Verify the result of current implementation // Milestone 2: Support all the values of k between 1 and 1024; both inclusive // Milestone 2.1: Using the POC code to Support all the values const std::vector<WarpTopKInputs<float>> inputs2_0 = { {0.00000001, 2, 1024, 256, 1234ULL}}; const std::vector<WarpTopKInputs<float>> inputs2_1 = { {0.00000001, 4, 2048, 1024, 1234ULL}}; const std::vector<WarpTopKInputs<float>> inputs2_2 = { {0.00000001, 4, 2048, 1, 1234ULL}}; // Milestone 2.2: Using the full thread queue and warp queue code to support // all the values // @TODO: Milestone 3: Support not sorted // @TODO: Milestone 4: Support multi-gpu // Define the function TEST_P typedef WarpTopKTest<float> TestD2_0; typedef WarpTopKTest<float> TestD2_1; typedef WarpTopKTest<float> TestD2_2; TEST_P(TestD2_0, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr, outv, outk, params.rows, params.cols, params.k, params.tolerance))); } TEST_P(TestD2_1, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr, outv, outk, params.rows, params.cols, params.k, params.tolerance))); } TEST_P(TestD2_2, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr, outv, outk, params.rows, params.cols, params.k, params.tolerance))); } // Instantiate INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_0, ::testing::ValuesIn(inputs2_0)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_1, ::testing::ValuesIn(inputs2_1)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_2, ::testing::ValuesIn(inputs2_2)); } // end namespace Selection } // end namespace MLCommon
3b1035a4c8fde7b0354750049b9e911b54c666d1.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <stdlib.h> #include <algorithm> #include <limits> #include <random/rng.cuh> #include <selection/kselection.cuh> namespace MLCommon { namespace Selection { template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> __global__ void sortTestKernel(TypeK *key) { KVArray<TypeV, TypeK, N, Greater> arr; #pragma unroll for (int i = 0; i < N; ++i) { arr.arr[i].val = (TypeV)raft::laneId(); arr.arr[i].key = (TypeK)raft::laneId(); } raft::warpFence(); arr.sort(); raft::warpFence(); #pragma unroll for (int i = 0; i < N; ++i) arr.arr[i].store(nullptr, key + threadIdx.x + i * TPB); } template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> void sortTest(TypeK *key) { TypeK *dkey; CUDA_CHECK(cudaMalloc((void **)&dkey, sizeof(TypeK) * TPB * N)); sortTestKernel<TypeV, TypeK, N, TPB, Greater><<<1, TPB>>>(dkey); CUDA_CHECK(cudaPeekAtLastError()); raft::update_host<TypeK>(key, dkey, TPB * N, 0); CUDA_CHECK(cudaFree(dkey)); } /************************************************************************/ /********************** Add the function for CPU test *******************/ /************************************************************************/ template <typename TypeV, typename TypeK, bool Greater> int cmp(KVPair<TypeV, TypeK> a, KVPair<TypeV, TypeK> b) { if (Greater == 0) { return a.val > b.val; } else { return a.val < b.val; } } template <typename TypeV, typename TypeK, bool Greater> void partSortKVPair(KVPair<TypeV, TypeK> *arr, int N, int k) { std::partial_sort(arr, arr + k, arr + N, cmp<TypeV, TypeK, Greater>); } template <typename TypeV, typename TypeK, int N, bool Greater> void sortKVArray(KVArray<TypeV, TypeK, N, Greater> &arr) { std::sort(arr.arr, arr.arr + N, cmp<TypeV, TypeK, Greater>); } template <typename TypeV, typename TypeK, bool Greater> ::testing::AssertionResult checkResult(TypeV *d_arr, TypeV *d_outv, TypeK *d_outk, int rows, int N, int k, TypeV tolerance) { for (int rIndex = 0; rIndex < rows; rIndex++) { // input data TypeV *h_arr = new TypeV[N]; raft::update_host(h_arr, d_arr + rIndex * N, N, 0); KVPair<TypeV, TypeK> *topk = new KVPair<TypeV, TypeK>[N]; for (int j = 0; j < N; j++) { topk[j].val = h_arr[j]; topk[j].key = j; } // result reference TypeV *h_outv = new TypeV[k]; raft::update_host(h_outv, d_outv + rIndex * k, k, 0); TypeK *h_outk = new TypeK[k]; raft::update_host(h_outk, d_outk + rIndex * k, k, 0); // calculate the result partSortKVPair<TypeV, TypeK, Greater>(topk, N, k); // check result for (int j = 0; j < k; j++) { // std::cout<<"Get value at ("<<rIndex<<" "<<j<<") Cpu " // <<topk[j].val<<" "<<topk[j].key<<" Gpu "<<h_outv[j]<<" " //<<h_outk[j] <<std::endl<<std::endl; if (abs(h_outv[j] - topk[j].val) > tolerance) { return ::testing::AssertionFailure() << "actual=" << topk[j].val << " != expected=" << h_outv[j]; } } // delete resource delete[] h_arr; delete[] h_outv; delete[] h_outk; delete[] topk; 
} return ::testing::AssertionSuccess(); } // Structure WarpTopKInputs template <typename T> struct WarpTopKInputs { T tolerance; int rows; // batch size int cols; // N the length of variables int k; // the top-k value unsigned long long int seed; // seed to generate data }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const WarpTopKInputs<T> &dims) { return os; } // Define functions WarpTopKTest template <typename T> class WarpTopKTest : public ::testing::TestWithParam<WarpTopKInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<WarpTopKInputs<T>>::GetParam(); raft::random::Rng r(params.seed); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); raft::allocate(arr, params.rows * params.cols); raft::allocate(outk, params.rows * params.k); raft::allocate(outv, params.rows * params.k); r.uniform(arr, params.rows * params.cols, T(-1.0), T(1.0), stream); static const bool Sort = false; static const bool Greater = true; warpTopK<T, int, Greater, Sort>(outv, outk, arr, params.k, params.rows, params.cols, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(outv)); CUDA_CHECK(cudaFree(outk)); CUDA_CHECK(cudaFree(arr)); } protected: WarpTopKInputs<T> params; T *arr, *outv; int *outk; }; // Parameters // Milestone 1: Verify the result of current implementation // Milestone 2: Support all the values of k between 1 and 1024; both inclusive // Milestone 2.1: Using the POC code to Support all the values const std::vector<WarpTopKInputs<float>> inputs2_0 = { {0.00000001, 2, 1024, 256, 1234ULL}}; const std::vector<WarpTopKInputs<float>> inputs2_1 = { {0.00000001, 4, 2048, 1024, 1234ULL}}; const std::vector<WarpTopKInputs<float>> inputs2_2 = { {0.00000001, 4, 2048, 1, 1234ULL}}; // Milestone 2.2: Using the full thread queue and warp queue code to support // all the values // @TODO: Milestone 3: Support not sorted // @TODO: Milestone 4: Support multi-gpu // Define the function TEST_P typedef WarpTopKTest<float> TestD2_0; typedef WarpTopKTest<float> TestD2_1; typedef WarpTopKTest<float> TestD2_2; TEST_P(TestD2_0, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr, outv, outk, params.rows, params.cols, params.k, params.tolerance))); } TEST_P(TestD2_1, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr, outv, outk, params.rows, params.cols, params.k, params.tolerance))); } TEST_P(TestD2_2, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr, outv, outk, params.rows, params.cols, params.k, params.tolerance))); } // Instantiate INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_0, ::testing::ValuesIn(inputs2_0)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_1, ::testing::ValuesIn(inputs2_1)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_2, ::testing::ValuesIn(inputs2_2)); } // end namespace Selection } // end namespace MLCommon
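// ---------------------------------------------------------------------------
// Standalone illustration (not from the test above): checkResult builds its CPU
// reference with std::partial_sort and the cmp<> comparator. The snippet below
// only shows how the comparator direction decides whether the first k slots end
// up holding the smallest or the largest values; it says nothing about the GPU
// warpTopK path, and the sample values are arbitrary.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdio>

void partialSortDirectionDemo()
{
    float v1[] = {0.3f, -0.7f, 0.9f, 0.1f, -0.2f, 0.5f};
    float v2[] = {0.3f, -0.7f, 0.9f, 0.1f, -0.2f, 0.5f};
    const int n = 6, k = 3;

    // less-than comparator: the first k slots hold the k smallest values, ascending
    std::partial_sort(v1, v1 + k, v1 + n, [](float a, float b) { return a < b; });
    // greater-than comparator: the first k slots hold the k largest values, descending
    std::partial_sort(v2, v2 + k, v2 + n, [](float a, float b) { return a > b; });

    for (int i = 0; i < k; ++i) printf("%f %f\n", v1[i], v2[i]);
}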
87f5d258fdc92d275adec4def4f7938afc07aa55.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Recomputes each cluster centroid from the per-cluster color sums and counts,
// then resets the accumulators for the next iteration.
__global__ void refreshClusters(dim3 *sum, dim3 *cluster, int *counter)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(counter[i] != 0) {
        cluster[i].x = sum[i].x / counter[i];
        cluster[i].y = sum[i].y / counter[i];
        cluster[i].z = sum[i].z / counter[i];
    }
    else {
        // empty cluster: reset all three centroid components
        cluster[i].x = cluster[i].y = cluster[i].z = 0;
    }
    sum[i] = dim3(0, 0, 0);
    counter[i] = 0;
}
87f5d258fdc92d275adec4def4f7938afc07aa55.cu
#include "includes.h"

// Recomputes each cluster centroid from the per-cluster color sums and counts,
// then resets the accumulators for the next iteration.
__global__ void refreshClusters(dim3 *sum, dim3 *cluster, int *counter)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(counter[i] != 0) {
        cluster[i].x = sum[i].x / counter[i];
        cluster[i].y = sum[i].y / counter[i];
        cluster[i].z = sum[i].z / counter[i];
    }
    else {
        // empty cluster: reset all three centroid components
        cluster[i].x = cluster[i].y = cluster[i].z = 0;
    }
    sum[i] = dim3(0, 0, 0);
    counter[i] = 0;
}
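// ---------------------------------------------------------------------------
// Hypothetical host driver for refreshClusters (not part of the original pair
// above). The kernel has no bounds check on i, so the arrays are padded to the
// full launch size here; the cluster count k, the block size, and the preceding
// assignment step that fills sum/counter are all assumptions for illustration.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void refreshClusters(dim3 *sum, dim3 *cluster, int *counter);  // defined in the file above

void refreshClustersDemo()
{
    const int k = 8;                                 // example number of clusters
    const int threads = 32;
    const int blocks = (k + threads - 1) / threads;
    const int padded = blocks * threads;             // one array slot per launched thread

    dim3 *d_sum, *d_cluster;
    int *d_counter;
    cudaMalloc(&d_sum, padded * sizeof(dim3));
    cudaMalloc(&d_cluster, padded * sizeof(dim3));
    cudaMalloc(&d_counter, padded * sizeof(int));
    cudaMemset(d_sum, 0, padded * sizeof(dim3));
    cudaMemset(d_cluster, 0, padded * sizeof(dim3));
    cudaMemset(d_counter, 0, padded * sizeof(int));

    // ... an assignment kernel would accumulate per-cluster color sums and counts here ...

    refreshClusters<<<blocks, threads>>>(d_sum, d_cluster, d_counter);
    cudaDeviceSynchronize();

    cudaFree(d_sum);
    cudaFree(d_cluster);
    cudaFree(d_counter);
}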
a227c98c3d9dc767feacbf3b8448065dcdff6c9e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255

typedef struct {
  unsigned char red, green, blue;
} PPMPixel;

typedef struct {
  int x, y;
  PPMPixel *data;
} PPMImage;

static PPMImage *readPPM(const char *filename) {
  char buff[16];
  PPMImage *img;
  FILE *fp;
  int c, rgb_comp_color;

  fp = fopen(filename, "rb");
  if (!fp) {
    fprintf(stderr, "Unable to open file '%s'\n", filename);
    exit(1);
  }
  if (!fgets(buff, sizeof(buff), fp)) {
    perror(filename);
    exit(1);
  }
  if (buff[0] != 'P' || buff[1] != '6') {
    fprintf(stderr, "Invalid image format (must be 'P6')\n");
    exit(1);
  }
  img = (PPMImage *)malloc(sizeof(PPMImage));
  if (!img) {
    fprintf(stderr, "Unable to allocate memory\n");
    exit(1);
  }
  c = getc(fp);
  while (c == '#') {
    while (getc(fp) != '\n')
      ;
    c = getc(fp);
  }
  ungetc(c, fp);
  if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
    fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
    exit(1);
  }
  if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
    fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
    exit(1);
  }
  if (rgb_comp_color != RGB_COMPONENT_COLOR) {
    fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
    exit(1);
  }
  while (fgetc(fp) != '\n')
    ;
  img->data = (PPMPixel *)malloc(img->x * img->y * sizeof(PPMPixel));
  if (!img->data) {  // check the pixel buffer allocation
    fprintf(stderr, "Unable to allocate memory\n");
    exit(1);
  }
  if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
    fprintf(stderr, "Error loading image '%s'\n", filename);
    exit(1);
  }
  fclose(fp);
  return img;
}

__global__ void Histogram(PPMImage *image, float *h) {
  printf("TODO: Implement this kernel!\n");
}

int main(int argc, char *argv[]) {
  double t_start, t_end;
  int i;
  char filename[255];

  if (argc < 2) {
    fprintf(stderr, "Error: missing path to input file\n");
    return 1;
  }

  PPMImage *image = readPPM(argv[1]);
  float *h = (float *)malloc(sizeof(float) * 64);

  // Initialize h
  for (i = 0; i < 64; i++)
    h[i] = 0.0;

  t_start = omp_get_wtime();
  hipLaunchKernelGGL(( Histogram), dim3(1), dim3(1), 0, 0, image, h);
  t_end = omp_get_wtime();

  for (i = 0; i < 64; i++) {
    printf("%0.3f ", h[i]);
  }
  printf("\n");
  fprintf(stderr, "%lf\n", t_end - t_start);

  free(h);
}
a227c98c3d9dc767feacbf3b8448065dcdff6c9e.cu
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255

typedef struct {
  unsigned char red, green, blue;
} PPMPixel;

typedef struct {
  int x, y;
  PPMPixel *data;
} PPMImage;

static PPMImage *readPPM(const char *filename) {
  char buff[16];
  PPMImage *img;
  FILE *fp;
  int c, rgb_comp_color;

  fp = fopen(filename, "rb");
  if (!fp) {
    fprintf(stderr, "Unable to open file '%s'\n", filename);
    exit(1);
  }
  if (!fgets(buff, sizeof(buff), fp)) {
    perror(filename);
    exit(1);
  }
  if (buff[0] != 'P' || buff[1] != '6') {
    fprintf(stderr, "Invalid image format (must be 'P6')\n");
    exit(1);
  }
  img = (PPMImage *)malloc(sizeof(PPMImage));
  if (!img) {
    fprintf(stderr, "Unable to allocate memory\n");
    exit(1);
  }
  c = getc(fp);
  while (c == '#') {
    while (getc(fp) != '\n')
      ;
    c = getc(fp);
  }
  ungetc(c, fp);
  if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
    fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
    exit(1);
  }
  if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
    fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
    exit(1);
  }
  if (rgb_comp_color != RGB_COMPONENT_COLOR) {
    fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
    exit(1);
  }
  while (fgetc(fp) != '\n')
    ;
  img->data = (PPMPixel *)malloc(img->x * img->y * sizeof(PPMPixel));
  if (!img->data) {  // check the pixel buffer allocation
    fprintf(stderr, "Unable to allocate memory\n");
    exit(1);
  }
  if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
    fprintf(stderr, "Error loading image '%s'\n", filename);
    exit(1);
  }
  fclose(fp);
  return img;
}

__global__ void Histogram(PPMImage *image, float *h) {
  printf("TODO: Implement this kernel!\n");
}

int main(int argc, char *argv[]) {
  double t_start, t_end;
  int i;
  char filename[255];

  if (argc < 2) {
    fprintf(stderr, "Error: missing path to input file\n");
    return 1;
  }

  PPMImage *image = readPPM(argv[1]);
  float *h = (float *)malloc(sizeof(float) * 64);

  // Initialize h
  for (i = 0; i < 64; i++)
    h[i] = 0.0;

  t_start = omp_get_wtime();
  Histogram<<<1, 1>>>(image, h);
  t_end = omp_get_wtime();

  for (i = 0; i < 64; i++) {
    printf("%0.3f ", h[i]);
  }
  printf("\n");
  fprintf(stderr, "%lf\n", t_end - t_start);

  free(h);
}
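// ---------------------------------------------------------------------------
// The skeleton above deliberately leaves Histogram() unimplemented and launches
// it with host pointers and a single thread. The kernel below is only a sketch
// of one common 64-bin interpretation (each 8-bit channel quantized to 4 levels,
// bin = 16*r + 4*g + b); it is not the intended solution. It reuses PPMPixel
// from the skeleton and assumes the pixel data and histogram have been copied
// to device memory and that the launch uses one thread per pixel.
// ---------------------------------------------------------------------------
__global__ void HistogramSketch(const PPMPixel *pixels, int numPixels, float *h)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= numPixels)
    return;

  // quantize each 8-bit channel to 4 levels (0..3)
  int r = pixels[i].red / 64;
  int g = pixels[i].green / 64;
  int b = pixels[i].blue / 64;

  // 4 x 4 x 4 = 64 bins; float atomicAdd keeps concurrent updates correct
  atomicAdd(&h[16 * r + 4 * g + b], 1.0f);
}
// Host side (sketch): copy the pixel buffer and a zeroed 64-float histogram to the
// device, launch with (numPixels + 255) / 256 blocks of 256 threads, copy the
// histogram back, and divide each bin by numPixels if normalized fractions are wanted.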
a693009957c02e6c89c7d82ddc6e30ef48abbb42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]) // #include <helpers/PointersManager.h> #include <math/templatemath.h> #include <ops/declarable/helpers/convolutions.h> namespace sd { namespace ops { ////////////////////////////////////////////////////////////////////////// template <typename T> SD_KERNEL static void pooling2dBPCuda(const void* vx, const sd::LongType* xShapeInfo, const void* vy, const sd::LongType* yShapeInfo, void* vz, const sd::LongType* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x: input [bS, iC, iH, iW] // y: gradO [bS, iC, oH, oW] // z: gradI [bS, iC, iH, iW] -> gradI is output in this function const T* x = reinterpret_cast<const T*>(vx); const T* y = reinterpret_cast<const T*>(vy); T* z = reinterpret_cast<T*>(vz); sd::LongType coord2, coord3; __shared__ int rank, kHeff, kWeff, iH, iW, kProd; __shared__ sd::LongType yLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<sd::LongType*>(shmem); yLen = shape::length(yShapeInfo); rank = 4; kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iH = xShapeInfo[3]; iW = xShapeInfo[4]; kProd = kH * kW; } __syncthreads(); const auto yInd = threadIdx.x + blockIdx.x * blockDim.x; if (yInd >= yLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(yInd, yShapeInfo, coords); const auto yOffset = shape::getOffset(yShapeInfo, coords); int hstart = coords[2] * sH - pH; int wstart = coords[3] * sW - pW; int hend = hstart + kHeff; int wend = wstart + kWeff; if (hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if (wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if (hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if (wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch (poolingMode) { /*** max ***/ case 0: { coord2 = hstart; coord3 = wstart; T max = -DataTypeUtils::max<T>(); for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) { for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) { T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) { max = val; coord2 = coords[2]; coord3 = coords[3]; } } } coords[2] = coord2; coords[3] = coord3; auto zOffset = shape::getOffset(zShapeInfo, coords); sd::math::atomics::sd_atomicAdd<T>(&z[zOffset], y[yOffset]); // z[zOffset] += y[yOffset]; } break; /*** avg ***/ case 1: { T val = y[yOffset]; if (extraParam0 == 0) // Exclude padding val /= sd::math::sd_ceil<double, T>(static_cast<double>(hend - hstart) / 
static_cast<double>(dH)) * sd::math::sd_ceil<double, T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); // Accounts for dilation else if (extraParam0 == 1) // Include padding val /= kProd; for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) sd::math::atomics::sd_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val); } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); T val = y[yOffset]; for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) sum += sd::math::sd_pow<T, T, T>(sd::math::sd_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); val *= sd::math::sd_pow<T, T, T>(sum, ((T)1.f - extraParam0) / extraParam0); for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) { for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) { const auto xOffset = shape::getOffset(xShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); sd::math::atomics::sd_atomicAdd<T>( &z[zOffset], val * sd::math::sd_pow<T, T, T>(sd::math::sd_abs<T>(x[xOffset]), extraParam0 - 1.f) * sd::math::sd_sgn<T, T>(x[xOffset])); } } } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling2dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t* stream, const void* vx, const sd::LongType* xShapeInfo, const void* vy, const sd::LongType* yShapeInfo, void* vz, const sd::LongType* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { hipLaunchKernelGGL(( pooling2dBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling2dBP(sd::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { // initial zeroing of gradI gradI.nullify(); PointersManager manager(block.launchContext(), "pooling2dBP"); const int threadsPerBlock = 256; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = gradO.rankOf() * sizeof(sd::LongType) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&gradI}, {&input, &gradO}); BUILD_SINGLE_SELECTOR( input.dataType(), pooling2dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0), SD_NUMERIC_TYPES); NDArray::registerSpecialUse({&gradI}, {&input, &gradO}); manager.synchronize(); } } // namespace ops } // namespace sd
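// ---------------------------------------------------------------------------
// Standalone illustration (not from the library above): pooling2dBP sizes its
// dynamic shared memory as rank * sizeof(sd::LongType) * threadsPerBlock plus
// padding, and each thread of pooling2dBPCuda then takes a private rank-sized
// slice of that buffer for its coords scratch. The kernel below only shows that
// per-thread slicing pattern; the names, types, and sizes are arbitrary.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>

__global__ void perThreadScratch(int rank, long long *out)
{
    extern __shared__ long long scratch[];
    long long *coords = scratch + threadIdx.x * rank;  // this thread's private slice

    for (int d = 0; d < rank; ++d) coords[d] = threadIdx.x * 10 + d;
    out[threadIdx.x] = coords[rank - 1];               // read back the last coordinate
}

void perThreadScratchDemo()
{
    const int rank = 4, threads = 8;
    long long *d_out;
    cudaMalloc(&d_out, threads * sizeof(long long));

    size_t shmem = rank * sizeof(long long) * threads;  // mirrors the sizing used by pooling2dBP
    perThreadScratch<<<1, threads, shmem>>>(rank, d_out);

    long long h_out[threads];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    for (int t = 0; t < threads; ++t) printf("thread %d -> %lld\n", t, h_out[t]);
    cudaFree(d_out);
}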
a693009957c02e6c89c7d82ddc6e30ef48abbb42.cu
/* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]) // #include <helpers/PointersManager.h> #include <math/templatemath.h> #include <ops/declarable/helpers/convolutions.h> namespace sd { namespace ops { ////////////////////////////////////////////////////////////////////////// template <typename T> SD_KERNEL static void pooling2dBPCuda(const void* vx, const sd::LongType* xShapeInfo, const void* vy, const sd::LongType* yShapeInfo, void* vz, const sd::LongType* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x: input [bS, iC, iH, iW] // y: gradO [bS, iC, oH, oW] // z: gradI [bS, iC, iH, iW] -> gradI is output in this function const T* x = reinterpret_cast<const T*>(vx); const T* y = reinterpret_cast<const T*>(vy); T* z = reinterpret_cast<T*>(vz); sd::LongType coord2, coord3; __shared__ int rank, kHeff, kWeff, iH, iW, kProd; __shared__ sd::LongType yLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<sd::LongType*>(shmem); yLen = shape::length(yShapeInfo); rank = 4; kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iH = xShapeInfo[3]; iW = xShapeInfo[4]; kProd = kH * kW; } __syncthreads(); const auto yInd = threadIdx.x + blockIdx.x * blockDim.x; if (yInd >= yLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(yInd, yShapeInfo, coords); const auto yOffset = shape::getOffset(yShapeInfo, coords); int hstart = coords[2] * sH - pH; int wstart = coords[3] * sW - pW; int hend = hstart + kHeff; int wend = wstart + kWeff; if (hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if (wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if (hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if (wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch (poolingMode) { /*** max ***/ case 0: { coord2 = hstart; coord3 = wstart; T max = -DataTypeUtils::max<T>(); for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) { for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) { T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) { max = val; coord2 = coords[2]; coord3 = coords[3]; } } } coords[2] = coord2; coords[3] = coord3; auto zOffset = shape::getOffset(zShapeInfo, coords); sd::math::atomics::sd_atomicAdd<T>(&z[zOffset], y[yOffset]); // z[zOffset] += y[yOffset]; } break; /*** avg ***/ case 1: { T val = y[yOffset]; if (extraParam0 == 0) // Exclude padding val /= sd::math::sd_ceil<double, T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * sd::math::sd_ceil<double, T>(static_cast<double>(wend - wstart) / 
static_cast<double>(dW)); // Accounts for dilation else if (extraParam0 == 1) // Include padding val /= kProd; for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) sd::math::atomics::sd_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val); } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); T val = y[yOffset]; for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) sum += sd::math::sd_pow<T, T, T>(sd::math::sd_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); val *= sd::math::sd_pow<T, T, T>(sum, ((T)1.f - extraParam0) / extraParam0); for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) { for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) { const auto xOffset = shape::getOffset(xShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); sd::math::atomics::sd_atomicAdd<T>( &z[zOffset], val * sd::math::sd_pow<T, T, T>(sd::math::sd_abs<T>(x[xOffset]), extraParam0 - 1.f) * sd::math::sd_sgn<T, T>(x[xOffset])); } } } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling2dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t* stream, const void* vx, const sd::LongType* xShapeInfo, const void* vy, const sd::LongType* yShapeInfo, void* vz, const sd::LongType* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { pooling2dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>( vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling2dBP(sd::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) { // initial zeroing of gradI gradI.nullify(); PointersManager manager(block.launchContext(), "pooling2dBP"); const int threadsPerBlock = 256; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = gradO.rankOf() * sizeof(sd::LongType) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&gradI}, {&input, &gradO}); BUILD_SINGLE_SELECTOR( input.dataType(), pooling2dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0), SD_NUMERIC_TYPES); NDArray::registerSpecialUse({&gradI}, {&input, &gradO}); manager.synchronize(); } } // namespace ops } // namespace sd
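// The .hip and .cu versions of the launcher above differ only in the kernel-launch call:
// hipify rewrites CUDA's <<<...>>> syntax into hipLaunchKernelGGL with explicit dim3 arguments.
// A minimal sketch of that mapping, using a hypothetical kernel fooKernel that is not part of either file:
__global__ void fooKernel(float* out, int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = 0.0f;
}

void launchFoo(float* out, int n, cudaStream_t stream) {
  const int threadsPerBlock = 256;
  const int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
  // CUDA launch, as written in the .cu file:
  fooKernel<<<blocksPerGrid, threadsPerBlock, 0, stream>>>(out, n);
  // Equivalent HIP launch produced by hipify, as written in the .hip file:
  //   hipLaunchKernelGGL((fooKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, stream, out, n);
}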
dbeaaaf243c7cc05b53acab615624934fb7281e1.hip
// !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorMathCompare.cuh>
#include <THH/THHTensor.hpp>
#include <THH/generic/THHTensorMathCompare.hip>
#include <THH/THHGenerateBFloat16Type.h>
dbeaaaf243c7cc05b53acab615624934fb7281e1.cu
#include <THC/THCTensorMathCompare.cuh>
#include <THC/THCTensor.hpp>
#include <THC/generic/THCTensorMathCompare.cu>
#include <THC/THCGenerateBFloat16Type.h>
b38ca5e82bbf9d77d3b04d811cbd4a00667fb339.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_plus_2_b; int xdim0_update_halo_kernel3_plus_2_b_h = -1; __constant__ int ydim0_update_halo_kernel3_plus_2_b; int ydim0_update_halo_kernel3_plus_2_b_h = -1; __constant__ int xdim1_update_halo_kernel3_plus_2_b; int xdim1_update_halo_kernel3_plus_2_b_h = -1; __constant__ int ydim1_update_halo_kernel3_plus_2_b; int ydim1_update_halo_kernel3_plus_2_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel3_plus_2_b * (y) + \ xdim0_update_halo_kernel3_plus_2_b * ydim0_update_halo_kernel3_plus_2_b * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel3_plus_2_b * (y) + \ xdim1_update_halo_kernel3_plus_2_b * ydim1_update_halo_kernel3_plus_2_b * \ (z)) // user function __device__ inline void update_halo_kernel3_plus_2_b_gpu(double *vol_flux_x, double *mass_flux_x, const int *fields) { if (fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, -2, 0)]; if (fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, -2, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_plus_2_b(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_2_b + idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_2_b * ydim0_update_halo_kernel3_plus_2_b; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_2_b + idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_2_b * ydim1_update_halo_kernel3_plus_2_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_plus_2_b_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel3_plus_2_b(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 108)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(108, "update_halo_kernel3_plus_2_b"); OPS_kernels[108].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = 
args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_plus_2_b_h || ydim0 != ydim0_update_halo_kernel3_plus_2_b_h || xdim1 != xdim1_update_halo_kernel3_plus_2_b_h || ydim1 != ydim1_update_halo_kernel3_plus_2_b_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel3_plus_2_b, &xdim0, sizeof(int)); xdim0_update_halo_kernel3_plus_2_b_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel3_plus_2_b, &ydim0, sizeof(int)); ydim0_update_halo_kernel3_plus_2_b_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel3_plus_2_b, &xdim1, sizeof(int)); xdim1_update_halo_kernel3_plus_2_b_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel3_plus_2_b, &ydim1, sizeof(int)); ydim1_update_halo_kernel3_plus_2_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[108].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel3_plus_2_b), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[108].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[108].mpi_time += t2 - t1; OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
b38ca5e82bbf9d77d3b04d811cbd4a00667fb339.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_plus_2_b; int xdim0_update_halo_kernel3_plus_2_b_h = -1; __constant__ int ydim0_update_halo_kernel3_plus_2_b; int ydim0_update_halo_kernel3_plus_2_b_h = -1; __constant__ int xdim1_update_halo_kernel3_plus_2_b; int xdim1_update_halo_kernel3_plus_2_b_h = -1; __constant__ int ydim1_update_halo_kernel3_plus_2_b; int ydim1_update_halo_kernel3_plus_2_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel3_plus_2_b * (y) + \ xdim0_update_halo_kernel3_plus_2_b * ydim0_update_halo_kernel3_plus_2_b * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel3_plus_2_b * (y) + \ xdim1_update_halo_kernel3_plus_2_b * ydim1_update_halo_kernel3_plus_2_b * \ (z)) // user function __device__ inline void update_halo_kernel3_plus_2_b_gpu(double *vol_flux_x, double *mass_flux_x, const int *fields) { if (fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, -2, 0)]; if (fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, -2, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_plus_2_b(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_2_b + idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_2_b * ydim0_update_halo_kernel3_plus_2_b; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_2_b + idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_2_b * ydim1_update_halo_kernel3_plus_2_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_plus_2_b_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel3_plus_2_b(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 108)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(108, "update_halo_kernel3_plus_2_b"); OPS_kernels[108].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_plus_2_b_h || ydim0 != 
ydim0_update_halo_kernel3_plus_2_b_h || xdim1 != xdim1_update_halo_kernel3_plus_2_b_h || ydim1 != ydim1_update_halo_kernel3_plus_2_b_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel3_plus_2_b, &xdim0, sizeof(int)); xdim0_update_halo_kernel3_plus_2_b_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel3_plus_2_b, &ydim0, sizeof(int)); ydim0_update_halo_kernel3_plus_2_b_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel3_plus_2_b, &xdim1, sizeof(int)); xdim1_update_halo_kernel3_plus_2_b_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel3_plus_2_b, &ydim1, sizeof(int)); ydim1_update_halo_kernel3_plus_2_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[108].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel3_plus_2_b<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[108].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[108].mpi_time += t2 - t1; OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
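// Both versions of this generated loop cache the dat extents in __constant__ memory and re-upload
// them only when the host-side shadow value (the *_h variable) differs; hipify's only edits here are
// cudaMemcpyToSymbol -> hipMemcpyToSymbol, cudaDeviceSynchronize -> hipDeviceSynchronize, and the
// launch syntax. A stripped-down sketch of that constant-caching pattern, with hypothetical names:
__constant__ int c_xdim;      // device-side copy read by the kernel
static int c_xdim_h = -1;     // host-side shadow of the value last uploaded

void setXdim(int xdim) {
  if (xdim != c_xdim_h) {     // skip the copy when the extent is unchanged between calls
    cudaMemcpyToSymbol(c_xdim, &xdim, sizeof(int));
    c_xdim_h = xdim;
  }
}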
55541f09c99ef60c6186af298baac46c9741c293.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string.h>

#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <cuda_gl_interop.h>
#include <hip/hip_runtime_api.h>

#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>

extern "C"
{
    int getCudaEnabledDeviceCount()
    {
        int count;
        hipError_t error = hipGetDeviceCount( &count );

        if (error == hipErrorNoDevice)
        {
            printf("NO DEVICE\n");
            count = 0;
        }
        else if (error == hipErrorInsufficientDriver)
        {
            count = -1;
        }
        else //should never happen
        {
            checkCudaErrors(error);
        }
        return count;
    }

    bool cudaInit(int argc, char **argv)
    {
        int count = getCudaEnabledDeviceCount();
        printf("count:%d\n",count);
        int devID = findCudaGLDevice(argc, (const char **)argv);

        if (devID < 0)
        {
            printf("No CUDA Capable devices found, exiting...\n");
            exit(EXIT_SUCCESS);
            return false;
        }
        else
        {
            hipDeviceProp_t deviceProp;
            checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
            printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
                   devID, deviceProp.name, deviceProp.major, deviceProp.minor);
            return true;
        }
    }

    void cudaGLInit(int argc, char **argv)
    {
        // use command-line specified CUDA device, otherwise use device with highest Gflops/s
        findCudaGLDevice(argc, (const char **)argv);
    }

    void allocateArray(void **devPtr, size_t size)
    {
        hipMalloc(devPtr, size);
    }

    void freeArray(void *devPtr)
    {
        hipFree(devPtr);
    }

    void threadSync()
    {
        hipDeviceSynchronize();
    }

    void copyArrayToDevice(void *device, const void *host, int offset, int size)
    {
        hipMemcpy((char *) device + offset, host, size, hipMemcpyHostToDevice);
    }

    void copyArrayDeviceToDevice(void *device, const void *host, int offset, int size)
    {
        hipMemcpy((char *) device + offset, host, size, hipMemcpyDeviceToDevice);
    }

    void copyArrayFromDevice(void *host, const void *device, struct cudaGraphicsResource **cuda_vbo_resource, int size)
    {
        hipMemcpy(host, device, size, hipMemcpyDeviceToHost);
    }

    //Round a / b to nearest higher integer value
    uint iDivUp(uint a, uint b)
    {
        return (a % b != 0) ? (a / b + 1) : (a / b);
    }

    // compute grid and thread block size for a given number of elements
    void computeGridSize(uint n, uint &numBlocks, uint &numThreads)
    {
        uint blockSize = 256;
        numThreads = min(blockSize, n);
        numBlocks = iDivUp(n, numThreads);
    }
}
55541f09c99ef60c6186af298baac46c9741c293.cu
#include <cstdlib>
#include <cstdio>
#include <string.h>

#include <cuda_runtime_api.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <cuda_gl_interop.h>
#include <cuda_runtime_api.h>

#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>

extern "C"
{
    int getCudaEnabledDeviceCount()
    {
        int count;
        cudaError_t error = cudaGetDeviceCount( &count );

        if (error == cudaErrorNoDevice)
        {
            printf("NO DEVICE\n");
            count = 0;
        }
        else if (error == cudaErrorInsufficientDriver)
        {
            count = -1;
        }
        else //should never happen
        {
            checkCudaErrors(error);
        }
        return count;
    }

    bool cudaInit(int argc, char **argv)
    {
        int count = getCudaEnabledDeviceCount();
        printf("count:%d\n",count);
        int devID = findCudaGLDevice(argc, (const char **)argv);

        if (devID < 0)
        {
            printf("No CUDA Capable devices found, exiting...\n");
            exit(EXIT_SUCCESS);
            return false;
        }
        else
        {
            cudaDeviceProp deviceProp;
            checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
            printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
                   devID, deviceProp.name, deviceProp.major, deviceProp.minor);
            return true;
        }
    }

    void cudaGLInit(int argc, char **argv)
    {
        // use command-line specified CUDA device, otherwise use device with highest Gflops/s
        findCudaGLDevice(argc, (const char **)argv);
    }

    void allocateArray(void **devPtr, size_t size)
    {
        cudaMalloc(devPtr, size);
    }

    void freeArray(void *devPtr)
    {
        cudaFree(devPtr);
    }

    void threadSync()
    {
        cudaDeviceSynchronize();
    }

    void copyArrayToDevice(void *device, const void *host, int offset, int size)
    {
        cudaMemcpy((char *) device + offset, host, size, cudaMemcpyHostToDevice);
    }

    void copyArrayDeviceToDevice(void *device, const void *host, int offset, int size)
    {
        cudaMemcpy((char *) device + offset, host, size, cudaMemcpyDeviceToDevice);
    }

    void copyArrayFromDevice(void *host, const void *device, struct cudaGraphicsResource **cuda_vbo_resource, int size)
    {
        cudaMemcpy(host, device, size, cudaMemcpyDeviceToHost);
    }

    //Round a / b to nearest higher integer value
    uint iDivUp(uint a, uint b)
    {
        return (a % b != 0) ? (a / b + 1) : (a / b);
    }

    // compute grid and thread block size for a given number of elements
    void computeGridSize(uint n, uint &numBlocks, uint &numThreads)
    {
        uint blockSize = 256;
        numThreads = min(blockSize, n);
        numBlocks = iDivUp(n, numThreads);
    }
}
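// computeGridSize above picks a 1-D launch configuration: up to 256 threads per block and
// enough blocks (via iDivUp) to cover n elements. A minimal usage sketch follows; scaleKernel
// and scaleArray are hypothetical and only illustrate how the helper is meant to be called:
__global__ void scaleKernel(float* data, uint n, float s) {
  const uint i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= s;
}

void scaleArray(float* d_data, uint n, float s) {
  uint numBlocks, numThreads;
  computeGridSize(n, numBlocks, numThreads);   // e.g. n = 1000 -> numThreads = 256, numBlocks = 4
  scaleKernel<<<numBlocks, numThreads>>>(d_data, n, s);
}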
54f081b288845fe30684df16ce5c552e3d254f72.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> //#include <thrust\device_vector.h> #include "utilities.h" #include "kernel.h" #include "gridStruct.h" #include "smallObjLoader.h" #include "Macros.h" //GLOBALS dim3 threadsPerBlock(blockSize); int totalGridSize = (2 * (BOX_X + 2)) * (2 * (BOX_Y + 2)) * (BOX_Z + 2); int numParticles; string MeshFileName;// = "..\\..\\Models\\bunny_fu_low2.obj"; __device__ int LockNum=1000; vec3 gravity(.0f); int numGenerated; const float scene_scale = 1; //size of the height map in simulation space particle* particles; int* neighbors; int* num_neighbors; int* grid_idx; int* grid; bool hitonce = false; float wallMove = 0.0f; bool cleanupFixedPoints=false; bool ExtForceSet = false; rigidbodyObj rigtest; vec4* rigPredictedPos; mat3* rigPredictedRot; mat3 rigRotMat; using namespace glm; struct particleMassCenter{ __host__ __device__ vec4 operator()(const particle& x) const{ return x.position; } }; struct particlePredictMassCenter{ __host__ __device__ vec4 operator()(const particle& x) const{ return x.pred_position; } }; void setLockNum(int x){ LockNum=x; } void setGravity(const vec3& g){ ExtForceSet = true; gravity = g; } void setMeshFile(string s){ MeshFileName = s; } void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { if( line >= 0 ) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } } __device__ bool Conditions(int index, int N, int LockNum){ return index < N&&index>=LockNum; } __device__ bool ParticleConditions(int index, int N, particle* p, int LockNum, int LayerMask){ return index < N && (p[index].LayerMask&LayerMask)&&!(p[index].LayerMask&FROZEN); //return index<N && (p[index].ID>LockNum); } __host__ __device__ unsigned int devhash(unsigned int a){ a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } //Function that generates static. 
__host__ __device__ glm::vec3 generateRandomNumberFromThread(float time, int index) { thrust::default_random_engine rng(devhash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } //Update the vertex buffer object //(The VBO is where OpenGL looks for the positions for the planets) __global__ void sendToVBO(int N, particle* particles, float * vbo, int width, int height, float s_scale, unsigned int LockNum) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale_w = 1.0f; float c_scale_h = 1.0f; float c_scale_z = 1.0f; if(index<N) { vbo[4*index+0] = particles[index].position.x*c_scale_w; vbo[4*index+1] = particles[index].position.y*c_scale_h; vbo[4*index+2] = particles[index].position.z*c_scale_z; //the w component is used as a selector of render color vbo[4 * index + 3] = (particles[index].ID >= LockNum)?1.0f:0.0f; } } /************************************* * Device Methods for Solver *************************************/ __device__ float wPoly6Kernel(glm::vec3 p_i, glm::vec3 p_j){ vec3 r(p_i-p_j); if (length(r) > H) return 0.000001f; return 315.0f / (64.0f * PI_FLOAT * POW9(H)) * CUBE(SQR(H) - dot(r, r)); } __device__ glm::vec3 wGradientSpikyKernel(glm::vec3 p_i, glm::vec3 p_j){ glm::vec3 r = p_i - p_j; float hr_term = H - glm::length(r); if (hr_term < 0.0f) return vec3(0.0f); float gradient_magnitude = 45.0f / (PI * POW_H_6) * hr_term * hr_term; float div = (glm::length(r) + 0.001f); return gradient_magnitude * 1.0f / div * r; } __device__ float calculateRo(particle* particles, glm::vec3 p, int* p_neighbors, int p_num_neighbors, int index){ glm::vec3 p_j; float ro = 0.0f; for(int i = 0; i < p_num_neighbors; i++){ glm::vec3 p_j(particles[p_neighbors[i + index * MAX_NEIGHBORS]].pred_position); double kv=wPoly6Kernel(p,p_j); if (kv < K_EPSILON) kv = 0.0f; ro+=kv; } return ro; } __device__ glm::vec3 calculateCiGradient(glm::vec3 p_i, glm::vec3 p_j){ //glm::vec3 Ci = -1.0f / float(REST_DENSITY) * wGradientSpikyKernel(p_i, p_j); //vec3 p_j((*pit)->PredictedPos); //if(Particles[i]->id>(*pit)->id) continue; //Ci=pow(spikyGradient(p-p_j,core_radius).Length()/material.rest_density,2); //sum_gradients+=C_i_gradient; return wGradientSpikyKernel(p_i,p_j)/REST_DENSITY; } __device__ glm::vec3 calculateCiGradientAti(particle* particles, glm::vec3 p_i, int* neighbors, int p_num_neighbors, int index){ glm::vec3 accum = glm::vec3(0.0f); for(int i = 0; i < p_num_neighbors; i++){ accum += wGradientSpikyKernel(p_i, glm::vec3(particles[neighbors[i + index * MAX_NEIGHBORS]].pred_position)); } glm::vec3 Ci = 1.0f / float(REST_DENSITY) * accum; return Ci; } /************************************* * Finding Neighboring Particles *************************************/ struct GridElement{ particle *particles[4*MAX_NEIGHBORS]; //int lock; int size; GridElement(){ //particles = new particle[MAX_NEIGHBORS]; //lock = 0; size = 0; } }; GridElement *grid_elements; int *grid_lock; //__device__ GridElement *sleeping_grid_elements; const int grid_width = 2 * BOX_X / H + 1; const int grid_depth = 2 * BOX_Y / H + 1; const int grid_height = BOX_Z / H + 1; __device__ int grid_index(int i, int j, int k){ if (i < 0 || i >= grid_width || j < 0 || j >= grid_depth || k < 0 || k >= grid_height) return -1; return grid_width*(k*grid_depth + j) + i; } //__device__ GridElement & gridContains(int i, int j, int k){ // return grid_elements[grid_index(i, j, k)]; //} __device__ void add2grid(GridElement *target_grid, particle* p, int* grid_lock){ 
bool wait = true; int i = (int)(float(p->pred_position[0] + BOX_X) / H); i = clamp(i, 0, grid_width - 1); int j = (int)(float(p->pred_position[1] + BOX_Y) / H); j = clamp(j, 0, grid_depth - 1); int k = (int)(float(p->pred_position[2]) / H); k = clamp(k, 0, grid_height - 1); int id = grid_index(i, j, k); while (wait){ if (0 == atomicExch(&(grid_lock[id]), 1)){ int size = target_grid[id].size; if (size<MAX_NEIGHBORS) target_grid[grid_index(i, j, k)].particles[size++] = p; target_grid[id].size = size; grid_lock[id] = 0; wait = false; } else if (target_grid[id].size >= MAX_NEIGHBORS) { //printf("Too many particles in one grid!!!!"); wait = false; } } } __global__ void update_grid(GridElement* grid_elements, particle* particles, int N,int* grid_lock){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N){ add2grid(grid_elements, &particles[index],grid_lock); } } __global__ void clearHistory(GridElement* grid_elements, int totalGridSize){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < totalGridSize){ //printf("I'm good %d\n", index); grid_elements[index].size = 0; } } __global__ void findParticleNeighbors(GridElement* grid_elements, particle* particles, int* neighbors, int* num_neighbors, int N, int LockNum){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index, N, particles, LockNum,FLUID)){ num_neighbors[index] = 0; particle p = particles[index]; int i = (int)(float(p.pred_position[0] + BOX_X) / H); i = clamp(i, 0, grid_width - 1); int j = (int)(float(p.pred_position[1] + BOX_Y) / H); j = clamp(j, 0, grid_depth - 1); int k = (int)(float(p.pred_position[2]) / H); k = clamp(k, 0, grid_height - 1); int id = grid_index(i, j, k); int neighborsNum = 0; int offset[] = {0,-1, 1 }; for (int ioff = 0, iiter = i; ioff < 3; iiter = i + offset[++ioff]){ if (iiter < 0 || iiter >= grid_width) continue; for (int joff = 0, jiter = j ; joff < 3; jiter = j + offset[++joff]){ if (jiter<0 || jiter>grid_depth - 1) continue; for (int koff = 0, kiter = k ; koff < 3; kiter = k + offset[++koff]){ if (kiter<0 || kiter>grid_height - 1) continue; GridElement* thisGrid = &grid_elements[grid_index(iiter, jiter, kiter)]; int thisGridSize = thisGrid->size; for (int pi = 0; pi < thisGridSize&&neighborsNum<MAX_NEIGHBORS; pi++){ particle piter = *thisGrid->particles[pi]; if (p.ID == piter.ID) continue; if (length(p.pred_position - piter.pred_position) < H) neighbors[p.ID*MAX_NEIGHBORS + neighborsNum++] = piter.ID; } } } } num_neighbors[index] = neighborsNum; } } void findParticleNeighborsWrapper(particle* particles, int* neighbors, int* num_neighbors, int N, int LockNum){ dim3 fullBlocksPerGrid((int)ceil(float(grid_width*grid_depth*grid_height) / float(blockSize))); dim3 fullBlocksPerGridParticles((int)ceil(float(N) / float(blockSize))); //printf("fullblockPerGrid x=%d y=%d z=%d\n", fullBlocksPerGrid.x, fullBlocksPerGrid.y, fullBlocksPerGrid.z); clearHistory << <fullBlocksPerGrid, blockSize >> >(grid_elements, grid_width*grid_depth*grid_height); checkCUDAErrorWithLine("clearGrid failed!"); update_grid << <fullBlocksPerGridParticles, blockSize >> >(grid_elements, particles, N,grid_lock); checkCUDAErrorWithLine("findParticleGridIndex failed!"); findParticleNeighbors << <fullBlocksPerGridParticles, blockSize >> >(grid_elements, particles, neighbors, num_neighbors, N, LockNum); checkCUDAErrorWithLine("findKNearestNeighbors failed!"); } // // //// Clears grid from previous neighbors //__global__ void clearGrid(int* grid, int totalGridSize){ // int index = threadIdx.x + 
(blockIdx.x * blockDim.x); // if(index < totalGridSize){ // grid[index] = -1; // } //} // // //// Matches each particles the grid index for the cell in which the particle resides //__global__ void findParticleGridIndex(particle* particles, int* grid_idx, int N){ // int index = threadIdx.x + (blockIdx.x * blockDim.x); // if(index < N){ // int x, y, z; // glm::vec4 p = particles[index].pred_position; // x = int(p.x) + BOX_X + 2; // y = int(p.y) + BOX_Y + 2; // z = int(p.z) + 2; // grid_idx[index] = x + (2 * (BOX_X + 2) * y) + (4 * (BOX_X + 2) * (BOX_Y + 2) * z); // } //} // //// Matches the sorted index to each of the cells //__global__ void matchParticleToCell(int* gridIdx, int* grid, int N, int totalGridSize){ // int index = threadIdx.x + (blockIdx.x * blockDim.x); // if(index < N){ // if(index == 0){ // grid[gridIdx[index]] = index; // }else if(gridIdx[index] != gridIdx[index - 1]){ // if(gridIdx[index] >= 0 && gridIdx[index] < totalGridSize) grid[gridIdx[index]] = index; // } // } //} // //// Finds the nearest K neighbors within the smoothing kernel radius //__global__ void findKNearestNeighbors(particle* particles, int* gridIdx, int* grid, int* neighbors, int* num_neighbors, int N, int totalGridSize,int LockNum){ // int index = threadIdx.x + (blockIdx.x * blockDim.x); // if (ParticleConditions(index, N, particles, LockNum)){ // int heap_size = 0; // int x,y,z,idx; // float r; // glm::vec4 p_j, p = particles[index].pred_position; // // // Find particle index // x = int(p.x) + BOX_X + 2; // y = int(p.y) + BOX_Y + 2; // z = int(p.z) + 2; // // float max; // int m, max_index, begin, cell_position; // // // Examine all cells within radius // // NOTE: checks the cube that circumscribes the spherical smoothing kernel // for(int i = int(-H + z); i <= int(H + z); i++){ // for(int j = int(-H + y); j <= int(H + y); j++){ // for(int k = int(-H + x); k <= int(H + x); k++){ // idx = k + (2 * (BOX_X + 2) * j) + (4 * (BOX_X + 2) * (BOX_Y + 2) * i); // // if(idx >= totalGridSize || idx < 0){ // continue; // } // // begin = grid[idx]; // // if(begin < 0) continue; // // cell_position = begin; // while(cell_position < N && gridIdx[begin] == gridIdx[cell_position]){ // if(cell_position == index){ // ++cell_position; // continue; // } // p_j = particles[cell_position].pred_position; // r = glm::length(p - p_j); // // if(heap_size < MAX_NEIGHBORS){ // if(r < H){ // neighbors[index * MAX_NEIGHBORS + heap_size] = cell_position; // ++heap_size; // } // }else{ // max = glm::length(p - particles[neighbors[index * MAX_NEIGHBORS]].pred_position); // max_index = 0; // for(m = 1; m < heap_size; m++){ // float d = glm::length(p - particles[neighbors[index * MAX_NEIGHBORS + m]].pred_position); // if(d > max){ // max = d; // max_index = m; // } // } // // if(r < max && r < H){ // neighbors[index * MAX_NEIGHBORS + max_index] = cell_position; // } // } // // ++cell_position; // } // } // } // } // num_neighbors[index] = heap_size; // } //} // //// Wrapper to find neighbors using hash grid //void findNeighbors(particle* particles, int* grid_idx, int* grid, int* neighbors, int N,int LockNum){ // dim3 fullBlocksPerGrid((int)ceil(float(totalGridSize) / float(blockSize))); // dim3 fullBlocksPerGridParticles((int)ceil(float(N)/float(blockSize))); // // // Clear Grid // clearGrid<<<fullBlocksPerGrid, blockSize>>>(grid, totalGridSize); // checkCUDAErrorWithLine("clearGrid failed!"); // // // Match particle to index // findParticleGridIndex<<<fullBlocksPerGridParticles, blockSize>>>(particles, grid_idx, N); // 
checkCUDAErrorWithLine("findParticleGridIndex failed!"); // // // Cast to device pointers // thrust::device_ptr<int> t_grid_idx = thrust::device_pointer_cast(grid_idx); // thrust::device_ptr<particle> t_particles = thrust::device_pointer_cast(particles); // // // Sort by key // thrust::sort_by_key(t_grid_idx, t_grid_idx + N, t_particles); // checkCUDAErrorWithLine("thrust failed!"); // // // Match sorted particle index // matchParticleToCell<<<fullBlocksPerGridParticles, blockSize>>>(grid_idx, grid, N, totalGridSize); // checkCUDAErrorWithLine("matchParticletoCell failed!"); // // // Find K nearest neighbors // findKNearestNeighbors<<<fullBlocksPerGridParticles, blockSize>>>(particles, grid_idx, grid, neighbors, num_neighbors, N, totalGridSize,LockNum); // checkCUDAErrorWithLine("findKNearestNeighbors failed!"); //} /************************************* * Kernels for Jacobi Solver *************************************/ __global__ void calculateLambda(particle* particles, int* neighbors, int* num_neighbors, int N,int LockNum){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(ParticleConditions(index,N,particles,LockNum,FLUID)){ int k = num_neighbors[index]; glm::vec3 p = glm::vec3(particles[index].pred_position); float p_i = calculateRo(particles, p, neighbors, k, index); float C_i = (p_i / REST_DENSITY) - 1.0f; float C_i_gradient, sum_gradients = 0.0f; for(int i = 0; i < k; i++){ // Calculate gradient when k = j C_i_gradient = glm::length(calculateCiGradient(p, glm::vec3(particles[neighbors[i + index * MAX_NEIGHBORS]].pred_position))); sum_gradients += (C_i_gradient * C_i_gradient); } // Add gradient when k = i C_i_gradient = glm::length(calculateCiGradientAti(particles, p, neighbors, k, index)); sum_gradients += (C_i_gradient * C_i_gradient); float sumCi = sum_gradients + RELAXATION; particles[index].lambda = -1.0f * (C_i / sumCi); } } __global__ void calculateDeltaPi(particle* particles, int* neighbors, int* num_neighbors, int N,int LockNum){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (ParticleConditions(index, N, particles, LockNum,FLUID)){ int k = num_neighbors[index]; glm::vec3 p = glm::vec3(particles[index].pred_position); float l = particles[index].lambda; glm::vec3 delta = glm::vec3(0.0f); int p_j_idx; #if PRESSURE == 1 float k_term; glm::vec3 d_q = DELTA_Q * glm::vec3(1.0f) + p; #endif float s_corr = 0.0f; for(int i = 0; i < k; i++){ p_j_idx = neighbors[i + index * MAX_NEIGHBORS]; #if PRESSURE == 1 float poly6pd_q = wPoly6Kernel(p, d_q); if(poly6pd_q < EPSILON) k_term = 0.0f; else k_term = wPoly6Kernel(p, glm::vec3(particles[p_j_idx].pred_position)) / poly6pd_q; s_corr = -1.0f * PRESSURE_K * pow(k_term, PRESSURE_N); #endif delta += (l + particles[p_j_idx].lambda + s_corr) * wGradientSpikyKernel(p, glm::vec3(particles[p_j_idx].pred_position)); } particles[index].delta_pos = 1.0f / REST_DENSITY * delta; } } __global__ void calculateCurl(particle* particles, int* neighbors, int* num_neighbors, int N,int LockNum){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (ParticleConditions(index, N, particles, LockNum,FLUID)){ int k = num_neighbors[index]; glm::vec3 p = glm::vec3(particles[index].pred_position); glm::vec3 v = particles[index].velocity; int j_idx; glm::vec3 v_ij, gradient, accum = glm::vec3(0.0f); for(int i = 0; i < k; i++){ j_idx = neighbors[i + index * MAX_NEIGHBORS]; v_ij = particles[j_idx].velocity - v; gradient = wGradientSpikyKernel(p, glm::vec3(particles[j_idx].pred_position)); accum += glm::cross(v_ij, gradient); } particles[index].curl = 
accum; } } __global__ void applyVorticity(particle* particles, int* neighbors, int* num_neighbors, int N,int LockNum){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(ParticleConditions(index,N,particles,LockNum,FLUID)){ int k = num_neighbors[index]; glm::vec3 p = glm::vec3(particles[index].pred_position); glm::vec3 w = particles[index].curl; int j_idx; float mag_w; glm::vec3 r, grad = glm::vec3(0.0f); for(int i = 0; i < k; i++){ j_idx = neighbors[i + index * MAX_NEIGHBORS]; r = glm::vec3(particles[j_idx].pred_position) - p; mag_w = glm::length(particles[j_idx].curl - w); grad.x += mag_w / r.x; grad.y += mag_w / r.y; grad.z += mag_w / r.z; } glm::vec3 vorticity, N; N = 1.0f/(glm::length(grad) + .001f) * grad; vorticity = float(RELAXATION) * (glm::cross(N, w)); particles[index].external_forces += vorticity; } } __global__ void initializeParticles(int N, particle* particles,int LockNum=INT_MAX) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; float gravity = -9.8f; if (Conditions(index, N, LockNum)) { particle p = particles[index]; glm::vec3 rand = (generateRandomNumberFromThread(1.0f, index)-0.5f); p.ID=index; p.LayerMask = FLUID; p.position.x = (index%20)-9.5f; p.position.y = ((index/20)%20)-9.5f; p.position.z = (index/400)+10.0f+0.05f*rand.z; p.position.w = 1.0f; //p.position=glm::vec4(index%9-3.5f,(index/9)%20-9.5f,5.0f+index/180,1.0f); p.pred_position = p.position; p.velocity = glm::vec3(0.0f); p.external_forces = glm::vec3(0.0f,0.0f,gravity); particles[index] = p; } else if(index<N){ particle p=particles[index]; p.ID=index; //p.LayerMask = CONTAINER; p.velocity=glm::vec3(0.0f); p.external_forces=glm::vec3(0.0f,0.0f,gravity); particles[index]=p; } } __global__ void setExternalForces(int N, particle* particles, int LockNum,vec3 extForce){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index,N,particles,LockNum,FLUID|RIGID_BODY)){ particles[index].external_forces = extForce; } } //Simple Euler integration scheme __global__ void applyExternalForces(int N, float dt, particle* particles,int LockNum) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index,N,particles,LockNum,FLUID|RIGID_BODY)){ particle p = particles[index]; p.velocity+=dt*p.external_forces; p.delta_pos=glm::vec3(0.0f); p.pred_position=p.position+vec4(p.velocity*dt,0.0); particles[index] = p; } } __global__ void updatePosition(int N, particle* particles,int LockNum=0) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index, N, particles, LockNum,FLUID|RIGID_BODY)){ //if (length(particles[index].position - particles[index].pred_position) > frozenDistance) particles[index].position = particles[index].pred_position; } /*if (index < N){ particles[index].LayerMask &= ~FROZEN; }*/ //if(particles[index].ID<=LockNum){ // particles[index].velocity=vec3(0.0f); // particles[index].curl=vec3(0.0f); // //particles[index].external_forces=vec3(0.0f); //} } __global__ void updatePredictedPosition(int N, particle* particles,int LockNum=0) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index, N, particles, LockNum,FLUID)){ particles[index].pred_position += glm::vec4(particles[index].delta_pos,0.0f); } } __global__ void updateVelocity(int N, particle* particles, float dt,int LockNum) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index,N,particles,LockNum,FLUID|RIGID_BODY)){ particles[index].velocity = glm::vec3((1.0f/dt)*(particles[index].pred_position - particles[index].position)); if 
(length(particles[index].velocity) > 20.0f) particles[index].velocity = 20.0f*normalize(particles[index].velocity); /*if (length(particles[index].velocity) < frozenDistance) particles[index].LayerMask |= FROZEN;*/ } } __global__ void boxCollisionResponse(int N, particle* particles, int LockNum){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index,N,particles,LockNum,FLUID|RIGID_BODY)){ vec3 randv = generateRandomNumberFromThread(N, index); if( particles[index].pred_position.z < 0.0f){ particles[index].pred_position.z = 0.001f*randv.z+0.01f; glm::vec3 normal = glm::vec3(0,0,1); particles[index].velocity.z = collision_restitution*abs(particles[index].velocity.z); } if( particles[index].pred_position.z > BOX_Z){ particles[index].pred_position.z = BOX_Z - 0.001f*randv.z-0.01f; glm::vec3 normal = glm::vec3(0,0,-1); particles[index].velocity.z = -collision_restitution*abs(particles[index].velocity.z); } if( particles[index].pred_position.y < -BOX_Y){ particles[index].pred_position.y = -BOX_Y + 0.001f*randv.y+0.01f; glm::vec3 normal = glm::vec3(0,1,0); particles[index].velocity.y = collision_restitution*abs(particles[index].velocity.y); } if( particles[index].pred_position.y > BOX_Y){ particles[index].pred_position.y = BOX_Y - 0.001f*randv.y-0.01f; glm::vec3 normal = glm::vec3(0,-1,0); particles[index].velocity.y = -collision_restitution*abs(particles[index].velocity.y); } if( particles[index].pred_position.x < -BOX_X){ particles[index].pred_position.x = -BOX_X + 0.001f*randv.x+0.01f; glm::vec3 normal = glm::vec3(1,0,0); particles[index].velocity.x = collision_restitution*abs(particles[index].velocity.x); } if( particles[index].pred_position.x > BOX_X){ particles[index].pred_position.x = BOX_X - 0.001f*randv.x-0.01f; glm::vec3 normal = glm::vec3(-1,0,0); particles[index].velocity.x = -collision_restitution*abs(particles[index].velocity.x); } } } /************************************* * shape matching * *************************************/ void jacobiRotate(mat3 &A, mat3 &R, int p, int q){ // rotates A through phi in pq-plane to set A(p,q) = 0 // rotation stored in R whose columns are eigenvectors of A float d = (A[p][p] - A[q][q]) / (2.0f*A[p][q]); float t = 1.0f / (abs(d) + sqrt(d*d + 1.0f)); if (d < 0.0f) t = -t; float c = 1.0f / sqrt(t*t + 1.0f); float s = t*c; A[p][p] += t*A[p][q]; A[q][q] -= t*A[p][q]; A[p][q] = A[q][p] = 0.0f; //transform A int k; for (k = 0; k < 3; k++){ if (k != p&&k != q){ float Akp = c*A[k][p] + s*A[k][q]; float Akq = -s*A[k][p] + c*A[k][q]; A[k][p] = A[p][k] = Akp; A[k][q] = A[q][k] = Akq; } } //store rotation in R for (k = 0; k < 3; k++){ float Rkp = c*R[k][p] + s*R[k][q]; float Rkq = -s*R[k][p] + c*R[k][q]; R[k][p] = Rkp; R[k][q] = Rkq; } } void eigenDecompposition(mat3 &outA, mat3& outR){ //only for symmetric matrices! 
//A=RA'R^T, where A' is diagnoal and R orthonormal //identity; mat3 A(outA); mat3 R = mat3(0.0f); R[0][0] = R[1][1] = R[2][2] = 1.0f; /*mat3 view = A; printf("view[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ int iter = 0; while (iter < JACOBI_ITERATIONS){ int p, q; float a, maxval; maxval = -1.0f; for (int i = 0; i < 2; i++){ for (int j = i+1; j < 3; j++){ a = abs(A[i][j]); if (maxval<0.0f || a>maxval){ p = i; q = j; maxval = a; } } } //all small enough->done if (maxval < 0.0001f) break; //rotate matrix with respect to that element jacobiRotate(A, R, p, q); /*printf("---------------------------------------------\n"); view = A; printf("A[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]); view = R; printf("R[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ iter++; } outA = A; outR = R; } void polarDecomposition(mat3 A, mat3 &R, mat3 &S){ //A=RS, where S is symmetric and R is orthonormal //-> S=(A^T A)^(1/2) //mat3 A, R, S; //A = mat3(vec3(1.0f,-.3333f,.959f),vec3(.495f,1.0f,0.0f),vec3(.5f,-.247f,1.5f)); //identity; R = mat3(0.0f); R[0][0] = R[1][1] = R[2][2] = 1.0f; mat3 ATA(0.0f); ATA = glm::transpose(A)*A; mat3 view = transpose(A); /*printf("AT[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ mat3 U; eigenDecompposition(ATA, U); view = U; /*printf("QT[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ float l0 = ATA[0][0]; ATA[0][0] = l0 = l0 <= 0.0f ? 0.0f : sqrt(l0); float l1 = ATA[1][1]; ATA[1][1]=l1 = l1 <= 0.0f ? 0.0f : sqrt(l1); float l2 = ATA[2][2]; ATA[2][2]=l2 = l2 <= 0.0f ? 
0.0f : sqrt(l2); view = ATA; /*printf("ATA[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ view = transpose(U)*ATA*U; /*printf("U[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ mat3 S1=inverse(view); /*S1[0][0] = l0*U[0][0] * U[0][0] + l1*U[0][1] * U[0][1] + l2*U[0][2] * U[0][2]; S1[0][1] = l0*U[0][0] * U[1][0] + l1*U[0][1] * U[1][1] + l2*U[0][2] * U[1][2]; S1[0][2] = l0*U[0][0] * U[2][0] + l1*U[0][1] * U[2][1] + l2*U[0][2] * U[2][2]; S1[1][0] = S1[0][1]; S1[1][1] = l0*U[1][0] * U[1][0] + l1*U[1][1] * U[1][1] + l2*U[1][2] * U[1][2]; S1[1][2] = l0*U[1][0] * U[2][0] + l1*U[1][1] * U[2][1] + l2*U[1][2] * U[2][2]; S1[2][0] = S1[0][2]; S1[2][1] = S1[1][2]; S1[2][2] = l0*U[2][0] * U[2][0] + l1*U[2][1] * U[2][1] + l2*U[2][2] * U[2][2];*/ R = A*S1; for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) if (abs(R[i][j] < 0.001f)) R[i][j] = 0.0f; R[0] = normalize(R[0]); R[1] = normalize(R[1]); R[2] = normalize(R[2]); view = R; /*printf("view[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ S = transpose(R)*A; } __global__ void SetGoalPosition(particle* particles, int N, mat3 R, vec4 MassCenter0, vec4 MassCenter1){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index,N,particles,0,RIGID_BODY)){ particles[index].pred_position=vec4(R*(vec3(particles[index].position - MassCenter0) + vec3(MassCenter1)),1.0f); } } __global__ void MassCenterPredictedPosition(vec4* rigPredictedPos, particle* particles, int N, int startID){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index, N, particles, 0, RIGID_BODY)){ rigPredictedPos[particles[index].ID - startID] = particles[index].pred_position; } } __global__ void MassCenterPredictedMatrix(mat3* rigPredictedRot, particle* particles, int N, int startID, vec4 oldMC, vec4 newMC){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index, N, particles, 0, RIGID_BODY)){ vec3 p = vec3(particles[index].position - oldMC); vec3 q = vec3(particles[index].pred_position - newMC); rigPredictedRot[particles[index].ID - startID] = mat3(q.x*p, q.y*p, q.z*p); } } /************************************* * Wrappers for the __global__ calls * *************************************/ //Initialize memory, update some globals void initCuda(int N) { numParticles = N; numGenerated = 0; dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize))); //mat3 A, R, S; //polarDecomposition << <1, 1 >> >(); LockNum = 0; hipMalloc((void**)&particles, N * sizeof(particle)); SmallObjMesh som(MeshFileName); LockNum+=som.position.size(); printf("%d Vertices\n",LockNum); SmallObjMesh rigidtest("D:\\workspace\\PhysiAnim\\FinalProj\\Position_Based_Fluids\\PBF_Stage2\\Ref\\Models\\cont.obj"); rigtest.start = LockNum; rigtest.size = rigidtest.position.size(); LockNum += rigidtest.position.size(); printf("%d Vertices\n", LockNum); particle* par=new particle[LockNum/*som.position.size()*/]; for (int i = 0; i<som.position.size(); i++){ par[i].position=vec4(som.position[i]+vec3(0.0,0.0,10.0),1.0); par[i].pred_position = par[i].position; par[i].LayerMask = CONTAINER; } if (rigidtest.position.size() > 0){ rigtest.ID = 0; rigtest.size = rigidtest.position.size(); rigtest.newMassCenter = rigtest.oldMassCenter = 
vec4(0.0); } vec4 testrig(0.0); for (int i = som.position.size(); i < som.position.size() + rigidtest.position.size(); i++){ par[i].position = vec4(rigidtest.position[i - som.position.size()]+vec3(0,0,70.0f), 1.0f); par[i].pred_position = par[i].position; testrig += par[i].position; par[i].LayerMask = RIGID_BODY; } if (LockNum > N){ printf("The mesh file need %d particles but the total particle number is set to %d!\n", LockNum, N); printf("Program down!\n"); exit(-1); } if(LockNum>0){ hipMemcpy(particles,par,LockNum*sizeof(particle),hipMemcpyHostToDevice); } //delete [] par; checkCUDAErrorWithLine("particles cudamalloc failed"); hipMalloc((void**)&neighbors, MAX_NEIGHBORS*N*sizeof(int)); hipMalloc((void**)&num_neighbors, N*sizeof(int)); hipMalloc((void**)&grid_idx, N*sizeof(int)); checkCUDAErrorWithLine("grid idx cudamalloc failed!"); hipMalloc((void**)&grid, totalGridSize*sizeof(int)); checkCUDAErrorWithLine("grid cudamalloc failed!"); hipMalloc((void**)&grid_elements, grid_width*grid_depth*grid_height*sizeof(GridElement)); hipMalloc((void**)&grid_lock, grid_width*grid_depth*grid_height*sizeof(int)); hipMemset(grid_lock, 0, grid_width*grid_depth*grid_height*sizeof(int)); checkCUDAErrorWithLine("grid_elements cudamalloc failed!"); hipMalloc((void**)&rigPredictedPos, rigidtest.position.size()*sizeof(vec4)); hipMalloc((void**)&rigPredictedRot, rigidtest.position.size()*sizeof(mat3)); hipLaunchKernelGGL(( initializeParticles), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, N, particles,LockNum); MassCenterPredictedPosition << <fullBlocksPerGrid, blockSize >> >(rigPredictedPos,particles, N, som.position.size()); thrust::device_ptr<vec4> begin(rigPredictedPos); //begin+=som.position.size(); thrust::device_ptr<vec4> end = begin + rigtest.size; //end += rigidtest.position.size(); rigtest.oldMassCenter = thrust::reduce(begin, end,vec4(0.0), thrust::plus<vec4>())/rigtest.size; rigtest.oldMassCenter.w = 1.0f; printf("zhehuo: [%f, %f, %f]\n", rigtest.oldMassCenter.x, rigtest.oldMassCenter.y, rigtest.oldMassCenter.z); printf("tester: [%f, %f, %f]\n", testrig.x, testrig.y, testrig.z); checkCUDAErrorWithLine("Kernel failed!"); hipDeviceSynchronize(); } void cudaPBFUpdateWrapper(float dt) { dim3 fullBlocksPerGrid((int)ceil(float(numParticles)/float(blockSize))); int innerLockNum=0; if(cleanupFixedPoints&&innerLockNum!=LockNum){ //cleanup<<<fullBlocksPerGrid, blockSize>>>(numParticles,particles,inn } innerLockNum=LockNum; /*if (ExtForceSet){ ExtForceSet = false; setExternalForces << < fullBlocksPerGrid, blockSize >> >(numParticles, particles,innerLockNum,gravity); }*/ //printf("Good\n"); applyExternalForces << <fullBlocksPerGrid, blockSize >> >(numParticles, dt, particles, innerLockNum); checkCUDAErrorWithLine("applyExternalForces failed!"); //findNeighbors(particles, grid_idx, grid, neighbors, numParticles,innerLockNum); findParticleNeighborsWrapper(particles, neighbors, num_neighbors, numParticles, innerLockNum); checkCUDAErrorWithLine("findNeighbors failed!"); boxCollisionResponse << <fullBlocksPerGrid, blockSize >> >(numParticles, particles, innerLockNum); MassCenterPredictedPosition << <fullBlocksPerGrid, blockSize >> >(rigPredictedPos, particles, numParticles, rigtest.start); checkCUDAErrorWithLine("MassCenterPredictedPosition failed!"); thrust::device_ptr<vec4> begin(rigPredictedPos); thrust::device_ptr<vec4> end = begin + rigtest.size; rigtest.newMassCenter = thrust::reduce(begin, end, vec4(0.0), thrust::plus<vec4>())/rigtest.size; rigtest.newMassCenter.w = 1.0f; //printf("mass center: [%f, %f, 
%f]\n", rigtest.newMassCenter.x, rigtest.newMassCenter.y, rigtest.newMassCenter.z); MassCenterPredictedMatrix << <fullBlocksPerGrid, blockSize >> >(rigPredictedRot, particles, numParticles, rigtest.start, rigtest.oldMassCenter, rigtest.newMassCenter); checkCUDAErrorWithLine("MassCenterPredictedMatrix failed!"); thrust::device_ptr<mat3> rotBegin(rigPredictedRot); thrust::device_ptr<mat3> rotEnd = rotBegin + rigtest.size; rigRotMat = thrust::reduce(rotBegin, rotEnd, mat3(0.0), thrust::plus<mat3>()); mat3 rotationDecomposition; mat3 scaleDecomposition; //mat3 view = rigRotMat; /*printf("view[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ polarDecomposition(rigRotMat, rotationDecomposition, scaleDecomposition); checkCUDAErrorWithLine("polarDecomposition failed!"); SetGoalPosition << <fullBlocksPerGrid, blockSize >> >(particles, numParticles, rotationDecomposition, rigtest.oldMassCenter, rigtest.newMassCenter); checkCUDAErrorWithLine("SetGoalPosition failed!"); MassCenterPredictedPosition << <fullBlocksPerGrid, blockSize >> >(rigPredictedPos, particles, numParticles, rigtest.start); checkCUDAErrorWithLine("MassCenterPredictedPosition failed!"); begin=thrust::device_ptr<vec4>(rigPredictedPos); end = begin + rigtest.size; rigtest.oldMassCenter = thrust::reduce(begin, end, vec4(0.0), thrust::plus<vec4>()) / rigtest.size; rigtest.oldMassCenter.w = 1.0f; for(int i = 0; i < SOLVER_ITERATIONS; i++){ hipLaunchKernelGGL(( calculateLambda), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, particles, neighbors, num_neighbors, numParticles,innerLockNum); hipLaunchKernelGGL(( calculateDeltaPi), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, particles, neighbors, num_neighbors, numParticles,innerLockNum); //PEFORM COLLISION DETECTION AND RESPONSE hipLaunchKernelGGL(( updatePredictedPosition), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numParticles, particles,innerLockNum); } updateVelocity << <fullBlocksPerGrid, blockSize >> >(numParticles, particles, dt, innerLockNum); calculateCurl << <fullBlocksPerGrid, blockSize >> >(particles, neighbors, num_neighbors, numParticles, innerLockNum); applyVorticity << <fullBlocksPerGrid, blockSize >> >(particles, neighbors, num_neighbors, numParticles, innerLockNum); hipLaunchKernelGGL(( updatePosition), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numParticles, particles,innerLockNum); checkCUDAErrorWithLine("updatePosition failed!"); hipDeviceSynchronize(); } void cudaUpdateVBO(float * vbodptr, int width, int height) { dim3 fullBlocksPerGrid((int)ceil(float(numParticles)/float(blockSize))); hipLaunchKernelGGL(( sendToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numParticles, particles, vbodptr, width, height, scene_scale,LockNum); hipDeviceSynchronize(); } void freeCuda(){ hipFree(particles); hipFree(neighbors); hipFree(num_neighbors); hipFree(grid_idx); hipFree(grid); }
54f081b288845fe30684df16ce5c552e3d254f72.cu
#include <stdio.h> #include <cuda.h> #include <cmath> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> //#include <thrust\device_vector.h> #include "utilities.h" #include "kernel.h" #include "gridStruct.h" #include "smallObjLoader.h" #include "Macros.h" //GLOBALS dim3 threadsPerBlock(blockSize); int totalGridSize = (2 * (BOX_X + 2)) * (2 * (BOX_Y + 2)) * (BOX_Z + 2); int numParticles; string MeshFileName;// = "..\\..\\Models\\bunny_fu_low2.obj"; __device__ int LockNum=1000; vec3 gravity(.0f); int numGenerated; const float scene_scale = 1; //size of the height map in simulation space particle* particles; int* neighbors; int* num_neighbors; int* grid_idx; int* grid; bool hitonce = false; float wallMove = 0.0f; bool cleanupFixedPoints=false; bool ExtForceSet = false; rigidbodyObj rigtest; vec4* rigPredictedPos; mat3* rigPredictedRot; mat3 rigRotMat; using namespace glm; struct particleMassCenter{ __host__ __device__ vec4 operator()(const particle& x) const{ return x.position; } }; struct particlePredictMassCenter{ __host__ __device__ vec4 operator()(const particle& x) const{ return x.pred_position; } }; void setLockNum(int x){ LockNum=x; } void setGravity(const vec3& g){ ExtForceSet = true; gravity = g; } void setMeshFile(string s){ MeshFileName = s; } void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { if( line >= 0 ) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } __device__ bool Conditions(int index, int N, int LockNum){ return index < N&&index>=LockNum; } __device__ bool ParticleConditions(int index, int N, particle* p, int LockNum, int LayerMask){ return index < N && (p[index].LayerMask&LayerMask)&&!(p[index].LayerMask&FROZEN); //return index<N && (p[index].ID>LockNum); } __host__ __device__ unsigned int devhash(unsigned int a){ a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } //Function that generates static. 
__host__ __device__ glm::vec3 generateRandomNumberFromThread(float time, int index) { thrust::default_random_engine rng(devhash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } //Update the vertex buffer object //(The VBO is where OpenGL looks for the positions for the planets) __global__ void sendToVBO(int N, particle* particles, float * vbo, int width, int height, float s_scale, unsigned int LockNum) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale_w = 1.0f; float c_scale_h = 1.0f; float c_scale_z = 1.0f; if(index<N) { vbo[4*index+0] = particles[index].position.x*c_scale_w; vbo[4*index+1] = particles[index].position.y*c_scale_h; vbo[4*index+2] = particles[index].position.z*c_scale_z; //the w component is used as a selector of render color vbo[4 * index + 3] = (particles[index].ID >= LockNum)?1.0f:0.0f; } } /************************************* * Device Methods for Solver *************************************/ __device__ float wPoly6Kernel(glm::vec3 p_i, glm::vec3 p_j){ vec3 r(p_i-p_j); if (length(r) > H) return 0.000001f; return 315.0f / (64.0f * PI_FLOAT * POW9(H)) * CUBE(SQR(H) - dot(r, r)); } __device__ glm::vec3 wGradientSpikyKernel(glm::vec3 p_i, glm::vec3 p_j){ glm::vec3 r = p_i - p_j; float hr_term = H - glm::length(r); if (hr_term < 0.0f) return vec3(0.0f); float gradient_magnitude = 45.0f / (PI * POW_H_6) * hr_term * hr_term; float div = (glm::length(r) + 0.001f); return gradient_magnitude * 1.0f / div * r; } __device__ float calculateRo(particle* particles, glm::vec3 p, int* p_neighbors, int p_num_neighbors, int index){ glm::vec3 p_j; float ro = 0.0f; for(int i = 0; i < p_num_neighbors; i++){ glm::vec3 p_j(particles[p_neighbors[i + index * MAX_NEIGHBORS]].pred_position); double kv=wPoly6Kernel(p,p_j); if (kv < K_EPSILON) kv = 0.0f; ro+=kv; } return ro; } __device__ glm::vec3 calculateCiGradient(glm::vec3 p_i, glm::vec3 p_j){ //glm::vec3 Ci = -1.0f / float(REST_DENSITY) * wGradientSpikyKernel(p_i, p_j); //vec3 p_j((*pit)->PredictedPos); //if(Particles[i]->id>(*pit)->id) continue; //Ci=pow(spikyGradient(p-p_j,core_radius).Length()/material.rest_density,2); //sum_gradients+=C_i_gradient; return wGradientSpikyKernel(p_i,p_j)/REST_DENSITY; } __device__ glm::vec3 calculateCiGradientAti(particle* particles, glm::vec3 p_i, int* neighbors, int p_num_neighbors, int index){ glm::vec3 accum = glm::vec3(0.0f); for(int i = 0; i < p_num_neighbors; i++){ accum += wGradientSpikyKernel(p_i, glm::vec3(particles[neighbors[i + index * MAX_NEIGHBORS]].pred_position)); } glm::vec3 Ci = 1.0f / float(REST_DENSITY) * accum; return Ci; } /************************************* * Finding Neighboring Particles *************************************/ struct GridElement{ particle *particles[4*MAX_NEIGHBORS]; //int lock; int size; GridElement(){ //particles = new particle[MAX_NEIGHBORS]; //lock = 0; size = 0; } }; GridElement *grid_elements; int *grid_lock; //__device__ GridElement *sleeping_grid_elements; const int grid_width = 2 * BOX_X / H + 1; const int grid_depth = 2 * BOX_Y / H + 1; const int grid_height = BOX_Z / H + 1; __device__ int grid_index(int i, int j, int k){ if (i < 0 || i >= grid_width || j < 0 || j >= grid_depth || k < 0 || k >= grid_height) return -1; return grid_width*(k*grid_depth + j) + i; } //__device__ GridElement & gridContains(int i, int j, int k){ // return grid_elements[grid_index(i, j, k)]; //} __device__ void add2grid(GridElement *target_grid, particle* p, int* grid_lock){ 
bool wait = true; int i = (int)(float(p->pred_position[0] + BOX_X) / H); i = clamp(i, 0, grid_width - 1); int j = (int)(float(p->pred_position[1] + BOX_Y) / H); j = clamp(j, 0, grid_depth - 1); int k = (int)(float(p->pred_position[2]) / H); k = clamp(k, 0, grid_height - 1); int id = grid_index(i, j, k); while (wait){ if (0 == atomicExch(&(grid_lock[id]), 1)){ int size = target_grid[id].size; if (size<MAX_NEIGHBORS) target_grid[grid_index(i, j, k)].particles[size++] = p; target_grid[id].size = size; grid_lock[id] = 0; wait = false; } else if (target_grid[id].size >= MAX_NEIGHBORS) { //printf("Too many particles in one grid!!!!"); wait = false; } } } __global__ void update_grid(GridElement* grid_elements, particle* particles, int N,int* grid_lock){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N){ add2grid(grid_elements, &particles[index],grid_lock); } } __global__ void clearHistory(GridElement* grid_elements, int totalGridSize){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < totalGridSize){ //printf("I'm good %d\n", index); grid_elements[index].size = 0; } } __global__ void findParticleNeighbors(GridElement* grid_elements, particle* particles, int* neighbors, int* num_neighbors, int N, int LockNum){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index, N, particles, LockNum,FLUID)){ num_neighbors[index] = 0; particle p = particles[index]; int i = (int)(float(p.pred_position[0] + BOX_X) / H); i = clamp(i, 0, grid_width - 1); int j = (int)(float(p.pred_position[1] + BOX_Y) / H); j = clamp(j, 0, grid_depth - 1); int k = (int)(float(p.pred_position[2]) / H); k = clamp(k, 0, grid_height - 1); int id = grid_index(i, j, k); int neighborsNum = 0; int offset[] = {0,-1, 1 }; for (int ioff = 0, iiter = i; ioff < 3; iiter = i + offset[++ioff]){ if (iiter < 0 || iiter >= grid_width) continue; for (int joff = 0, jiter = j ; joff < 3; jiter = j + offset[++joff]){ if (jiter<0 || jiter>grid_depth - 1) continue; for (int koff = 0, kiter = k ; koff < 3; kiter = k + offset[++koff]){ if (kiter<0 || kiter>grid_height - 1) continue; GridElement* thisGrid = &grid_elements[grid_index(iiter, jiter, kiter)]; int thisGridSize = thisGrid->size; for (int pi = 0; pi < thisGridSize&&neighborsNum<MAX_NEIGHBORS; pi++){ particle piter = *thisGrid->particles[pi]; if (p.ID == piter.ID) continue; if (length(p.pred_position - piter.pred_position) < H) neighbors[p.ID*MAX_NEIGHBORS + neighborsNum++] = piter.ID; } } } } num_neighbors[index] = neighborsNum; } } void findParticleNeighborsWrapper(particle* particles, int* neighbors, int* num_neighbors, int N, int LockNum){ dim3 fullBlocksPerGrid((int)ceil(float(grid_width*grid_depth*grid_height) / float(blockSize))); dim3 fullBlocksPerGridParticles((int)ceil(float(N) / float(blockSize))); //printf("fullblockPerGrid x=%d y=%d z=%d\n", fullBlocksPerGrid.x, fullBlocksPerGrid.y, fullBlocksPerGrid.z); clearHistory << <fullBlocksPerGrid, blockSize >> >(grid_elements, grid_width*grid_depth*grid_height); checkCUDAErrorWithLine("clearGrid failed!"); update_grid << <fullBlocksPerGridParticles, blockSize >> >(grid_elements, particles, N,grid_lock); checkCUDAErrorWithLine("findParticleGridIndex failed!"); findParticleNeighbors << <fullBlocksPerGridParticles, blockSize >> >(grid_elements, particles, neighbors, num_neighbors, N, LockNum); checkCUDAErrorWithLine("findKNearestNeighbors failed!"); } // // //// Clears grid from previous neighbors //__global__ void clearGrid(int* grid, int totalGridSize){ // int index = threadIdx.x + 
(blockIdx.x * blockDim.x); // if(index < totalGridSize){ // grid[index] = -1; // } //} // // //// Matches each particles the grid index for the cell in which the particle resides //__global__ void findParticleGridIndex(particle* particles, int* grid_idx, int N){ // int index = threadIdx.x + (blockIdx.x * blockDim.x); // if(index < N){ // int x, y, z; // glm::vec4 p = particles[index].pred_position; // x = int(p.x) + BOX_X + 2; // y = int(p.y) + BOX_Y + 2; // z = int(p.z) + 2; // grid_idx[index] = x + (2 * (BOX_X + 2) * y) + (4 * (BOX_X + 2) * (BOX_Y + 2) * z); // } //} // //// Matches the sorted index to each of the cells //__global__ void matchParticleToCell(int* gridIdx, int* grid, int N, int totalGridSize){ // int index = threadIdx.x + (blockIdx.x * blockDim.x); // if(index < N){ // if(index == 0){ // grid[gridIdx[index]] = index; // }else if(gridIdx[index] != gridIdx[index - 1]){ // if(gridIdx[index] >= 0 && gridIdx[index] < totalGridSize) grid[gridIdx[index]] = index; // } // } //} // //// Finds the nearest K neighbors within the smoothing kernel radius //__global__ void findKNearestNeighbors(particle* particles, int* gridIdx, int* grid, int* neighbors, int* num_neighbors, int N, int totalGridSize,int LockNum){ // int index = threadIdx.x + (blockIdx.x * blockDim.x); // if (ParticleConditions(index, N, particles, LockNum)){ // int heap_size = 0; // int x,y,z,idx; // float r; // glm::vec4 p_j, p = particles[index].pred_position; // // // Find particle index // x = int(p.x) + BOX_X + 2; // y = int(p.y) + BOX_Y + 2; // z = int(p.z) + 2; // // float max; // int m, max_index, begin, cell_position; // // // Examine all cells within radius // // NOTE: checks the cube that circumscribes the spherical smoothing kernel // for(int i = int(-H + z); i <= int(H + z); i++){ // for(int j = int(-H + y); j <= int(H + y); j++){ // for(int k = int(-H + x); k <= int(H + x); k++){ // idx = k + (2 * (BOX_X + 2) * j) + (4 * (BOX_X + 2) * (BOX_Y + 2) * i); // // if(idx >= totalGridSize || idx < 0){ // continue; // } // // begin = grid[idx]; // // if(begin < 0) continue; // // cell_position = begin; // while(cell_position < N && gridIdx[begin] == gridIdx[cell_position]){ // if(cell_position == index){ // ++cell_position; // continue; // } // p_j = particles[cell_position].pred_position; // r = glm::length(p - p_j); // // if(heap_size < MAX_NEIGHBORS){ // if(r < H){ // neighbors[index * MAX_NEIGHBORS + heap_size] = cell_position; // ++heap_size; // } // }else{ // max = glm::length(p - particles[neighbors[index * MAX_NEIGHBORS]].pred_position); // max_index = 0; // for(m = 1; m < heap_size; m++){ // float d = glm::length(p - particles[neighbors[index * MAX_NEIGHBORS + m]].pred_position); // if(d > max){ // max = d; // max_index = m; // } // } // // if(r < max && r < H){ // neighbors[index * MAX_NEIGHBORS + max_index] = cell_position; // } // } // // ++cell_position; // } // } // } // } // num_neighbors[index] = heap_size; // } //} // //// Wrapper to find neighbors using hash grid //void findNeighbors(particle* particles, int* grid_idx, int* grid, int* neighbors, int N,int LockNum){ // dim3 fullBlocksPerGrid((int)ceil(float(totalGridSize) / float(blockSize))); // dim3 fullBlocksPerGridParticles((int)ceil(float(N)/float(blockSize))); // // // Clear Grid // clearGrid<<<fullBlocksPerGrid, blockSize>>>(grid, totalGridSize); // checkCUDAErrorWithLine("clearGrid failed!"); // // // Match particle to index // findParticleGridIndex<<<fullBlocksPerGridParticles, blockSize>>>(particles, grid_idx, N); // 
checkCUDAErrorWithLine("findParticleGridIndex failed!"); // // // Cast to device pointers // thrust::device_ptr<int> t_grid_idx = thrust::device_pointer_cast(grid_idx); // thrust::device_ptr<particle> t_particles = thrust::device_pointer_cast(particles); // // // Sort by key // thrust::sort_by_key(t_grid_idx, t_grid_idx + N, t_particles); // checkCUDAErrorWithLine("thrust failed!"); // // // Match sorted particle index // matchParticleToCell<<<fullBlocksPerGridParticles, blockSize>>>(grid_idx, grid, N, totalGridSize); // checkCUDAErrorWithLine("matchParticletoCell failed!"); // // // Find K nearest neighbors // findKNearestNeighbors<<<fullBlocksPerGridParticles, blockSize>>>(particles, grid_idx, grid, neighbors, num_neighbors, N, totalGridSize,LockNum); // checkCUDAErrorWithLine("findKNearestNeighbors failed!"); //} /************************************* * Kernels for Jacobi Solver *************************************/ __global__ void calculateLambda(particle* particles, int* neighbors, int* num_neighbors, int N,int LockNum){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(ParticleConditions(index,N,particles,LockNum,FLUID)){ int k = num_neighbors[index]; glm::vec3 p = glm::vec3(particles[index].pred_position); float p_i = calculateRo(particles, p, neighbors, k, index); float C_i = (p_i / REST_DENSITY) - 1.0f; float C_i_gradient, sum_gradients = 0.0f; for(int i = 0; i < k; i++){ // Calculate gradient when k = j C_i_gradient = glm::length(calculateCiGradient(p, glm::vec3(particles[neighbors[i + index * MAX_NEIGHBORS]].pred_position))); sum_gradients += (C_i_gradient * C_i_gradient); } // Add gradient when k = i C_i_gradient = glm::length(calculateCiGradientAti(particles, p, neighbors, k, index)); sum_gradients += (C_i_gradient * C_i_gradient); float sumCi = sum_gradients + RELAXATION; particles[index].lambda = -1.0f * (C_i / sumCi); } } __global__ void calculateDeltaPi(particle* particles, int* neighbors, int* num_neighbors, int N,int LockNum){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (ParticleConditions(index, N, particles, LockNum,FLUID)){ int k = num_neighbors[index]; glm::vec3 p = glm::vec3(particles[index].pred_position); float l = particles[index].lambda; glm::vec3 delta = glm::vec3(0.0f); int p_j_idx; #if PRESSURE == 1 float k_term; glm::vec3 d_q = DELTA_Q * glm::vec3(1.0f) + p; #endif float s_corr = 0.0f; for(int i = 0; i < k; i++){ p_j_idx = neighbors[i + index * MAX_NEIGHBORS]; #if PRESSURE == 1 float poly6pd_q = wPoly6Kernel(p, d_q); if(poly6pd_q < EPSILON) k_term = 0.0f; else k_term = wPoly6Kernel(p, glm::vec3(particles[p_j_idx].pred_position)) / poly6pd_q; s_corr = -1.0f * PRESSURE_K * pow(k_term, PRESSURE_N); #endif delta += (l + particles[p_j_idx].lambda + s_corr) * wGradientSpikyKernel(p, glm::vec3(particles[p_j_idx].pred_position)); } particles[index].delta_pos = 1.0f / REST_DENSITY * delta; } } __global__ void calculateCurl(particle* particles, int* neighbors, int* num_neighbors, int N,int LockNum){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (ParticleConditions(index, N, particles, LockNum,FLUID)){ int k = num_neighbors[index]; glm::vec3 p = glm::vec3(particles[index].pred_position); glm::vec3 v = particles[index].velocity; int j_idx; glm::vec3 v_ij, gradient, accum = glm::vec3(0.0f); for(int i = 0; i < k; i++){ j_idx = neighbors[i + index * MAX_NEIGHBORS]; v_ij = particles[j_idx].velocity - v; gradient = wGradientSpikyKernel(p, glm::vec3(particles[j_idx].pred_position)); accum += glm::cross(v_ij, gradient); } particles[index].curl = 
accum; } } __global__ void applyVorticity(particle* particles, int* neighbors, int* num_neighbors, int N,int LockNum){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(ParticleConditions(index,N,particles,LockNum,FLUID)){ int k = num_neighbors[index]; glm::vec3 p = glm::vec3(particles[index].pred_position); glm::vec3 w = particles[index].curl; int j_idx; float mag_w; glm::vec3 r, grad = glm::vec3(0.0f); for(int i = 0; i < k; i++){ j_idx = neighbors[i + index * MAX_NEIGHBORS]; r = glm::vec3(particles[j_idx].pred_position) - p; mag_w = glm::length(particles[j_idx].curl - w); grad.x += mag_w / r.x; grad.y += mag_w / r.y; grad.z += mag_w / r.z; } glm::vec3 vorticity, N; N = 1.0f/(glm::length(grad) + .001f) * grad; vorticity = float(RELAXATION) * (glm::cross(N, w)); particles[index].external_forces += vorticity; } } __global__ void initializeParticles(int N, particle* particles,int LockNum=INT_MAX) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; float gravity = -9.8f; if (Conditions(index, N, LockNum)) { particle p = particles[index]; glm::vec3 rand = (generateRandomNumberFromThread(1.0f, index)-0.5f); p.ID=index; p.LayerMask = FLUID; p.position.x = (index%20)-9.5f; p.position.y = ((index/20)%20)-9.5f; p.position.z = (index/400)+10.0f+0.05f*rand.z; p.position.w = 1.0f; //p.position=glm::vec4(index%9-3.5f,(index/9)%20-9.5f,5.0f+index/180,1.0f); p.pred_position = p.position; p.velocity = glm::vec3(0.0f); p.external_forces = glm::vec3(0.0f,0.0f,gravity); particles[index] = p; } else if(index<N){ particle p=particles[index]; p.ID=index; //p.LayerMask = CONTAINER; p.velocity=glm::vec3(0.0f); p.external_forces=glm::vec3(0.0f,0.0f,gravity); particles[index]=p; } } __global__ void setExternalForces(int N, particle* particles, int LockNum,vec3 extForce){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index,N,particles,LockNum,FLUID|RIGID_BODY)){ particles[index].external_forces = extForce; } } //Simple Euler integration scheme __global__ void applyExternalForces(int N, float dt, particle* particles,int LockNum) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index,N,particles,LockNum,FLUID|RIGID_BODY)){ particle p = particles[index]; p.velocity+=dt*p.external_forces; p.delta_pos=glm::vec3(0.0f); p.pred_position=p.position+vec4(p.velocity*dt,0.0); particles[index] = p; } } __global__ void updatePosition(int N, particle* particles,int LockNum=0) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index, N, particles, LockNum,FLUID|RIGID_BODY)){ //if (length(particles[index].position - particles[index].pred_position) > frozenDistance) particles[index].position = particles[index].pred_position; } /*if (index < N){ particles[index].LayerMask &= ~FROZEN; }*/ //if(particles[index].ID<=LockNum){ // particles[index].velocity=vec3(0.0f); // particles[index].curl=vec3(0.0f); // //particles[index].external_forces=vec3(0.0f); //} } __global__ void updatePredictedPosition(int N, particle* particles,int LockNum=0) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index, N, particles, LockNum,FLUID)){ particles[index].pred_position += glm::vec4(particles[index].delta_pos,0.0f); } } __global__ void updateVelocity(int N, particle* particles, float dt,int LockNum) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index,N,particles,LockNum,FLUID|RIGID_BODY)){ particles[index].velocity = glm::vec3((1.0f/dt)*(particles[index].pred_position - particles[index].position)); if 
(length(particles[index].velocity) > 20.0f) particles[index].velocity = 20.0f*normalize(particles[index].velocity); /*if (length(particles[index].velocity) < frozenDistance) particles[index].LayerMask |= FROZEN;*/ } } __global__ void boxCollisionResponse(int N, particle* particles, int LockNum){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index,N,particles,LockNum,FLUID|RIGID_BODY)){ vec3 randv = generateRandomNumberFromThread(N, index); if( particles[index].pred_position.z < 0.0f){ particles[index].pred_position.z = 0.001f*randv.z+0.01f; glm::vec3 normal = glm::vec3(0,0,1); particles[index].velocity.z = collision_restitution*abs(particles[index].velocity.z); } if( particles[index].pred_position.z > BOX_Z){ particles[index].pred_position.z = BOX_Z - 0.001f*randv.z-0.01f; glm::vec3 normal = glm::vec3(0,0,-1); particles[index].velocity.z = -collision_restitution*abs(particles[index].velocity.z); } if( particles[index].pred_position.y < -BOX_Y){ particles[index].pred_position.y = -BOX_Y + 0.001f*randv.y+0.01f; glm::vec3 normal = glm::vec3(0,1,0); particles[index].velocity.y = collision_restitution*abs(particles[index].velocity.y); } if( particles[index].pred_position.y > BOX_Y){ particles[index].pred_position.y = BOX_Y - 0.001f*randv.y-0.01f; glm::vec3 normal = glm::vec3(0,-1,0); particles[index].velocity.y = -collision_restitution*abs(particles[index].velocity.y); } if( particles[index].pred_position.x < -BOX_X){ particles[index].pred_position.x = -BOX_X + 0.001f*randv.x+0.01f; glm::vec3 normal = glm::vec3(1,0,0); particles[index].velocity.x = collision_restitution*abs(particles[index].velocity.x); } if( particles[index].pred_position.x > BOX_X){ particles[index].pred_position.x = BOX_X - 0.001f*randv.x-0.01f; glm::vec3 normal = glm::vec3(-1,0,0); particles[index].velocity.x = -collision_restitution*abs(particles[index].velocity.x); } } } /************************************* * shape matching * *************************************/ void jacobiRotate(mat3 &A, mat3 &R, int p, int q){ // rotates A through phi in pq-plane to set A(p,q) = 0 // rotation stored in R whose columns are eigenvectors of A float d = (A[p][p] - A[q][q]) / (2.0f*A[p][q]); float t = 1.0f / (abs(d) + sqrt(d*d + 1.0f)); if (d < 0.0f) t = -t; float c = 1.0f / sqrt(t*t + 1.0f); float s = t*c; A[p][p] += t*A[p][q]; A[q][q] -= t*A[p][q]; A[p][q] = A[q][p] = 0.0f; //transform A int k; for (k = 0; k < 3; k++){ if (k != p&&k != q){ float Akp = c*A[k][p] + s*A[k][q]; float Akq = -s*A[k][p] + c*A[k][q]; A[k][p] = A[p][k] = Akp; A[k][q] = A[q][k] = Akq; } } //store rotation in R for (k = 0; k < 3; k++){ float Rkp = c*R[k][p] + s*R[k][q]; float Rkq = -s*R[k][p] + c*R[k][q]; R[k][p] = Rkp; R[k][q] = Rkq; } } void eigenDecompposition(mat3 &outA, mat3& outR){ //only for symmetric matrices! 
//A=RA'R^T, where A' is diagnoal and R orthonormal //identity; mat3 A(outA); mat3 R = mat3(0.0f); R[0][0] = R[1][1] = R[2][2] = 1.0f; /*mat3 view = A; printf("view[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ int iter = 0; while (iter < JACOBI_ITERATIONS){ int p, q; float a, maxval; maxval = -1.0f; for (int i = 0; i < 2; i++){ for (int j = i+1; j < 3; j++){ a = abs(A[i][j]); if (maxval<0.0f || a>maxval){ p = i; q = j; maxval = a; } } } //all small enough->done if (maxval < 0.0001f) break; //rotate matrix with respect to that element jacobiRotate(A, R, p, q); /*printf("---------------------------------------------\n"); view = A; printf("A[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]); view = R; printf("R[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ iter++; } outA = A; outR = R; } void polarDecomposition(mat3 A, mat3 &R, mat3 &S){ //A=RS, where S is symmetric and R is orthonormal //-> S=(A^T A)^(1/2) //mat3 A, R, S; //A = mat3(vec3(1.0f,-.3333f,.959f),vec3(.495f,1.0f,0.0f),vec3(.5f,-.247f,1.5f)); //identity; R = mat3(0.0f); R[0][0] = R[1][1] = R[2][2] = 1.0f; mat3 ATA(0.0f); ATA = glm::transpose(A)*A; mat3 view = transpose(A); /*printf("AT[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ mat3 U; eigenDecompposition(ATA, U); view = U; /*printf("QT[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ float l0 = ATA[0][0]; ATA[0][0] = l0 = l0 <= 0.0f ? 0.0f : sqrt(l0); float l1 = ATA[1][1]; ATA[1][1]=l1 = l1 <= 0.0f ? 0.0f : sqrt(l1); float l2 = ATA[2][2]; ATA[2][2]=l2 = l2 <= 0.0f ? 
0.0f : sqrt(l2); view = ATA; /*printf("ATA[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ view = transpose(U)*ATA*U; /*printf("U[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ mat3 S1=inverse(view); /*S1[0][0] = l0*U[0][0] * U[0][0] + l1*U[0][1] * U[0][1] + l2*U[0][2] * U[0][2]; S1[0][1] = l0*U[0][0] * U[1][0] + l1*U[0][1] * U[1][1] + l2*U[0][2] * U[1][2]; S1[0][2] = l0*U[0][0] * U[2][0] + l1*U[0][1] * U[2][1] + l2*U[0][2] * U[2][2]; S1[1][0] = S1[0][1]; S1[1][1] = l0*U[1][0] * U[1][0] + l1*U[1][1] * U[1][1] + l2*U[1][2] * U[1][2]; S1[1][2] = l0*U[1][0] * U[2][0] + l1*U[1][1] * U[2][1] + l2*U[1][2] * U[2][2]; S1[2][0] = S1[0][2]; S1[2][1] = S1[1][2]; S1[2][2] = l0*U[2][0] * U[2][0] + l1*U[2][1] * U[2][1] + l2*U[2][2] * U[2][2];*/ R = A*S1; for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) if (abs(R[i][j] < 0.001f)) R[i][j] = 0.0f; R[0] = normalize(R[0]); R[1] = normalize(R[1]); R[2] = normalize(R[2]); view = R; /*printf("view[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ S = transpose(R)*A; } __global__ void SetGoalPosition(particle* particles, int N, mat3 R, vec4 MassCenter0, vec4 MassCenter1){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index,N,particles,0,RIGID_BODY)){ particles[index].pred_position=vec4(R*(vec3(particles[index].position - MassCenter0) + vec3(MassCenter1)),1.0f); } } __global__ void MassCenterPredictedPosition(vec4* rigPredictedPos, particle* particles, int N, int startID){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index, N, particles, 0, RIGID_BODY)){ rigPredictedPos[particles[index].ID - startID] = particles[index].pred_position; } } __global__ void MassCenterPredictedMatrix(mat3* rigPredictedRot, particle* particles, int N, int startID, vec4 oldMC, vec4 newMC){ int index = threadIdx.x + (blockIdx.x * blockDim.x); if (ParticleConditions(index, N, particles, 0, RIGID_BODY)){ vec3 p = vec3(particles[index].position - oldMC); vec3 q = vec3(particles[index].pred_position - newMC); rigPredictedRot[particles[index].ID - startID] = mat3(q.x*p, q.y*p, q.z*p); } } /************************************* * Wrappers for the __global__ calls * *************************************/ //Initialize memory, update some globals void initCuda(int N) { numParticles = N; numGenerated = 0; dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize))); //mat3 A, R, S; //polarDecomposition << <1, 1 >> >(); LockNum = 0; cudaMalloc((void**)&particles, N * sizeof(particle)); SmallObjMesh som(MeshFileName); LockNum+=som.position.size(); printf("%d Vertices\n",LockNum); SmallObjMesh rigidtest("D:\\workspace\\PhysiAnim\\FinalProj\\Position_Based_Fluids\\PBF_Stage2\\Ref\\Models\\cont.obj"); rigtest.start = LockNum; rigtest.size = rigidtest.position.size(); LockNum += rigidtest.position.size(); printf("%d Vertices\n", LockNum); particle* par=new particle[LockNum/*som.position.size()*/]; for (int i = 0; i<som.position.size(); i++){ par[i].position=vec4(som.position[i]+vec3(0.0,0.0,10.0),1.0); par[i].pred_position = par[i].position; par[i].LayerMask = CONTAINER; } if (rigidtest.position.size() > 0){ rigtest.ID = 0; rigtest.size = rigidtest.position.size(); rigtest.newMassCenter = rigtest.oldMassCenter = 
vec4(0.0); } vec4 testrig(0.0); for (int i = som.position.size(); i < som.position.size() + rigidtest.position.size(); i++){ par[i].position = vec4(rigidtest.position[i - som.position.size()]+vec3(0,0,70.0f), 1.0f); par[i].pred_position = par[i].position; testrig += par[i].position; par[i].LayerMask = RIGID_BODY; } if (LockNum > N){ printf("The mesh file need %d particles but the total particle number is set to %d!\n", LockNum, N); printf("Program down!\n"); exit(-1); } if(LockNum>0){ cudaMemcpy(particles,par,LockNum*sizeof(particle),cudaMemcpyHostToDevice); } //delete [] par; checkCUDAErrorWithLine("particles cudamalloc failed"); cudaMalloc((void**)&neighbors, MAX_NEIGHBORS*N*sizeof(int)); cudaMalloc((void**)&num_neighbors, N*sizeof(int)); cudaMalloc((void**)&grid_idx, N*sizeof(int)); checkCUDAErrorWithLine("grid idx cudamalloc failed!"); cudaMalloc((void**)&grid, totalGridSize*sizeof(int)); checkCUDAErrorWithLine("grid cudamalloc failed!"); cudaMalloc((void**)&grid_elements, grid_width*grid_depth*grid_height*sizeof(GridElement)); cudaMalloc((void**)&grid_lock, grid_width*grid_depth*grid_height*sizeof(int)); cudaMemset(grid_lock, 0, grid_width*grid_depth*grid_height*sizeof(int)); checkCUDAErrorWithLine("grid_elements cudamalloc failed!"); cudaMalloc((void**)&rigPredictedPos, rigidtest.position.size()*sizeof(vec4)); cudaMalloc((void**)&rigPredictedRot, rigidtest.position.size()*sizeof(mat3)); initializeParticles<<<fullBlocksPerGrid, blockSize>>>(N, particles,LockNum); MassCenterPredictedPosition << <fullBlocksPerGrid, blockSize >> >(rigPredictedPos,particles, N, som.position.size()); thrust::device_ptr<vec4> begin(rigPredictedPos); //begin+=som.position.size(); thrust::device_ptr<vec4> end = begin + rigtest.size; //end += rigidtest.position.size(); rigtest.oldMassCenter = thrust::reduce(begin, end,vec4(0.0), thrust::plus<vec4>())/rigtest.size; rigtest.oldMassCenter.w = 1.0f; printf("zhehuo: [%f, %f, %f]\n", rigtest.oldMassCenter.x, rigtest.oldMassCenter.y, rigtest.oldMassCenter.z); printf("tester: [%f, %f, %f]\n", testrig.x, testrig.y, testrig.z); checkCUDAErrorWithLine("Kernel failed!"); cudaThreadSynchronize(); } void cudaPBFUpdateWrapper(float dt) { dim3 fullBlocksPerGrid((int)ceil(float(numParticles)/float(blockSize))); int innerLockNum=0; if(cleanupFixedPoints&&innerLockNum!=LockNum){ //cleanup<<<fullBlocksPerGrid, blockSize>>>(numParticles,particles,inn } innerLockNum=LockNum; /*if (ExtForceSet){ ExtForceSet = false; setExternalForces << < fullBlocksPerGrid, blockSize >> >(numParticles, particles,innerLockNum,gravity); }*/ //printf("Good\n"); applyExternalForces << <fullBlocksPerGrid, blockSize >> >(numParticles, dt, particles, innerLockNum); checkCUDAErrorWithLine("applyExternalForces failed!"); //findNeighbors(particles, grid_idx, grid, neighbors, numParticles,innerLockNum); findParticleNeighborsWrapper(particles, neighbors, num_neighbors, numParticles, innerLockNum); checkCUDAErrorWithLine("findNeighbors failed!"); boxCollisionResponse << <fullBlocksPerGrid, blockSize >> >(numParticles, particles, innerLockNum); MassCenterPredictedPosition << <fullBlocksPerGrid, blockSize >> >(rigPredictedPos, particles, numParticles, rigtest.start); checkCUDAErrorWithLine("MassCenterPredictedPosition failed!"); thrust::device_ptr<vec4> begin(rigPredictedPos); thrust::device_ptr<vec4> end = begin + rigtest.size; rigtest.newMassCenter = thrust::reduce(begin, end, vec4(0.0), thrust::plus<vec4>())/rigtest.size; rigtest.newMassCenter.w = 1.0f; //printf("mass center: [%f, %f, %f]\n", 
rigtest.newMassCenter.x, rigtest.newMassCenter.y, rigtest.newMassCenter.z); MassCenterPredictedMatrix << <fullBlocksPerGrid, blockSize >> >(rigPredictedRot, particles, numParticles, rigtest.start, rigtest.oldMassCenter, rigtest.newMassCenter); checkCUDAErrorWithLine("MassCenterPredictedMatrix failed!"); thrust::device_ptr<mat3> rotBegin(rigPredictedRot); thrust::device_ptr<mat3> rotEnd = rotBegin + rigtest.size; rigRotMat = thrust::reduce(rotBegin, rotEnd, mat3(0.0), thrust::plus<mat3>()); mat3 rotationDecomposition; mat3 scaleDecomposition; //mat3 view = rigRotMat; /*printf("view[3][3]=\n[%f,%f,%f]\n[%f,%f,%f]\n[%f,%f,%f]\n", view[0][0], view[0][1], view[0][2], view[1][0], view[1][1], view[1][2], view[2][0], view[2][1], view[2][2]);*/ polarDecomposition(rigRotMat, rotationDecomposition, scaleDecomposition); checkCUDAErrorWithLine("polarDecomposition failed!"); SetGoalPosition << <fullBlocksPerGrid, blockSize >> >(particles, numParticles, rotationDecomposition, rigtest.oldMassCenter, rigtest.newMassCenter); checkCUDAErrorWithLine("SetGoalPosition failed!"); MassCenterPredictedPosition << <fullBlocksPerGrid, blockSize >> >(rigPredictedPos, particles, numParticles, rigtest.start); checkCUDAErrorWithLine("MassCenterPredictedPosition failed!"); begin=thrust::device_ptr<vec4>(rigPredictedPos); end = begin + rigtest.size; rigtest.oldMassCenter = thrust::reduce(begin, end, vec4(0.0), thrust::plus<vec4>()) / rigtest.size; rigtest.oldMassCenter.w = 1.0f; for(int i = 0; i < SOLVER_ITERATIONS; i++){ calculateLambda<<<fullBlocksPerGrid, blockSize>>>(particles, neighbors, num_neighbors, numParticles,innerLockNum); calculateDeltaPi<<<fullBlocksPerGrid, blockSize>>>(particles, neighbors, num_neighbors, numParticles,innerLockNum); //PEFORM COLLISION DETECTION AND RESPONSE updatePredictedPosition<<<fullBlocksPerGrid, blockSize>>>(numParticles, particles,innerLockNum); } updateVelocity << <fullBlocksPerGrid, blockSize >> >(numParticles, particles, dt, innerLockNum); calculateCurl << <fullBlocksPerGrid, blockSize >> >(particles, neighbors, num_neighbors, numParticles, innerLockNum); applyVorticity << <fullBlocksPerGrid, blockSize >> >(particles, neighbors, num_neighbors, numParticles, innerLockNum); updatePosition<<<fullBlocksPerGrid, blockSize>>>(numParticles, particles,innerLockNum); checkCUDAErrorWithLine("updatePosition failed!"); cudaThreadSynchronize(); } void cudaUpdateVBO(float * vbodptr, int width, int height) { dim3 fullBlocksPerGrid((int)ceil(float(numParticles)/float(blockSize))); sendToVBO<<<fullBlocksPerGrid, blockSize>>>(numParticles, particles, vbodptr, width, height, scene_scale,LockNum); cudaThreadSynchronize(); } void freeCuda(){ cudaFree(particles); cudaFree(neighbors); cudaFree(num_neighbors); cudaFree(grid_idx); cudaFree(grid); }
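The hipified fluid-simulation source and its CUDA original above differ almost entirely in runtime API spellings; the code structure is untouched. A compact sketch of the host-side allocate/copy/sync/free pattern used in initCuda(), with the substitutions hipify applied shown in comments (the buffer and size names are placeholders, not taken from the file):

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

void roundTrip(int n) {
    int* d_buf = nullptr;
    int* h_buf = (int*)malloc(n * sizeof(int));
    cudaMalloc((void**)&d_buf, n * sizeof(int));             // hipify: hipMalloc
    cudaMemcpy(d_buf, h_buf, n * sizeof(int),
               cudaMemcpyHostToDevice);                      // hipify: hipMemcpy, hipMemcpyHostToDevice
    cudaThreadSynchronize();                                 // hipify: hipDeviceSynchronize (deprecated call modernized)
    cudaError_t err = cudaGetLastError();                    // hipify: hipGetLastError
    if (err != cudaSuccess)                                  // hipify: hipSuccess
        fprintf(stderr, "error: %s\n", cudaGetErrorString(err));  // hipify: hipGetErrorString
    cudaFree(d_buf);                                         // hipify: hipFree
    free(h_buf);
}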
88dd2fc1da0f22f3c1486924a1a0da7b1897a21c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> extern "C" void dot_acc(int*, int*, int*, int, int); extern "C" void dot(int*, int*, int*, int, int); int main() { int i, j, m, n; int *A, *B, *C, *D; int *A_d, *B_d, *C_d; srand(0); m = 4098; n = 4098; A = (int*) malloc( m*n * sizeof(int)); B = (int*) malloc( m*n * sizeof(int)); C = (int*) malloc( m * sizeof(int)); D = (int*) malloc( m * sizeof(int)); for( i = 0; i < m; i++ ) { for( j = 0; j < n; j++ ) { A[i*n+j] = rand() % 100 + 1; B[i*n+j] = rand() % 100 + 1; } } hipMalloc((void **)&A_d, m*n*sizeof(int)); hipMalloc((void **)&B_d, m*n*sizeof(int)); hipMalloc((void **)&C_d, m* sizeof(int)); hipMemcpy(A_d, A, m*n*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(B_d, B, m*n*sizeof(int), hipMemcpyHostToDevice); dot_acc(A_d,B_d,C_d,m,n); hipMemcpy(C, C_d, m*sizeof(int), hipMemcpyDeviceToHost); hipFree(A_d); hipFree(B_d); hipFree(C_d); dot(A,B,D,m,n); for( i = 0; i < m; i++ ) { if( C[i] != D[i] ) { printf("Error at index %i\n", i); return 0; } } printf("Program finished successfully.\n"); return 0; }
88dd2fc1da0f22f3c1486924a1a0da7b1897a21c.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> extern "C" void dot_acc(int*, int*, int*, int, int); extern "C" void dot(int*, int*, int*, int, int); int main() { int i, j, m, n; int *A, *B, *C, *D; int *A_d, *B_d, *C_d; srand(0); m = 4098; n = 4098; A = (int*) malloc( m*n * sizeof(int)); B = (int*) malloc( m*n * sizeof(int)); C = (int*) malloc( m * sizeof(int)); D = (int*) malloc( m * sizeof(int)); for( i = 0; i < m; i++ ) { for( j = 0; j < n; j++ ) { A[i*n+j] = rand() % 100 + 1; B[i*n+j] = rand() % 100 + 1; } } cudaMalloc((void **)&A_d, m*n*sizeof(int)); cudaMalloc((void **)&B_d, m*n*sizeof(int)); cudaMalloc((void **)&C_d, m* sizeof(int)); cudaMemcpy(A_d, A, m*n*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(B_d, B, m*n*sizeof(int), cudaMemcpyHostToDevice); dot_acc(A_d,B_d,C_d,m,n); cudaMemcpy(C, C_d, m*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(A_d); cudaFree(B_d); cudaFree(C_d); dot(A,B,D,m,n); for( i = 0; i < m; i++ ) { if( C[i] != D[i] ) { printf("Error at index %i\n", i); return 0; } } printf("Program finished successfully.\n"); return 0; }
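The driver above only declares dot() and dot_acc() with C linkage; their definitions live in other translation units (dot_acc is presumably the accelerated version, likely OpenACC or CUDA). A hypothetical sketch of what they could look like, assuming C[i] holds the dot product of row i of A with row i of B — an assumption, since the real implementations are not part of this file:

#include <cuda_runtime.h>

// Host reference: plain row-wise dot products (hypothetical).
extern "C" void dot(int* A, int* B, int* D, int m, int n) {
    for (int i = 0; i < m; i++) {
        int sum = 0;
        for (int j = 0; j < n; j++) sum += A[i*n + j] * B[i*n + j];
        D[i] = sum;
    }
}

// Device version: one thread per row (hypothetical; the real dot_acc may differ).
__global__ void dotRowKernel(const int* A, const int* B, int* C, int m, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < m) {
        int sum = 0;
        for (int j = 0; j < n; j++) sum += A[i*n + j] * B[i*n + j];
        C[i] = sum;
    }
}

extern "C" void dot_acc(int* A_d, int* B_d, int* C_d, int m, int n) {
    dotRowKernel<<<(m + 255) / 256, 256>>>(A_d, B_d, C_d, m, n);
    cudaDeviceSynchronize();
}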
96e251088be1c078ce2847eb60d78a2e290a5700.hip
// !!! This is a file automatically generated by hipify!!! #include <gtest/gtest.h> #include <vector> #include <thrust/device_vector.h> #include "nvstrings/NVStrings.h" #include "./utils.h" struct TestConvert : public GdfTest{}; TEST_F(TestConvert, Hash) { std::vector<const char*> hstrs{ "thesé", nullptr, "are", "the", "tést", "strings", "" }; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); thrust::device_vector<unsigned int> results(hstrs.size(),0); strs->hash(results.data().get()); unsigned int expected[] = { 126208335, 0, 3771471008, 2967174367, 1378466566, 3184694146, 1257683291 }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); NVStrings::destroy(strs); } TEST_F(TestConvert, ToInteger) { std::vector<const char*> hstrs{"1234", nullptr, "-876", "543.2", "-0.12", ".55", "-.002", "", "de", "abc123", "123abc", "456e", "-1.78e+5"}; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); { thrust::device_vector<int> results(hstrs.size(),0); strs->stoi(results.data().get()); int expected[] = { 1234, 0, -876, 543, 0, 0, 0, 0, 0, 0, 123, 456, -1 }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); } { thrust::device_vector<long> results(hstrs.size(),0); strs->stol(results.data().get()); long expected[] = { 1234, 0, -876, 543, 0, 0, 0, 0, 0, 0, 123, 456, -1 }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); } NVStrings::destroy(strs); } TEST_F(TestConvert, FromInteger) { { int values[] = {100, 987654321, -12761, 0, 5, -4}; thrust::device_vector<int> results(6); hipMemcpy( results.data().get(), values, 6*sizeof(int), hipMemcpyHostToDevice); NVStrings* got = NVStrings::itos(results.data().get(),6); const char* expected[] = { "100", "987654321", "-12761", "0", "5", "-4" }; EXPECT_TRUE( verify_strings(got,expected)); NVStrings::destroy(got); } { long values[] = {100000, 9876543210, -1276100, 0, 5, -4}; thrust::device_vector<long> results(6); hipMemcpy( results.data().get(), values, 6*sizeof(long), hipMemcpyHostToDevice); NVStrings* got = NVStrings::ltos(results.data().get(),6); const char* expected[] = { "100000", "9876543210", "-1276100", "0", "5", "-4" }; EXPECT_TRUE( verify_strings(got,expected)); NVStrings::destroy(got); } } TEST_F(TestConvert, Hex) { std::vector<const char*> hstrs{"1234", nullptr, "98BEEF", "1a5", "CAFE", "2face"}; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); thrust::device_vector<unsigned int> results(hstrs.size(),0); strs->htoi(results.data().get()); unsigned int expected[] = { 4660, 0, 10010351, 421, 51966, 195278 }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); NVStrings::destroy(strs); } TEST_F(TestConvert, ToFloat) { std::vector<const char*> hstrs{"1234", nullptr, "-876", "543.2", "-0.12", ".25", "-.002", "", "NaN", "abc123", "123abc", "456e", "-1.78e+5", "-122.33644782123456789", "12e+309" }; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); { float nanval = std::numeric_limits<float>::quiet_NaN(); float infval = std::numeric_limits<float>::infinity(); thrust::device_vector<float> results(hstrs.size(),0); strs->stof(results.data().get()); float expected[] = { 1234.0, 0, -876.0, 543.2, -0.12, 0.25, -0.002, 0, nanval, 0, 123.0, 456.0, -178000.0, -122.33645, infval }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) { float fval1 = results[idx]; float fval2 = expected[idx]; if( std::isnan(fval1) )
EXPECT_TRUE( std::isnan(fval2) ); else if( std::isinf(fval1) ) EXPECT_TRUE( std::isinf(fval2) ); else EXPECT_FLOAT_EQ(fval1,fval2); } } { double nanval = std::numeric_limits<double>::quiet_NaN(); double infval = std::numeric_limits<double>::infinity(); thrust::device_vector<double> results(hstrs.size(),0); strs->stod(results.data().get()); double expected[] = { 1234.0, 0, -876.0, 543.2, -0.12, 0.25, -0.002, 0, nanval, 0, 123.0, 456.0, -178000.0, -122.33644782123469, infval }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) { double fval1 = results[idx]; double fval2 = expected[idx]; if( std::isnan(fval1) ) EXPECT_TRUE( std::isnan(fval2) ); else if( std::isinf(fval1) ) EXPECT_TRUE( std::isinf(fval2) ); else EXPECT_NEAR(fval1,fval2,1e-10); } } NVStrings::destroy(strs); } TEST_F(TestConvert, FromFloat) { { float values[] = {100, 654321.25, -12761.125, 0, 5, -4, std::numeric_limits<float>::quiet_NaN()}; thrust::device_vector<float> results(7); hipMemcpy( results.data().get(), values, 7*sizeof(float), hipMemcpyHostToDevice); NVStrings* got = NVStrings::ftos(results.data().get(),7); const char* expected[] = { "100.0", "654321.25", "-12761.125", "0.0", "5.0", "-4.0", "NaN" }; EXPECT_TRUE( verify_strings(got,expected)); NVStrings::destroy(got); } { double values[] = {0.0000012345, 65432125000, -12761.125, 0, 5, -4, std::numeric_limits<double>::infinity()}; thrust::device_vector<double> results(7); hipMemcpy( results.data().get(), values, 7*sizeof(double), hipMemcpyHostToDevice); NVStrings* got = NVStrings::dtos(results.data().get(),7); const char* expected[] = { "1.2345e-06", "6.5432125e+10", "-12761.125", "0.0", "5.0", "-4.0", "Inf" }; EXPECT_TRUE( verify_strings(got,expected)); NVStrings::destroy(got); } } TEST_F(TestConvert, ToBool) { std::vector<const char*> hstrs{"false", nullptr, "", "true", "True", "False"}; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); thrust::device_vector<bool> results(hstrs.size(),0); strs->to_bools(results.data().get(), "true"); bool expected[] = { false, false, false, true, false, false }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); NVStrings::destroy(strs); } TEST_F(TestConvert, FromBool) { bool values[] = { true, false, false, true, true, true }; thrust::device_vector<bool> results(6); hipMemcpy( results.data().get(), values, 6*sizeof(bool), hipMemcpyHostToDevice); NVStrings* got = NVStrings::create_from_bools(results.data().get(),6, "true", "false"); const char* expected[] = { "true", "false", "false", "true", "true", "true" }; EXPECT_TRUE( verify_strings(got,expected)); NVStrings::destroy(got); } TEST_F(TestConvert, ToIPv4) { std::vector<const char*> hstrs{ nullptr, "", "hello", "41.168.0.1", "127.0.0.1", "41.197.0.1" }; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); thrust::device_vector<unsigned int> results(hstrs.size(),0); strs->ip2int(results.data().get()); unsigned int expected[] = { 0,0,0, 698875905, 2130706433, 700776449 }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); NVStrings::destroy(strs); } TEST_F(TestConvert, FromIPv4) { unsigned values[] = { 3232235521, 167772161, 0, 0, 700055553, 700776449 }; thrust::device_vector<unsigned int> results(6); hipMemcpy( results.data().get(), values, 6*sizeof(unsigned int), hipMemcpyHostToDevice); NVStrings* got = NVStrings::int2ip(results.data().get(),6); const char* expected[] = { "192.168.0.1", "10.0.0.1", "0.0.0.0", "0.0.0.0", "41.186.0.1", "41.197.0.1" }; 
EXPECT_TRUE( verify_strings(got,expected)); NVStrings::destroy(got); }
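The ToFloat test above compares parsed values with special cases for NaN and Inf before falling back to a tolerance check. The same idiom, pulled out as a standalone helper for clarity (the helper name is an illustration, not part of NVStrings):

#include <cmath>

// NaN compares equal only to NaN, Inf only to Inf; otherwise use a tolerance.
bool sameFloat(double a, double b, double tol = 1e-10) {
    if (std::isnan(a)) return std::isnan(b);
    if (std::isinf(a)) return std::isinf(b);
    return std::fabs(a - b) <= tol;
}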
96e251088be1c078ce2847eb60d78a2e290a5700.cu
#include <gtest/gtest.h> #include <vector> #include <thrust/device_vector.h> #include "nvstrings/NVStrings.h" #include "./utils.h" struct TestConvert : public GdfTest{}; TEST_F(TestConvert, Hash) { std::vector<const char*> hstrs{ "thesé", nullptr, "are", "the", "tést", "strings", "" }; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); thrust::device_vector<unsigned int> results(hstrs.size(),0); strs->hash(results.data().get()); unsigned int expected[] = { 126208335, 0, 3771471008, 2967174367, 1378466566, 3184694146, 1257683291 }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); NVStrings::destroy(strs); } TEST_F(TestConvert, ToInteger) { std::vector<const char*> hstrs{"1234", nullptr, "-876", "543.2", "-0.12", ".55", "-.002", "", "de", "abc123", "123abc", "456e", "-1.78e+5"}; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); { thrust::device_vector<int> results(hstrs.size(),0); strs->stoi(results.data().get()); int expected[] = { 1234, 0, -876, 543, 0, 0, 0, 0, 0, 0, 123, 456, -1 }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); } { thrust::device_vector<long> results(hstrs.size(),0); strs->stol(results.data().get()); long expected[] = { 1234, 0, -876, 543, 0, 0, 0, 0, 0, 0, 123, 456, -1 }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); } NVStrings::destroy(strs); } TEST_F(TestConvert, FromInteger) { { int values[] = {100, 987654321, -12761, 0, 5, -4}; thrust::device_vector<int> results(6); cudaMemcpy( results.data().get(), values, 6*sizeof(int), cudaMemcpyHostToDevice); NVStrings* got = NVStrings::itos(results.data().get(),6); const char* expected[] = { "100", "987654321", "-12761", "0", "5", "-4" }; EXPECT_TRUE( verify_strings(got,expected)); NVStrings::destroy(got); } { long values[] = {100000, 9876543210, -1276100, 0, 5, -4}; thrust::device_vector<long> results(6); cudaMemcpy( results.data().get(), values, 6*sizeof(long), cudaMemcpyHostToDevice); NVStrings* got = NVStrings::ltos(results.data().get(),6); const char* expected[] = { "100000", "9876543210", "-1276100", "0", "5", "-4" }; EXPECT_TRUE( verify_strings(got,expected)); NVStrings::destroy(got); } } TEST_F(TestConvert, Hex) { std::vector<const char*> hstrs{"1234", nullptr, "98BEEF", "1a5", "CAFE", "2face"}; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); thrust::device_vector<unsigned int> results(hstrs.size(),0); strs->htoi(results.data().get()); unsigned int expected[] = { 4660, 0, 10010351, 421, 51966, 195278 }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); NVStrings::destroy(strs); } TEST_F(TestConvert, ToFloat) { std::vector<const char*> hstrs{"1234", nullptr, "-876", "543.2", "-0.12", ".25", "-.002", "", "NaN", "abc123", "123abc", "456e", "-1.78e+5", "-122.33644782123456789", "12e+309" }; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); { float nanval = std::numeric_limits<float>::quiet_NaN(); float infval = std::numeric_limits<float>::infinity(); thrust::device_vector<float> results(hstrs.size(),0); strs->stof(results.data().get()); float expected[] = { 1234.0, 0, -876.0, 543.2, -0.12, 0.25, -0.002, 0, nanval, 0, 123.0, 456.0, -178000.0, -122.33645, infval }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) { float fval1 = results[idx]; float fval2 = expected[idx]; if( std::isnan(fval1) ) EXPECT_TRUE( std::isnan(fval2) ); else if( std::isinf(fval1) 
) EXPECT_TRUE( std::isinf(fval2) ); else EXPECT_FLOAT_EQ(fval1,fval2); } } { double nanval = std::numeric_limits<double>::quiet_NaN(); double infval = std::numeric_limits<double>::infinity(); thrust::device_vector<double> results(hstrs.size(),0); strs->stod(results.data().get()); double expected[] = { 1234.0, 0, -876.0, 543.2, -0.12, 0.25, -0.002, 0, nanval, 0, 123.0, 456.0, -178000.0, -122.33644782123469, infval }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) { double fval1 = results[idx]; double fval2 = expected[idx]; if( std::isnan(fval1) ) EXPECT_TRUE( std::isnan(fval2) ); else if( std::isinf(fval1) ) EXPECT_TRUE( std::isinf(fval2) ); else EXPECT_NEAR(fval1,fval2,1e-10); } } NVStrings::destroy(strs); } TEST_F(TestConvert, FromFloat) { { float values[] = {100, 654321.25, -12761.125, 0, 5, -4, std::numeric_limits<float>::quiet_NaN()}; thrust::device_vector<float> results(7); cudaMemcpy( results.data().get(), values, 7*sizeof(float), cudaMemcpyHostToDevice); NVStrings* got = NVStrings::ftos(results.data().get(),7); const char* expected[] = { "100.0", "654321.25", "-12761.125", "0.0", "5.0", "-4.0", "NaN" }; EXPECT_TRUE( verify_strings(got,expected)); NVStrings::destroy(got); } { double values[] = {0.0000012345, 65432125000, -12761.125, 0, 5, -4, std::numeric_limits<double>::infinity()}; thrust::device_vector<double> results(7); cudaMemcpy( results.data().get(), values, 7*sizeof(double), cudaMemcpyHostToDevice); NVStrings* got = NVStrings::dtos(results.data().get(),7); const char* expected[] = { "1.2345e-06", "6.5432125e+10", "-12761.125", "0.0", "5.0", "-4.0", "Inf" }; EXPECT_TRUE( verify_strings(got,expected)); NVStrings::destroy(got); } } TEST_F(TestConvert, ToBool) { std::vector<const char*> hstrs{"false", nullptr, "", "true", "True", "False"}; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); thrust::device_vector<bool> results(hstrs.size(),0); strs->to_bools(results.data().get(), "true"); bool expected[] = { false, false, false, true, false, false }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); NVStrings::destroy(strs); } TEST_F(TestConvert, FromBool) { bool values[] = { true, false, false, true, true, true }; thrust::device_vector<bool> results(6); cudaMemcpy( results.data().get(), values, 6*sizeof(bool), cudaMemcpyHostToDevice); NVStrings* got = NVStrings::create_from_bools(results.data().get(),6, "true", "false"); const char* expected[] = { "true", "false", "false", "true", "true", "true" }; EXPECT_TRUE( verify_strings(got,expected)); NVStrings::destroy(got); } TEST_F(TestConvert, ToIPv4) { std::vector<const char*> hstrs{ nullptr, "", "hello", "41.168.0.1", "127.0.0.1", "41.197.0.1" }; NVStrings* strs = NVStrings::create_from_array(hstrs.data(),hstrs.size()); thrust::device_vector<unsigned int> results(hstrs.size(),0); strs->ip2int(results.data().get()); unsigned int expected[] = { 0,0,0, 698875905, 2130706433, 700776449 }; for( int idx = 0; idx < (int) hstrs.size(); ++idx ) EXPECT_EQ(results[idx],expected[idx]); NVStrings::destroy(strs); } TEST_F(TestConvert, FromIPv4) { unsigned values[] = { 3232235521, 167772161, 0, 0, 700055553, 700776449 }; thrust::device_vector<unsigned int> results(6); cudaMemcpy( results.data().get(), values, 6*sizeof(unsigned int), cudaMemcpyHostToDevice); NVStrings* got = NVStrings::int2ip(results.data().get(),6); const char* expected[] = { "192.168.0.1", "10.0.0.1", "0.0.0.0", "0.0.0.0", "41.186.0.1", "41.197.0.1" }; EXPECT_TRUE( verify_strings(got,expected)); 
NVStrings::destroy(got); }
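Outside of GoogleTest, the conversion API exercised above reduces to a create / convert / destroy sequence. A minimal usage sketch built only from calls that appear in these tests (legacy NVStrings API; error handling omitted):

#include <vector>
#include <thrust/device_vector.h>
#include "nvstrings/NVStrings.h"

int main() {
    std::vector<const char*> hstrs{"1234", nullptr, "-876"};
    NVStrings* strs = NVStrings::create_from_array(hstrs.data(), hstrs.size());
    thrust::device_vector<int> out(hstrs.size(), 0);
    strs->stoi(out.data().get());    // null entries come back as 0, as the test expects
    NVStrings::destroy(strs);
    return 0;
}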
272e007c0cb8ecbf4b3c75ac8490971d3078b0c4.hip
// !!! This is a file automatically generated by hipify!!! #ifdef _WIN32 #include <windows.h> #endif #include <thrust/copy.h> #include <thrust/device_vector.h> #include <cuda_gl_interop.h> #include "vtkScalarsToColors.h" #include "vtkPistonDataObject.h" #include "vtkPistonDataWrangling.h" #include "vtkPistonScalarsColors.h" #include "vtkPistonMinMax.h" #include "vtkPistonReference.h" #include "vtkgl.h" #include <iostream> using namespace std; namespace vtkpiston { bool AlmostEqualRelativeAndAbs(float A, float B, float maxDiff, float maxRelDiff) { // Check if the numbers are really close -- needed // when comparing numbers near zero. float diff = fabs(A - B); if (diff <= maxDiff) return true; A = fabs(A); B = fabs(B); float largest = (B > A) ? B : A; if (diff <= largest * maxRelDiff) return true; return false; } template <typename ValueType> struct color_map : thrust::unary_function<ValueType, float3> { const ValueType min; const ValueType max; const int size; float *table; const int numberOfChanels; color_map(float *rtable, int arrSize, int noOfChanels, ValueType rMin, ValueType rMax) : min(rMin), max(rMax), size((arrSize / noOfChanels) - 1), table(rtable), numberOfChanels(noOfChanels) { } __host__ __device__ float3 operator()(ValueType val) { int index = 0; if((max - min) > 0.0) { index = ( (val - min) / (max - min) ) * size; } if (index < 0) index = 0; if (index > size) index = size; index *= numberOfChanels; float3 color; if(numberOfChanels == 1) { color = make_float3(table[index], table[index], table[index]); } else if(numberOfChanels == 2) { color = make_float3(table[index], table[index + 1], 0.0f); } else if(numberOfChanels == 3) { color = make_float3(table[index], table[index + 1], table[index + 2]); } else { // Not supported } return color; } }; //------------------------------------------------------------------------------ void CudaGLInit() { hipDeviceProp_t prop; int dev; // Fill it with zeros memset(&prop,0,sizeof(hipDeviceProp_t)); // Pick a GPU capable of 1.0 or better prop.major=1; prop.minor=0; hipChooseDevice(&dev,&prop); // Set OpenGL device hipError_t res = hipGLSetGLDevice(dev); if (res != hipSuccess) { cerr << "Set device failed ... " << hipGetErrorString(res) << endl; return; } } //------------------------------------------------------------------------------ void CudaRegisterBuffer(struct cudaGraphicsResource **vboResource, GLuint vboBuffer) { hipError_t res = hipGraphicsGLRegisterBuffer(vboResource, vboBuffer, hipGraphicsMapFlagsWriteDiscard); if (res != hipSuccess) { cerr << "Register buffer failed ... " << hipGetErrorString(res) << endl; return; } } //------------------------------------------------------------------------------ void CudaUnregisterResource(struct cudaGraphicsResource *vboResource) { hipError_t res = hipGraphicsUnregisterResource(vboResource); if (res != hipSuccess) { cerr << "Unregister buffer failed ... 
" << hipGetErrorString(res) << endl; return; } } //------------------------------------------------------------------------------ void CudaTransferToGL(vtkPistonDataObject *id, vtkMTimeType dataObjectMTimeCache, vtkPistonScalarsColors *psc, cudaGraphicsResource **vboResources, bool &hasNormals, bool &hasColors) { vtkPistonReference *tr = id->GetReference(); if (tr->type != VTK_POLY_DATA || tr->data == NULL) { // Type mismatch, don't bother trying return; } vtk_polydata *pD = (vtk_polydata *)tr->data; // Claim access to buffer for cuda hipError_t res; res = hipGraphicsMapResources(3, vboResources, 0); if (res != hipSuccess) { cerr << "Claim for CUDA failed ... " << hipGetErrorString(res) << endl; return; } size_t num_bytes; float *vertexBufferData, *normalsBufferData; float3 *colorsBufferData; res = hipGraphicsResourceGetMappedPointer ((void **)&vertexBufferData, &num_bytes, vboResources[0]); if(res != hipSuccess) { cerr << "Get mappedpointer for vertices failed ... " << hipGetErrorString(res) << endl; return; } res = hipGraphicsResourceGetMappedPointer ((void **)&normalsBufferData, &num_bytes, vboResources[1]); if(res != hipSuccess) { cerr << "Get mappedpointer for normals failed ... " << hipGetErrorString(res) << endl; return; } res = hipGraphicsResourceGetMappedPointer ((void **)&colorsBufferData, &num_bytes, vboResources[2]); if(res != hipSuccess) { cerr << "Get mappedpointer for colors failed ... " << hipGetErrorString(res) << endl; return; } // Copy on card verts to the shared on card gl buffer thrust::copy(pD->points->begin(), pD->points->end(), thrust::device_ptr<float>(vertexBufferData)); hasNormals = false; if (pD->normals) { hasNormals = true; // Copy on card verts to the shared on card gl buffer thrust::copy(pD->normals->begin(), pD->normals->end(), thrust::device_ptr<float>(normalsBufferData)); } hasColors = false; if (pD->scalars) { double scalarRange[2]; id->GetScalarsRange(scalarRange); hasColors = true; if(id->GetMTime() > dataObjectMTimeCache) { vtkPiston::minmax_pair<float> result = vtkPiston::find_min_max( pD->scalars); scalarRange[0] = static_cast<double>(result.min_val); scalarRange[1] = static_cast<double>(result.max_val); // Set parameters to compute scalars colors const int numvalues = 256; id->SetScalarsRange(scalarRange); psc->SetTableRange(scalarRange[0], scalarRange[1]); psc->SetNumberOfValues(numvalues); } std::vector<float> *colors = psc->ComputeScalarsColorsf(VTK_RGB); // Copy to GPU thrust::device_vector<float> onGPU(colors->begin(), colors->end()); float *raw_ptr = thrust::raw_pointer_cast(&onGPU[0]); // Now run each scalar data through the map to choose a color for it // \NOTE: Since GPU most likely going to calculate range using single // floating point precision, we may lose precision and hence, we need // to check if the range min and max are almost equal //TODO: Remove this when piston gives us exactly same values for //isocontour. 
float tempRange[2] = { static_cast<float>(scalarRange[0]), static_cast<float>(scalarRange[1]) }; if( AlmostEqualRelativeAndAbs(scalarRange[0], scalarRange[1], numeric_limits<float>::epsilon(), numeric_limits<float>::epsilon() * 10) ) { tempRange[1] = tempRange[0]+1.0; } color_map<float> colorMap(raw_ptr, onGPU.size(), VTK_RGB, tempRange[0], tempRange[1]); thrust::copy(thrust::make_transform_iterator(pD->scalars->begin(), colorMap), thrust::make_transform_iterator(pD->scalars->end(), colorMap), thrust::device_ptr<float3>(colorsBufferData)); } // Allow GL to access again res = hipGraphicsUnmapResources(3, vboResources, 0); if (res != hipSuccess) { cerr << "Release from CUDA failed ... " << hipGetErrorString(res) << endl; return; } return; } } //namespace
272e007c0cb8ecbf4b3c75ac8490971d3078b0c4.cu
#ifdef _WIN32 #include <windows.h> #endif #include <thrust/copy.h> #include <thrust/device_vector.h> #include <cuda_gl_interop.h> #include "vtkScalarsToColors.h" #include "vtkPistonDataObject.h" #include "vtkPistonDataWrangling.h" #include "vtkPistonScalarsColors.h" #include "vtkPistonMinMax.h" #include "vtkPistonReference.h" #include "vtkgl.h" #include <iostream> using namespace std; namespace vtkpiston { bool AlmostEqualRelativeAndAbs(float A, float B, float maxDiff, float maxRelDiff) { // Check if the numbers are really close -- needed // when comparing numbers near zero. float diff = fabs(A - B); if (diff <= maxDiff) return true; A = fabs(A); B = fabs(B); float largest = (B > A) ? B : A; if (diff <= largest * maxRelDiff) return true; return false; } template <typename ValueType> struct color_map : thrust::unary_function<ValueType, float3> { const ValueType min; const ValueType max; const int size; float *table; const int numberOfChanels; color_map(float *rtable, int arrSize, int noOfChanels, ValueType rMin, ValueType rMax) : min(rMin), max(rMax), size((arrSize / noOfChanels) - 1), table(rtable), numberOfChanels(noOfChanels) { } __host__ __device__ float3 operator()(ValueType val) { int index = 0; if((max - min) > 0.0) { index = ( (val - min) / (max - min) ) * size; } if (index < 0) index = 0; if (index > size) index = size; index *= numberOfChanels; float3 color; if(numberOfChanels == 1) { color = make_float3(table[index], table[index], table[index]); } else if(numberOfChanels == 2) { color = make_float3(table[index], table[index + 1], 0.0f); } else if(numberOfChanels == 3) { color = make_float3(table[index], table[index + 1], table[index + 2]); } else { // Not supported } return color; } }; //------------------------------------------------------------------------------ void CudaGLInit() { cudaDeviceProp prop; int dev; // Fill it with zeros memset(&prop,0,sizeof(cudaDeviceProp)); // Pick a GPU capable of 1.0 or better prop.major=1; prop.minor=0; cudaChooseDevice(&dev,&prop); // Set OpenGL device cudaError_t res = cudaGLSetGLDevice(dev); if (res != cudaSuccess) { cerr << "Set device failed ... " << cudaGetErrorString(res) << endl; return; } } //------------------------------------------------------------------------------ void CudaRegisterBuffer(struct cudaGraphicsResource **vboResource, GLuint vboBuffer) { cudaError_t res = cudaGraphicsGLRegisterBuffer(vboResource, vboBuffer, cudaGraphicsMapFlagsWriteDiscard); if (res != cudaSuccess) { cerr << "Register buffer failed ... " << cudaGetErrorString(res) << endl; return; } } //------------------------------------------------------------------------------ void CudaUnregisterResource(struct cudaGraphicsResource *vboResource) { cudaError_t res = cudaGraphicsUnregisterResource(vboResource); if (res != cudaSuccess) { cerr << "Unregister buffer failed ... " << cudaGetErrorString(res) << endl; return; } } //------------------------------------------------------------------------------ void CudaTransferToGL(vtkPistonDataObject *id, vtkMTimeType dataObjectMTimeCache, vtkPistonScalarsColors *psc, cudaGraphicsResource **vboResources, bool &hasNormals, bool &hasColors) { vtkPistonReference *tr = id->GetReference(); if (tr->type != VTK_POLY_DATA || tr->data == NULL) { // Type mismatch, don't bother trying return; } vtk_polydata *pD = (vtk_polydata *)tr->data; // Claim access to buffer for cuda cudaError_t res; res = cudaGraphicsMapResources(3, vboResources, 0); if (res != cudaSuccess) { cerr << "Claim for CUDA failed ... 
" << cudaGetErrorString(res) << endl; return; } size_t num_bytes; float *vertexBufferData, *normalsBufferData; float3 *colorsBufferData; res = cudaGraphicsResourceGetMappedPointer ((void **)&vertexBufferData, &num_bytes, vboResources[0]); if(res != cudaSuccess) { cerr << "Get mappedpointer for vertices failed ... " << cudaGetErrorString(res) << endl; return; } res = cudaGraphicsResourceGetMappedPointer ((void **)&normalsBufferData, &num_bytes, vboResources[1]); if(res != cudaSuccess) { cerr << "Get mappedpointer for normals failed ... " << cudaGetErrorString(res) << endl; return; } res = cudaGraphicsResourceGetMappedPointer ((void **)&colorsBufferData, &num_bytes, vboResources[2]); if(res != cudaSuccess) { cerr << "Get mappedpointer for colors failed ... " << cudaGetErrorString(res) << endl; return; } // Copy on card verts to the shared on card gl buffer thrust::copy(pD->points->begin(), pD->points->end(), thrust::device_ptr<float>(vertexBufferData)); hasNormals = false; if (pD->normals) { hasNormals = true; // Copy on card verts to the shared on card gl buffer thrust::copy(pD->normals->begin(), pD->normals->end(), thrust::device_ptr<float>(normalsBufferData)); } hasColors = false; if (pD->scalars) { double scalarRange[2]; id->GetScalarsRange(scalarRange); hasColors = true; if(id->GetMTime() > dataObjectMTimeCache) { vtkPiston::minmax_pair<float> result = vtkPiston::find_min_max( pD->scalars); scalarRange[0] = static_cast<double>(result.min_val); scalarRange[1] = static_cast<double>(result.max_val); // Set parameters to compute scalars colors const int numvalues = 256; id->SetScalarsRange(scalarRange); psc->SetTableRange(scalarRange[0], scalarRange[1]); psc->SetNumberOfValues(numvalues); } std::vector<float> *colors = psc->ComputeScalarsColorsf(VTK_RGB); // Copy to GPU thrust::device_vector<float> onGPU(colors->begin(), colors->end()); float *raw_ptr = thrust::raw_pointer_cast(&onGPU[0]); // Now run each scalar data through the map to choose a color for it // \NOTE: Since GPU most likely going to calculate range using single // floating point precision, we may lose precision and hence, we need // to check if the range min and max are almost equal //TODO: Remove this when piston gives us exactly same values for //isocontour. float tempRange[2] = { static_cast<float>(scalarRange[0]), static_cast<float>(scalarRange[1]) }; if( AlmostEqualRelativeAndAbs(scalarRange[0], scalarRange[1], numeric_limits<float>::epsilon(), numeric_limits<float>::epsilon() * 10) ) { tempRange[1] = tempRange[0]+1.0; } color_map<float> colorMap(raw_ptr, onGPU.size(), VTK_RGB, tempRange[0], tempRange[1]); thrust::copy(thrust::make_transform_iterator(pD->scalars->begin(), colorMap), thrust::make_transform_iterator(pD->scalars->end(), colorMap), thrust::device_ptr<float3>(colorsBufferData)); } // Allow GL to access again res = cudaGraphicsUnmapResources(3, vboResources, 0); if (res != cudaSuccess) { cerr << "Release from CUDA failed ... " << cudaGetErrorString(res) << endl; return; } return; } } //namespace
3b72f298b79385f10472cabfb2d537ed8cd06c3e.hip
// !!! This is a file automatically generated by hipify!!! #include <cv.h> #include <hip/hip_runtime.h> #include <highgui.h> #include <bits/stdc++.h> #define RED 2 #define GREEN 1 #define BLUE 0 #define MASK_WIDTH 3 #define BLOCK_SIZE 32 #define TILE_WIDTH BLOCK_SIZE + MASK_WIDTH - 1 #define gpu_error(ans) { gpu_assert((ans), __LINE__); } using namespace cv; using namespace std; __constant__ int d_maskc[MASK_WIDTH * MASK_WIDTH]; inline void gpu_assert(hipError_t code, int line){ if (code != hipSuccess) cerr<<"GPUerror: "<<hipGetErrorString(code)<<" in "<< line<<endl; } typedef unsigned char uchar; __host__ __device__ uchar sol(int i, int j) { i = (i < 0)? 0 : i; i = (i > 254)? 254 : i; j = (j < 0)? 0 : j; j = (j > 255)? 255 : j; int out = sqrt((double)(i*i + j*j)); return (out > 255)? 255 : out; } __global__ void D_grisesN(uchar *rgbImage, uchar *grayImage, int width, int height) { size_t i = blockIdx.y*blockDim.y+threadIdx.y; size_t j = blockIdx.x*blockDim.x+threadIdx.x; if((i < height) && (j < width)) grayImage[i*width + j] = rgbImage[(i*width + j)*3 + RED] * 0.299 + rgbImage[(i*width+ j)*3 + GREEN] * 0.587\ + rgbImage[(i*width + j)*3 + BLUE] * 0.114; } __host__ void D_grises(uchar *h_rgbImage, uchar *h_grayImage, int width, int height) { uchar *d_rgbImage, *d_grayImage; int size = sizeof(uchar) * width * height; gpu_error(hipMalloc(&d_rgbImage, size * 3 )); gpu_error(hipMemcpy(d_rgbImage, h_rgbImage, size * 3, hipMemcpyHostToDevice)); gpu_error(hipMalloc(&d_grayImage, size)); dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1); dim3 dimGrid(ceil(width/float(BLOCK_SIZE)),ceil(height/float(BLOCK_SIZE)),1); hipLaunchKernelGGL(( D_grisesN), dim3(dimGrid),dim3(dimBlock), 0, 0, d_rgbImage, d_grayImage, width, height); hipDeviceSynchronize(); gpu_error(hipMemcpy(h_grayImage, d_grayImage, size, hipMemcpyDeviceToHost) ); hipFree(d_rgbImage); hipFree(d_grayImage); } __host__ void H_grises(uchar *rgbImage, uchar *grayImage, int width, int height) { for(int i = 0; i < height; i++) { for(int j = 0; j < width; j++){ grayImage[i*width + j] = rgbImage[(i*width + j)*3 + RED] * 0.299 + rgbImage[(i*width+ j)*3 + GREEN] * 0.587\ + rgbImage[(i*width + j)*3 + BLUE] * 0.114; } } } __global__ void D_sobelN(uchar *grayImage, int *mask, uchar *sobelImage, int width, int height) { int tmp, s_row, s_col, pv1, pv2; size_t i = blockIdx.y*blockDim.y+threadIdx.y; size_t j = blockIdx.x*blockDim.x+threadIdx.x; if(i < height and j < width) { tmp = 0; pv1 = pv2 = 0; s_row = i - (MASK_WIDTH/2); s_col = j - (MASK_WIDTH/2); for(int mask_i = 0; mask_i < MASK_WIDTH; mask_i++) { for(int mask_j = 0; mask_j < MASK_WIDTH; mask_j++) { if(s_row + mask_i >= 0 and s_row + mask_i < height and s_col + mask_j >= 0 and s_col + mask_j < width) { tmp = (int)grayImage[(s_row+mask_i)*width +(s_col+mask_j)]; pv1 += tmp * mask[mask_i * MASK_WIDTH + mask_j]; pv2 += tmp * mask[mask_j * MASK_WIDTH + mask_i]; } } } sobelImage[i*width + j] = sol(pv1, pv2); } } __global__ void D_sobelC(uchar *grayImage, uchar *sobelImage, int width, int height) { int tmp, s_row, s_col, pv1, pv2; size_t i = blockIdx.y*blockDim.y+threadIdx.y; size_t j = blockIdx.x*blockDim.x+threadIdx.x; if(i < height and j < width) { tmp = 0; pv1 = pv2 = 0; s_row = i - (MASK_WIDTH/2); s_col = j - (MASK_WIDTH/2); for(int mask_i = 0; mask_i < MASK_WIDTH; mask_i++) { for(int mask_j = 0; mask_j < MASK_WIDTH; mask_j++) { if(s_row + mask_i >= 0 and s_row + mask_i < height and s_col + mask_j >= 0 and s_col + mask_j < width) { tmp = (int)grayImage[(s_row+mask_i)*width +(s_col+mask_j)]; pv1 += tmp * 
d_maskc[mask_i * MASK_WIDTH + mask_j]; pv2 += tmp * d_maskc[mask_j * MASK_WIDTH + mask_i]; } } } sobelImage[i*width + j] = sol(pv1, pv2); } } __global__ void D_sobelT(uchar *grayImage, uchar *sobelImage, int width, int height) { int tmp, pv1, pv2; __shared__ int tile[TILE_WIDTH][TILE_WIDTH]; int n = MASK_WIDTH/2; int row = blockIdx.y*blockDim.y+threadIdx.y - n; int col = blockIdx.x*blockDim.x+threadIdx.x - n; int trow = threadIdx.y; int tcol = threadIdx.x; //size_t ti = threadIdx.x * if(trow == 0 or trow == BLOCK_SIZE-1 or tcol == 0 or tcol == BLOCK_SIZE-1) { for(int i = 0; i < MASK_WIDTH; i++) { for(int j = 0; j < MASK_WIDTH and (i != n and j != n); j++) { if(row + i < 0 or col + j < 0 or row + i >= height or col + i >= width) tile[i + trow][j + tcol] = 0; else tile[i + trow][j + tcol] = grayImage[(row + i)*width + (col + j)]; } } } if(row < height and col < width) { row += n; col += n; tile[trow][tcol] = grayImage[row* width + col]; __syncthreads(); tmp = 0; pv1 = pv2 = 0; //s_row = trow - (MASK_WIDTH/2); //s_col = tcol - (MASK_WIDTH/2); for(int mask_i = 0; mask_i < MASK_WIDTH; mask_i++) { for(int mask_j = 0; mask_j < MASK_WIDTH; mask_j++) { if(trow + mask_i >= 0 and trow + mask_i < TILE_WIDTH and tcol + mask_j >= 0 and tcol + mask_j < TILE_WIDTH) { tmp = tile[trow + mask_i][tcol + mask_j]; pv1 += tmp * d_maskc[mask_i * MASK_WIDTH + mask_j]; pv2 += tmp * d_maskc[mask_j * MASK_WIDTH + mask_i]; } } } sobelImage[row*width + col] = sol(pv1, pv2); } } __host__ void D_sobel(uchar *grayImage, int mask[], uchar* sobelImage, int width, int height) { uchar *d_grayImage, *d_sobelImage; //int *d_mask; //global int size = sizeof(uchar) * width * height; gpu_error( hipMalloc(&d_grayImage, size) ); gpu_error( hipMemcpy(d_grayImage, grayImage, size, hipMemcpyHostToDevice)); gpu_error( hipMalloc(&d_sobelImage, size) ); gpu_error( hipMemcpyToSymbol(d_maskc, mask, MASK_WIDTH * MASK_WIDTH * sizeof(int)) ); //cache y tile //gpu_error( hipMalloc(&d_mask, MASK_WIDTH * MASK_WIDTH * sizeof(int)) ); //global //gpu_error( hipMemcpy(d_mask, mask, MASK_WIDTH * MASK_WIDTH * sizeof(int), hipMemcpyHostToDevice)); //global dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1); dim3 dimGrid(ceil(width/float(BLOCK_SIZE)),ceil(height/float(BLOCK_SIZE)),1); //D_sobelN<<<dimGrid,dimBlock>>>(d_grayImage, d_mask, d_sobelImage, width, height); //global //D_sobelC<<<dimGrid,dimBlock>>>(d_grayImage, d_sobelImage, width, height); //cache hipLaunchKernelGGL(( D_sobelT), dim3(dimGrid),dim3(dimBlock), 0, 0, d_grayImage, d_sobelImage, width, height); //cache hipDeviceSynchronize(); gpu_error(hipMemcpy(sobelImage, d_sobelImage, size, hipMemcpyDeviceToHost) ); hipFree(d_grayImage); hipFree(d_sobelImage); //hipFree(d_mask); //global } __host__ void H_sobel(uchar *grayImage, int mask[], uchar* sobelImage, int width, int height) { int tmp, s_row, s_col, pv1, pv2; for(int i = 0; i < height; i++) { for(int j = 0; j < width; j++) { tmp = 0; pv1 = pv2 = 0; s_row = i - (MASK_WIDTH/2); s_col = j - (MASK_WIDTH/2); for(int mask_i = 0; mask_i < MASK_WIDTH; mask_i++) { for(int mask_j = 0; mask_j < MASK_WIDTH; mask_j++) { if(s_row + mask_i >= 0 and s_row + mask_i < height and s_col + mask_j >= 0 and s_col + mask_j < width) { tmp = (int)grayImage[(s_row+mask_i)*width +(s_col+mask_j)]; pv1 += tmp * mask[mask_i * MASK_WIDTH + mask_j]; pv2 += tmp * mask[mask_j * MASK_WIDTH + mask_i]; } } } sobelImage[i*width + j] = sol(pv1, pv2); } } } int main( ) { Mat image; double promSec = 0.0, promPar = 0.0; uchar *dataimage, *grayimage, *sobelimage; image = imread( "img5.jpg",1); 
int Mask[] = {-1, 0, 1, -2 , 0, 2, -1 ,0 ,1}; dataimage = image.data; Mat gray_image, sobel_image; Size s = image.size(); int width = s.width; int height = s.height; int sizeGray = sizeof(uchar)*width*height; grayimage = (uchar *)malloc(sizeGray); sobelimage = (uchar *)malloc(sizeGray); int n = 10; while(n--){ clock_t t = clock(); H_grises(dataimage, grayimage, width, height); H_sobel(grayimage, Mask, sobelimage, width, height); promSec += (clock() - t)/(float)CLOCKS_PER_SEC; t = clock(); D_grises(dataimage, grayimage, width, height); D_sobel(grayimage, Mask, sobelimage, width, height); promPar += (clock() - t)/(float)CLOCKS_PER_SEC; } promSec /= 10; promPar /= 10; gray_image.create(height, width, CV_8UC1); gray_image.data = grayimage; imwrite("./Gray_Image.jpg",gray_image); sobel_image.create(height, width, CV_8UC1); sobel_image.data = sobelimage; imwrite("./Sobel_Image.jpg", sobel_image); cout<<"Secuencial:"<<endl; cout<<promSec<<endl; cout<<"Paralelo"<<endl; cout<<promPar<<endl; cout<<"Aceleracion"<<endl; cout<<promSec/promPar<<endl; return 0; }
3b72f298b79385f10472cabfb2d537ed8cd06c3e.cu
#include <cv.h> #include <cuda.h> #include <highgui.h> #include <bits/stdc++.h> #define RED 2 #define GREEN 1 #define BLUE 0 #define MASK_WIDTH 3 #define BLOCK_SIZE 32 #define TILE_WIDTH BLOCK_SIZE + MASK_WIDTH - 1 #define gpu_error(ans) { gpu_assert((ans), __LINE__); } using namespace cv; using namespace std; __constant__ int d_maskc[MASK_WIDTH * MASK_WIDTH]; inline void gpu_assert(cudaError_t code, int line){ if (code != cudaSuccess) cerr<<"GPUerror: "<<cudaGetErrorString(code)<<" in "<< line<<endl; } typedef unsigned char uchar; __host__ __device__ uchar sol(int i, int j) { i = (i < 0)? 0 : i; i = (i > 254)? 254 : i; j = (j < 0)? 0 : j; j = (j > 255)? 255 : j; int out = sqrt((double)(i*i + j*j)); return (out > 255)? 255 : out; } __global__ void D_grisesN(uchar *rgbImage, uchar *grayImage, int width, int height) { size_t i = blockIdx.y*blockDim.y+threadIdx.y; size_t j = blockIdx.x*blockDim.x+threadIdx.x; if((i < height) && (j < width)) grayImage[i*width + j] = rgbImage[(i*width + j)*3 + RED] * 0.299 + rgbImage[(i*width+ j)*3 + GREEN] * 0.587\ + rgbImage[(i*width + j)*3 + BLUE] * 0.114; } __host__ void D_grises(uchar *h_rgbImage, uchar *h_grayImage, int width, int height) { uchar *d_rgbImage, *d_grayImage; int size = sizeof(uchar) * width * height; gpu_error(cudaMalloc(&d_rgbImage, size * 3 )); gpu_error(cudaMemcpy(d_rgbImage, h_rgbImage, size * 3, cudaMemcpyHostToDevice)); gpu_error(cudaMalloc(&d_grayImage, size)); dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1); dim3 dimGrid(ceil(width/float(BLOCK_SIZE)),ceil(height/float(BLOCK_SIZE)),1); D_grisesN<<<dimGrid,dimBlock>>>(d_rgbImage, d_grayImage, width, height); cudaDeviceSynchronize(); gpu_error(cudaMemcpy(h_grayImage, d_grayImage, size, cudaMemcpyDeviceToHost) ); cudaFree(d_rgbImage); cudaFree(d_grayImage); } __host__ void H_grises(uchar *rgbImage, uchar *grayImage, int width, int height) { for(int i = 0; i < height; i++) { for(int j = 0; j < width; j++){ grayImage[i*width + j] = rgbImage[(i*width + j)*3 + RED] * 0.299 + rgbImage[(i*width+ j)*3 + GREEN] * 0.587\ + rgbImage[(i*width + j)*3 + BLUE] * 0.114; } } } __global__ void D_sobelN(uchar *grayImage, int *mask, uchar *sobelImage, int width, int height) { int tmp, s_row, s_col, pv1, pv2; size_t i = blockIdx.y*blockDim.y+threadIdx.y; size_t j = blockIdx.x*blockDim.x+threadIdx.x; if(i < height and j < width) { tmp = 0; pv1 = pv2 = 0; s_row = i - (MASK_WIDTH/2); s_col = j - (MASK_WIDTH/2); for(int mask_i = 0; mask_i < MASK_WIDTH; mask_i++) { for(int mask_j = 0; mask_j < MASK_WIDTH; mask_j++) { if(s_row + mask_i >= 0 and s_row + mask_i < height and s_col + mask_j >= 0 and s_col + mask_j < width) { tmp = (int)grayImage[(s_row+mask_i)*width +(s_col+mask_j)]; pv1 += tmp * mask[mask_i * MASK_WIDTH + mask_j]; pv2 += tmp * mask[mask_j * MASK_WIDTH + mask_i]; } } } sobelImage[i*width + j] = sol(pv1, pv2); } } __global__ void D_sobelC(uchar *grayImage, uchar *sobelImage, int width, int height) { int tmp, s_row, s_col, pv1, pv2; size_t i = blockIdx.y*blockDim.y+threadIdx.y; size_t j = blockIdx.x*blockDim.x+threadIdx.x; if(i < height and j < width) { tmp = 0; pv1 = pv2 = 0; s_row = i - (MASK_WIDTH/2); s_col = j - (MASK_WIDTH/2); for(int mask_i = 0; mask_i < MASK_WIDTH; mask_i++) { for(int mask_j = 0; mask_j < MASK_WIDTH; mask_j++) { if(s_row + mask_i >= 0 and s_row + mask_i < height and s_col + mask_j >= 0 and s_col + mask_j < width) { tmp = (int)grayImage[(s_row+mask_i)*width +(s_col+mask_j)]; pv1 += tmp * d_maskc[mask_i * MASK_WIDTH + mask_j]; pv2 += tmp * d_maskc[mask_j * MASK_WIDTH + mask_i]; } } } 
sobelImage[i*width + j] = sol(pv1, pv2); } } __global__ void D_sobelT(uchar *grayImage, uchar *sobelImage, int width, int height) { int tmp, pv1, pv2; __shared__ int tile[TILE_WIDTH][TILE_WIDTH]; int n = MASK_WIDTH/2; int row = blockIdx.y*blockDim.y+threadIdx.y - n; int col = blockIdx.x*blockDim.x+threadIdx.x - n; int trow = threadIdx.y; int tcol = threadIdx.x; //size_t ti = threadIdx.x * if(trow == 0 or trow == BLOCK_SIZE-1 or tcol == 0 or tcol == BLOCK_SIZE-1) { for(int i = 0; i < MASK_WIDTH; i++) { for(int j = 0; j < MASK_WIDTH and (i != n and j != n); j++) { if(row + i < 0 or col + j < 0 or row + i >= height or col + i >= width) tile[i + trow][j + tcol] = 0; else tile[i + trow][j + tcol] = grayImage[(row + i)*width + (col + j)]; } } } if(row < height and col < width) { row += n; col += n; tile[trow][tcol] = grayImage[row* width + col]; __syncthreads(); tmp = 0; pv1 = pv2 = 0; //s_row = trow - (MASK_WIDTH/2); //s_col = tcol - (MASK_WIDTH/2); for(int mask_i = 0; mask_i < MASK_WIDTH; mask_i++) { for(int mask_j = 0; mask_j < MASK_WIDTH; mask_j++) { if(trow + mask_i >= 0 and trow + mask_i < TILE_WIDTH and tcol + mask_j >= 0 and tcol + mask_j < TILE_WIDTH) { tmp = tile[trow + mask_i][tcol + mask_j]; pv1 += tmp * d_maskc[mask_i * MASK_WIDTH + mask_j]; pv2 += tmp * d_maskc[mask_j * MASK_WIDTH + mask_i]; } } } sobelImage[row*width + col] = sol(pv1, pv2); } } __host__ void D_sobel(uchar *grayImage, int mask[], uchar* sobelImage, int width, int height) { uchar *d_grayImage, *d_sobelImage; //int *d_mask; //global int size = sizeof(uchar) * width * height; gpu_error( cudaMalloc(&d_grayImage, size) ); gpu_error( cudaMemcpy(d_grayImage, grayImage, size, cudaMemcpyHostToDevice)); gpu_error( cudaMalloc(&d_sobelImage, size) ); gpu_error( cudaMemcpyToSymbol(d_maskc, mask, MASK_WIDTH * MASK_WIDTH * sizeof(int)) ); //cache y tile //gpu_error( cudaMalloc(&d_mask, MASK_WIDTH * MASK_WIDTH * sizeof(int)) ); //global //gpu_error( cudaMemcpy(d_mask, mask, MASK_WIDTH * MASK_WIDTH * sizeof(int), cudaMemcpyHostToDevice)); //global dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1); dim3 dimGrid(ceil(width/float(BLOCK_SIZE)),ceil(height/float(BLOCK_SIZE)),1); //D_sobelN<<<dimGrid,dimBlock>>>(d_grayImage, d_mask, d_sobelImage, width, height); //global //D_sobelC<<<dimGrid,dimBlock>>>(d_grayImage, d_sobelImage, width, height); //cache D_sobelT<<<dimGrid,dimBlock>>>(d_grayImage, d_sobelImage, width, height); //cache cudaDeviceSynchronize(); gpu_error(cudaMemcpy(sobelImage, d_sobelImage, size, cudaMemcpyDeviceToHost) ); cudaFree(d_grayImage); cudaFree(d_sobelImage); //cudaFree(d_mask); //global } __host__ void H_sobel(uchar *grayImage, int mask[], uchar* sobelImage, int width, int height) { int tmp, s_row, s_col, pv1, pv2; for(int i = 0; i < height; i++) { for(int j = 0; j < width; j++) { tmp = 0; pv1 = pv2 = 0; s_row = i - (MASK_WIDTH/2); s_col = j - (MASK_WIDTH/2); for(int mask_i = 0; mask_i < MASK_WIDTH; mask_i++) { for(int mask_j = 0; mask_j < MASK_WIDTH; mask_j++) { if(s_row + mask_i >= 0 and s_row + mask_i < height and s_col + mask_j >= 0 and s_col + mask_j < width) { tmp = (int)grayImage[(s_row+mask_i)*width +(s_col+mask_j)]; pv1 += tmp * mask[mask_i * MASK_WIDTH + mask_j]; pv2 += tmp * mask[mask_j * MASK_WIDTH + mask_i]; } } } sobelImage[i*width + j] = sol(pv1, pv2); } } } int main( ) { Mat image; double promSec = 0.0, promPar = 0.0; uchar *dataimage, *grayimage, *sobelimage; image = imread( "img5.jpg",1); int Mask[] = {-1, 0, 1, -2 , 0, 2, -1 ,0 ,1}; dataimage = image.data; Mat gray_image, sobel_image; Size s = 
image.size(); int width = s.width; int height = s.height; int sizeGray = sizeof(uchar)*width*height; grayimage = (uchar *)malloc(sizeGray); sobelimage = (uchar *)malloc(sizeGray); int n = 10; while(n--){ clock_t t = clock(); H_grises(dataimage, grayimage, width, height); H_sobel(grayimage, Mask, sobelimage, width, height); promSec += (clock() - t)/(float)CLOCKS_PER_SEC; t = clock(); D_grises(dataimage, grayimage, width, height); D_sobel(grayimage, Mask, sobelimage, width, height); promPar += (clock() - t)/(float)CLOCKS_PER_SEC; } promSec /= 10; promPar /= 10; gray_image.create(height, width, CV_8UC1); gray_image.data = grayimage; imwrite("./Gray_Image.jpg",gray_image); sobel_image.create(height, width, CV_8UC1); sobel_image.data = sobelimage; imwrite("./Sobel_Image.jpg", sobel_image); cout<<"Secuencial:"<<endl; cout<<promSec<<endl; cout<<"Paralelo"<<endl; cout<<promPar<<endl; cout<<"Aceleracion"<<endl; cout<<promSec/promPar<<endl; return 0; }
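D_sobelT above stages pixels in a shared-memory tile of TILE_WIDTH = BLOCK_SIZE + MASK_WIDTH - 1 so each 3x3 window reads on-chip memory instead of global memory. Below is an independent, minimal sketch of one common way to do the cooperative halo load and the Sobel magnitude; the block size, zero padding outside the image, and the macro/kernel names are illustrative assumptions, not the kernels above.

// Hedged sketch of a (BLOCK+2*R)^2 shared-memory tile for a 3x3 stencil (R = 1),
// independent of the kernels above. Row-major unsigned char image assumed.
#define SBLOCK 32
#define SRADIUS 1
#define STILE (SBLOCK + 2 * SRADIUS)

__global__ void stencil3x3_tiled(const unsigned char *in, unsigned char *out,
                                 int width, int height)
{
    __shared__ int tile[STILE][STILE];

    int outRow = blockIdx.y * SBLOCK + threadIdx.y; // pixel this thread produces
    int outCol = blockIdx.x * SBLOCK + threadIdx.x;

    // Cooperative load: each thread fills one or more tile cells in strides of SBLOCK.
    for (int ty = threadIdx.y; ty < STILE; ty += SBLOCK)
        for (int tx = threadIdx.x; tx < STILE; tx += SBLOCK) {
            int gy = blockIdx.y * SBLOCK + ty - SRADIUS; // global coords incl. halo
            int gx = blockIdx.x * SBLOCK + tx - SRADIUS;
            bool inside = (gy >= 0 && gy < height && gx >= 0 && gx < width);
            tile[ty][tx] = inside ? in[gy * width + gx] : 0;
        }
    __syncthreads();

    if (outRow < height && outCol < width) {
        // Gx mask laid out like Mask[] above; Gy is its transpose via swapped indices.
        const int gxm[9] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 };
        int px = 0, py = 0;
        for (int i = 0; i < 3; ++i)
            for (int j = 0; j < 3; ++j) {
                int v = tile[threadIdx.y + i][threadIdx.x + j];
                px += v * gxm[i * 3 + j];
                py += v * gxm[j * 3 + i];
            }
        float mag = sqrtf(float(px) * px + float(py) * py);
        out[outRow * width + outCol] = mag > 255.0f ? 255 : (unsigned char)mag;
    }
}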
aa4ac8214c3717986823780b9fa58a7e8106e89e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "single_block_reduction.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *sumResults = NULL; hipMalloc(&sumResults, XSIZE*YSIZE); float *squareResults = NULL; hipMalloc(&squareResults, XSIZE*YSIZE); const size_t n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( single_block_reduction), dim3(gridBlock),dim3(threadBlock), 0, 0, sumResults,squareResults,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( single_block_reduction), dim3(gridBlock),dim3(threadBlock), 0, 0, sumResults,squareResults,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( single_block_reduction), dim3(gridBlock),dim3(threadBlock), 0, 0, sumResults,squareResults,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
aa4ac8214c3717986823780b9fa58a7e8106e89e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "single_block_reduction.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *sumResults = NULL; cudaMalloc(&sumResults, XSIZE*YSIZE); float *squareResults = NULL; cudaMalloc(&squareResults, XSIZE*YSIZE); const size_t n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); single_block_reduction<<<gridBlock,threadBlock>>>(sumResults,squareResults,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { single_block_reduction<<<gridBlock,threadBlock>>>(sumResults,squareResults,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { single_block_reduction<<<gridBlock,threadBlock>>>(sumResults,squareResults,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
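The loop above wraps 1000 launches in std::chrono timers; kernel launches are asynchronous, so a host clock stopped before a synchronize can largely reflect launch overhead rather than kernel time. A minimal sketch of the cudaEvent-based alternative, with `my_kernel`, `grid`, and `block` as placeholders for an already-configured launch:

// Hedged sketch: timing a kernel with CUDA events instead of host clocks.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void my_kernel() {}

int main()
{
    dim3 grid(1), block(256);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    my_kernel<<<grid, block>>>(); // warm-up launch
    cudaDeviceSynchronize();

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i)
        my_kernel<<<grid, block>>>();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // wait until the last launch has finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    // Total milliseconds over 1000 launches equals average microseconds per launch.
    printf("avg per launch: %f us\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}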
3bbc1df425d486b313e57d108ded03ce15d34bde.hip
// !!! This is a file automatically generated by hipify!!! #include "gpu_func.h" #include <hip/hip_runtime.h> #include "rocblas.h" #include <thrust/scan.h> #include <thrust/device_vector.h> #include <thrust/iterator/reverse_iterator.h> #include <thrust/device_ptr.h> #include <stdlib.h> typedef thrust::device_vector<numeric>::iterator Iterator; #define MAX_THREAD_PER_BLOCK 1024 void allocate_device_memory(cox_data &dev_data, cox_cache &dev_cache, cox_param &dev_param, int total_cases, int K, int p) { hipMalloc((void**)&dev_data.X, sizeof(numeric) *p * total_cases); hipMalloc((void**)&dev_data.censor, sizeof(numeric) * total_cases); hipMalloc((void**)&dev_data.rankmin, sizeof(int) * total_cases); hipMalloc((void**)&dev_data.rankmax, sizeof(int) * total_cases); hipMalloc((void**)&dev_cache.outer_accumu, sizeof(numeric) * total_cases); hipMalloc((void**)&dev_cache.eta, sizeof(numeric) * total_cases); hipMalloc((void**)&dev_cache.exp_eta, sizeof(numeric) * total_cases); hipMalloc((void**)&dev_cache.exp_accumu, sizeof(numeric) * total_cases); hipMalloc((void**)&dev_cache.residual, sizeof(numeric) * total_cases); hipMalloc((void**)&dev_cache.B_col_norm, sizeof(numeric) * p); hipMalloc((void**)&dev_cache.cox_val, sizeof(numeric) * K); hipMalloc((void**)&dev_param.B, sizeof(numeric) * K * p); hipMalloc((void**)&dev_param.v, sizeof(numeric) * K * p); hipMalloc((void**)&dev_param.grad, sizeof(numeric) * K * p); hipMalloc((void**)&dev_param.prev_B, sizeof(numeric) * K * p); hipMalloc((void**)&dev_param.grad_ls, sizeof(numeric) * K * p); hipMalloc((void**)&dev_param.penalty_factor, sizeof(numeric) * p); hipMalloc((void**)&dev_param.ls_result, sizeof(numeric) * 2); hipMalloc((void**)&dev_param.change, sizeof(numeric) * 1); } void free_device_memory(cox_data &dev_data, cox_cache &dev_cache, cox_param &dev_param) { hipFree(dev_data.X); hipFree(dev_data.censor); hipFree(dev_data.rankmax); hipFree(dev_data.rankmin); hipFree(dev_cache.outer_accumu); hipFree(dev_cache.eta); hipFree(dev_cache.exp_eta); hipFree(dev_cache.exp_accumu); hipFree(dev_cache.residual); hipFree(dev_cache.B_col_norm); hipFree(dev_cache.cox_val); hipFree(dev_param.B); hipFree(dev_param.v); hipFree(dev_param.grad); hipFree(dev_param.prev_B); hipFree(dev_param.penalty_factor); hipFree(dev_param.ls_result); hipFree(dev_param.grad_ls); hipFree(dev_param.change); } #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif // double atomicMax, copied from https://github.com/treecode/Bonsai/blob/master/runtime/profiling/derived_atomic_functions.h __device__ __forceinline__ double atomicMax(double *address, double val) { unsigned long long ret = __double_as_longlong(*address); while(val > __longlong_as_double(ret)) { unsigned long long old = ret; if((ret = atomicCAS((unsigned long long *)address, old, __double_as_longlong(val))) == old) break; } return __longlong_as_double(ret); } // float atomicMax __device__ __forceinline__ float atomicMax(float *address, float val) { int ret = __float_as_int(*address); while(val > __int_as_float(ret)) { int old = ret; if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old) break; } return __int_as_float(ret); } void 
compute_product(numeric *A, numeric *x, numeric *b, int n, int p, hipStream_t stream, hipblasHandle_t handle, hipblasOperation_t trans=HIPBLAS_OP_N) { numeric alpha = 1.0; numeric beta = 0.0; hipblasSetStream(handle, stream); hipblasDgemv(handle, trans, n, p, &alpha, A, n, x, 1, &beta, b, 1); } __global__ void apply_exp_gpu(const numeric *x, numeric *ex, int len) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < len) { ex[tid] = exp(x[tid]); } } void apply_exp(const numeric *x, numeric *ex, int len, hipStream_t stream) { constexpr int num_thread = 128; int num_block = (len + num_thread - 1)/num_thread; hipLaunchKernelGGL(( apply_exp_gpu), dim3(num_block), dim3(num_thread), 0, stream, x, ex, len); } // do rev_cumsum of x and save it to y void rev_cumsum(numeric *x, numeric *y, int len, hipStream_t stream) { thrust::device_ptr<numeric> dptr_x = thrust::device_pointer_cast<numeric>(x); thrust::reverse_iterator<Iterator> r_x = make_reverse_iterator(dptr_x+len); thrust::device_ptr<numeric> dptr_y = thrust::device_pointer_cast<numeric>(y); thrust::reverse_iterator<Iterator> r_y = make_reverse_iterator(dptr_y+len); thrust::inclusive_scan(thrust::hip::par.on(stream), r_x, r_x+len, r_y); } __global__ void adjust_ties_gpu(const numeric *x, const int *rank, numeric *y, int len) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < len) { y[tid] = x[rank[tid]]; } } // adjust rank of x and save it to y void adjust_ties(const numeric *x, const int *rank, numeric *y, int len , hipStream_t stream) { constexpr int num_thread = 128; int num_block = (len + num_thread - 1)/num_thread; hipLaunchKernelGGL(( adjust_ties_gpu), dim3(num_block), dim3(num_thread), 0, stream, x, rank, y, len); } __global__ void cwise_div_gpu(const numeric *x, const numeric *y, numeric *z, int len) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < len) { z[tid] = x[tid]/y[tid]; } } // Compute x./y and save the result to z void cwise_div(const numeric *x, const numeric *y, numeric *z, int len, hipStream_t stream) { constexpr int num_thread = 128; int num_block = (len + num_thread - 1)/num_thread; hipLaunchKernelGGL(( cwise_div_gpu), dim3(num_block), dim3(num_thread), 0, stream, x, y, z, len); } void cumsum(numeric *x, int len, hipStream_t stream) { thrust::device_ptr<numeric> dev_ptr = thrust::device_pointer_cast(x); thrust::inclusive_scan(thrust::hip::par.on(stream), dev_ptr, dev_ptr+len, dev_ptr); } __global__ void mult_add_gpu(numeric *z, const numeric *a, const numeric *b, const numeric *c, int len) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < len) { z[tid] = a[tid] * b[tid] - c[tid]; } } // Set z = a*b - c void mult_add(numeric *z, const numeric *a, const numeric *b, const numeric *c, int len,hipStream_t stream) { constexpr int num_thread = 128; int num_block = (len + num_thread - 1)/num_thread; hipLaunchKernelGGL(( mult_add_gpu), dim3(num_block), dim3(num_thread), 0, stream, z, a, b, c, len); } __global__ void coxval_gpu(const numeric *x, numeric *y, const numeric *z, numeric *val, int len) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i==0){ val[0] = 0.0; } if(i < len) { y[i] = (log(x[i]) - y[i]) * z[i]; } __shared__ numeric sdata[128]; sdata[threadIdx.x] = (i<len)?y[i]:0.0; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x;; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicAdd(val,sdata[0]); } } // compute 
sum((log(x) - y) *z), x will be modified, result saved in val void get_coxvalue(const numeric *x, numeric *y, const numeric *z, numeric *val, int len, hipStream_t stream) { constexpr int num_thread = 128; int num_block = (len + num_thread - 1)/num_thread; hipLaunchKernelGGL(( coxval_gpu), dim3(num_block), dim3(num_thread), 0, stream, x, y, z, val, len); } __global__ void update_parameters_gpu(numeric *B, const numeric *v, const numeric *g, const numeric *penalty_factor, int K, int p,numeric step_size, numeric lambda_1, numeric lambda_2) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < p) { numeric ba; numeric lambdap1 = lambda_1*penalty_factor[i]*step_size; numeric lambdap2 = lambda_2*penalty_factor[i]*step_size; numeric norm = 0.0; for (int k = 0; k <K; ++k) { int ind = i+k*p; // gradient descent B[ind] = v[ind] - step_size*g[ind]; //soft-thresholding ba = fabs(B[ind]); B[ind] = signbit(lambdap1-ba)*copysign(ba-lambdap1, B[ind]); norm += B[ind]*B[ind]; } // Group soft-thresholding norm = fmax(sqrt(norm), lambdap2); for(int k = 0; k <K; ++k) { int ind = i+k*p; B[ind] *= ((norm - lambdap2)/norm); } } } void update_parameters(cox_param &dev_param, int K, int p, numeric step_size, numeric lambda_1, numeric lambda_2) { constexpr int num_thread = 128; int num_block = (p + num_thread - 1)/num_thread; hipLaunchKernelGGL(( update_parameters_gpu), dim3(num_block), dim3(num_thread), 0, 0, dev_param.B, dev_param.v, dev_param.grad, dev_param.penalty_factor, K, p,step_size, lambda_1,lambda_2); } __global__ void ls_stop_v1_gpu(const numeric *B, const numeric *v, const numeric *g, numeric *result, int K, int p, numeric step_size) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i==0){ result[0] = 0.0; } numeric local = 0.0; if(i < K*p) { numeric diff = B[i] - v[i]; local = g[i]*diff + diff*diff/(2*step_size); } __shared__ numeric sdata[256]; sdata[threadIdx.x] = local; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x;; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicAdd(result, sdata[0]); } } numeric ls_stop_v1(cox_param &dev_param, numeric step_size, int K, int p) { constexpr int num_thread = 256; int num_block = (K*p + num_thread - 1)/num_thread; hipLaunchKernelGGL(( ls_stop_v1_gpu), dim3(num_block), dim3(num_thread), 0, 0, dev_param.B, dev_param.v, dev_param.grad, dev_param.ls_result, K, p, step_size); numeric result[1]; hipMemcpy(result, dev_param.ls_result, sizeof(numeric)*1, hipMemcpyDeviceToHost); return result[0]; } __global__ void ls_stop_v2_gpu(const numeric *B, const numeric *v, const numeric *g, const numeric *g_ls, numeric *result, int K, int p, numeric step_size) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<2){ result[i] = 0.0; } numeric local = 0.0; numeric diff = 0.0; if(i < K*p) { diff = B[i] - v[i]; local = diff*diff; } __shared__ numeric sdata[256]; sdata[threadIdx.x] = local; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x;; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicAdd(result, sdata[0]); } // second term if(i < K*p) { local = diff*(g_ls[i]-g[i]); } sdata[threadIdx.x] = local; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x;; if (index < blockDim.x) 
{ sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicAdd(result+1, sdata[0]); } } numeric ls_stop_v2(cox_param &dev_param, numeric step_size, int K, int p) { constexpr int num_thread = 256; int num_block = (K*p + num_thread - 1)/num_thread; hipLaunchKernelGGL(( ls_stop_v2_gpu), dim3(num_block), dim3(num_thread), 0, 0, dev_param.B, dev_param.v, dev_param.grad, dev_param.grad_ls, dev_param.ls_result, K, p, step_size); numeric result[2]; hipMemcpy(result, dev_param.ls_result, sizeof(numeric)*2, hipMemcpyDeviceToHost); return (result[0]/(2*step_size) - abs(result[1])); } void nesterov_update(cox_param &dev_param, int K, int p, numeric weight_old, numeric weight_new, hipStream_t stream, hipblasHandle_t handle) { numeric alpha = (weight_old - 1)/weight_new + 1; numeric beta = (1 - weight_old)/weight_new; hipblasSetStream(handle, stream); hipblasDgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, p, K, &alpha, dev_param.B, p , &beta, dev_param.prev_B, p, dev_param.v, p); } __global__ void max_diff_gpu(numeric *A, numeric *B, numeric *result, int len) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i == 0) { result[0] = 0.0; } numeric local = 0; if(i < len) { local = fabs(A[i] - B[i]); } __shared__ numeric sdata[256]; sdata[threadIdx.x] = local; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x;; if (index < blockDim.x) { sdata[index] = fmax(sdata[index + s], sdata[index]); } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicMax(result, sdata[0]); } } numeric max_diff(cox_param &dev_param, int K, int p) { constexpr int num_thread = 256; int num_block = (K*p + num_thread - 1)/num_thread; hipLaunchKernelGGL(( max_diff_gpu), dim3(num_block), dim3(num_thread), 0, 0, dev_param.B, dev_param.prev_B, dev_param.change, K*p); numeric result[1]; hipMemcpy(result, dev_param.change, sizeof(numeric)*1, hipMemcpyDeviceToHost); return result[0]; } void cublas_copy(cox_param &dev_param, int len, hipStream_t stream, hipblasHandle_t handle) { hipblasSetStream(handle, stream); hipblasDcopy(handle, len,dev_param.B, 1,dev_param.prev_B, 1); }
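update_parameters_gpu above shrinks each coefficient with a branch-free soft-threshold (the signbit/copysign line) before the group-wise shrinkage across the K columns. A small host-side check, assuming signbit converts to exactly 0 or 1 (std::signbit returns bool on the host), that this branch-free form matches the textbook operator S(b, l) = sign(b) * max(|b| - l, 0); the function names are illustrative, not part of the code above.

// Host-side equivalence check for the branch-free soft-threshold used above.
#include <cmath>
#include <cstdio>

static double soft_branchfree(double b, double l)
{
    double ba = std::fabs(b);
    return std::signbit(l - ba) * std::copysign(ba - l, b);
}

static double soft_reference(double b, double l)
{
    double shrunk = std::fabs(b) - l;
    return shrunk > 0.0 ? std::copysign(shrunk, b) : 0.0;
}

int main()
{
    const double tests[] = { 2.5, -2.5, 0.3, 0.0 };
    for (double b : tests)
        printf("b=% .2f  branchfree=% .2f  reference=% .2f\n",
               b, soft_branchfree(b, 1.0), soft_reference(b, 1.0));
    return 0;
}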
3bbc1df425d486b313e57d108ded03ce15d34bde.cu
#include "gpu_func.h" #include <cuda_runtime.h> #include "cublas_v2.h" #include <thrust/scan.h> #include <thrust/device_vector.h> #include <thrust/iterator/reverse_iterator.h> #include <thrust/device_ptr.h> #include <stdlib.h> typedef thrust::device_vector<numeric>::iterator Iterator; #define MAX_THREAD_PER_BLOCK 1024 void allocate_device_memory(cox_data &dev_data, cox_cache &dev_cache, cox_param &dev_param, int total_cases, int K, int p) { cudaMalloc((void**)&dev_data.X, sizeof(numeric) *p * total_cases); cudaMalloc((void**)&dev_data.censor, sizeof(numeric) * total_cases); cudaMalloc((void**)&dev_data.rankmin, sizeof(int) * total_cases); cudaMalloc((void**)&dev_data.rankmax, sizeof(int) * total_cases); cudaMalloc((void**)&dev_cache.outer_accumu, sizeof(numeric) * total_cases); cudaMalloc((void**)&dev_cache.eta, sizeof(numeric) * total_cases); cudaMalloc((void**)&dev_cache.exp_eta, sizeof(numeric) * total_cases); cudaMalloc((void**)&dev_cache.exp_accumu, sizeof(numeric) * total_cases); cudaMalloc((void**)&dev_cache.residual, sizeof(numeric) * total_cases); cudaMalloc((void**)&dev_cache.B_col_norm, sizeof(numeric) * p); cudaMalloc((void**)&dev_cache.cox_val, sizeof(numeric) * K); cudaMalloc((void**)&dev_param.B, sizeof(numeric) * K * p); cudaMalloc((void**)&dev_param.v, sizeof(numeric) * K * p); cudaMalloc((void**)&dev_param.grad, sizeof(numeric) * K * p); cudaMalloc((void**)&dev_param.prev_B, sizeof(numeric) * K * p); cudaMalloc((void**)&dev_param.grad_ls, sizeof(numeric) * K * p); cudaMalloc((void**)&dev_param.penalty_factor, sizeof(numeric) * p); cudaMalloc((void**)&dev_param.ls_result, sizeof(numeric) * 2); cudaMalloc((void**)&dev_param.change, sizeof(numeric) * 1); } void free_device_memory(cox_data &dev_data, cox_cache &dev_cache, cox_param &dev_param) { cudaFree(dev_data.X); cudaFree(dev_data.censor); cudaFree(dev_data.rankmax); cudaFree(dev_data.rankmin); cudaFree(dev_cache.outer_accumu); cudaFree(dev_cache.eta); cudaFree(dev_cache.exp_eta); cudaFree(dev_cache.exp_accumu); cudaFree(dev_cache.residual); cudaFree(dev_cache.B_col_norm); cudaFree(dev_cache.cox_val); cudaFree(dev_param.B); cudaFree(dev_param.v); cudaFree(dev_param.grad); cudaFree(dev_param.prev_B); cudaFree(dev_param.penalty_factor); cudaFree(dev_param.ls_result); cudaFree(dev_param.grad_ls); cudaFree(dev_param.change); } #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif // double atomicMax, copied from https://github.com/treecode/Bonsai/blob/master/runtime/profiling/derived_atomic_functions.h __device__ __forceinline__ double atomicMax(double *address, double val) { unsigned long long ret = __double_as_longlong(*address); while(val > __longlong_as_double(ret)) { unsigned long long old = ret; if((ret = atomicCAS((unsigned long long *)address, old, __double_as_longlong(val))) == old) break; } return __longlong_as_double(ret); } // float atomicMax __device__ __forceinline__ float atomicMax(float *address, float val) { int ret = __float_as_int(*address); while(val > __int_as_float(ret)) { int old = ret; if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old) break; } return __int_as_float(ret); } void compute_product(numeric *A, 
numeric *x, numeric *b, int n, int p, cudaStream_t stream, cublasHandle_t handle, cublasOperation_t trans=CUBLAS_OP_N) { numeric alpha = 1.0; numeric beta = 0.0; cublasSetStream(handle, stream); cublasDgemv(handle, trans, n, p, &alpha, A, n, x, 1, &beta, b, 1); } __global__ void apply_exp_gpu(const numeric *x, numeric *ex, int len) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < len) { ex[tid] = exp(x[tid]); } } void apply_exp(const numeric *x, numeric *ex, int len, cudaStream_t stream) { constexpr int num_thread = 128; int num_block = (len + num_thread - 1)/num_thread; apply_exp_gpu<<<num_block, num_thread, 0, stream>>>(x, ex, len); } // do rev_cumsum of x and save it to y void rev_cumsum(numeric *x, numeric *y, int len, cudaStream_t stream) { thrust::device_ptr<numeric> dptr_x = thrust::device_pointer_cast<numeric>(x); thrust::reverse_iterator<Iterator> r_x = make_reverse_iterator(dptr_x+len); thrust::device_ptr<numeric> dptr_y = thrust::device_pointer_cast<numeric>(y); thrust::reverse_iterator<Iterator> r_y = make_reverse_iterator(dptr_y+len); thrust::inclusive_scan(thrust::cuda::par.on(stream), r_x, r_x+len, r_y); } __global__ void adjust_ties_gpu(const numeric *x, const int *rank, numeric *y, int len) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < len) { y[tid] = x[rank[tid]]; } } // adjust rank of x and save it to y void adjust_ties(const numeric *x, const int *rank, numeric *y, int len , cudaStream_t stream) { constexpr int num_thread = 128; int num_block = (len + num_thread - 1)/num_thread; adjust_ties_gpu<<<num_block, num_thread, 0, stream>>>(x, rank, y, len); } __global__ void cwise_div_gpu(const numeric *x, const numeric *y, numeric *z, int len) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < len) { z[tid] = x[tid]/y[tid]; } } // Compute x./y and save the result to z void cwise_div(const numeric *x, const numeric *y, numeric *z, int len, cudaStream_t stream) { constexpr int num_thread = 128; int num_block = (len + num_thread - 1)/num_thread; cwise_div_gpu<<<num_block, num_thread, 0, stream>>>(x, y, z, len); } void cumsum(numeric *x, int len, cudaStream_t stream) { thrust::device_ptr<numeric> dev_ptr = thrust::device_pointer_cast(x); thrust::inclusive_scan(thrust::cuda::par.on(stream), dev_ptr, dev_ptr+len, dev_ptr); } __global__ void mult_add_gpu(numeric *z, const numeric *a, const numeric *b, const numeric *c, int len) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < len) { z[tid] = a[tid] * b[tid] - c[tid]; } } // Set z = a*b - c void mult_add(numeric *z, const numeric *a, const numeric *b, const numeric *c, int len,cudaStream_t stream) { constexpr int num_thread = 128; int num_block = (len + num_thread - 1)/num_thread; mult_add_gpu<<<num_block, num_thread, 0, stream>>>(z, a, b, c, len); } __global__ void coxval_gpu(const numeric *x, numeric *y, const numeric *z, numeric *val, int len) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i==0){ val[0] = 0.0; } if(i < len) { y[i] = (log(x[i]) - y[i]) * z[i]; } __shared__ numeric sdata[128]; sdata[threadIdx.x] = (i<len)?y[i]:0.0; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x;; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicAdd(val,sdata[0]); } } // compute sum((log(x) - y) *z), x will be modified, result saved in val void get_coxvalue(const numeric *x, numeric *y, const numeric *z, numeric *val, int len, 
cudaStream_t stream) { constexpr int num_thread = 128; int num_block = (len + num_thread - 1)/num_thread; coxval_gpu<<<num_block, num_thread, 0, stream>>>(x, y, z, val, len); } __global__ void update_parameters_gpu(numeric *B, const numeric *v, const numeric *g, const numeric *penalty_factor, int K, int p,numeric step_size, numeric lambda_1, numeric lambda_2) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < p) { numeric ba; numeric lambdap1 = lambda_1*penalty_factor[i]*step_size; numeric lambdap2 = lambda_2*penalty_factor[i]*step_size; numeric norm = 0.0; for (int k = 0; k <K; ++k) { int ind = i+k*p; // gradient descent B[ind] = v[ind] - step_size*g[ind]; //soft-thresholding ba = fabs(B[ind]); B[ind] = signbit(lambdap1-ba)*copysign(ba-lambdap1, B[ind]); norm += B[ind]*B[ind]; } // Group soft-thresholding norm = fmax(sqrt(norm), lambdap2); for(int k = 0; k <K; ++k) { int ind = i+k*p; B[ind] *= ((norm - lambdap2)/norm); } } } void update_parameters(cox_param &dev_param, int K, int p, numeric step_size, numeric lambda_1, numeric lambda_2) { constexpr int num_thread = 128; int num_block = (p + num_thread - 1)/num_thread; update_parameters_gpu<<<num_block, num_thread>>>(dev_param.B, dev_param.v, dev_param.grad, dev_param.penalty_factor, K, p,step_size, lambda_1,lambda_2); } __global__ void ls_stop_v1_gpu(const numeric *B, const numeric *v, const numeric *g, numeric *result, int K, int p, numeric step_size) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i==0){ result[0] = 0.0; } numeric local = 0.0; if(i < K*p) { numeric diff = B[i] - v[i]; local = g[i]*diff + diff*diff/(2*step_size); } __shared__ numeric sdata[256]; sdata[threadIdx.x] = local; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x;; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicAdd(result, sdata[0]); } } numeric ls_stop_v1(cox_param &dev_param, numeric step_size, int K, int p) { constexpr int num_thread = 256; int num_block = (K*p + num_thread - 1)/num_thread; ls_stop_v1_gpu<<<num_block, num_thread>>>(dev_param.B, dev_param.v, dev_param.grad, dev_param.ls_result, K, p, step_size); numeric result[1]; cudaMemcpy(result, dev_param.ls_result, sizeof(numeric)*1, cudaMemcpyDeviceToHost); return result[0]; } __global__ void ls_stop_v2_gpu(const numeric *B, const numeric *v, const numeric *g, const numeric *g_ls, numeric *result, int K, int p, numeric step_size) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<2){ result[i] = 0.0; } numeric local = 0.0; numeric diff = 0.0; if(i < K*p) { diff = B[i] - v[i]; local = diff*diff; } __shared__ numeric sdata[256]; sdata[threadIdx.x] = local; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x;; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicAdd(result, sdata[0]); } // second term if(i < K*p) { local = diff*(g_ls[i]-g[i]); } sdata[threadIdx.x] = local; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x;; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicAdd(result+1, sdata[0]); } } numeric ls_stop_v2(cox_param &dev_param, numeric step_size, int K, int p) { constexpr int 
num_thread = 256; int num_block = (K*p + num_thread - 1)/num_thread; ls_stop_v2_gpu<<<num_block, num_thread>>>(dev_param.B, dev_param.v, dev_param.grad, dev_param.grad_ls, dev_param.ls_result, K, p, step_size); numeric result[2]; cudaMemcpy(result, dev_param.ls_result, sizeof(numeric)*2, cudaMemcpyDeviceToHost); return (result[0]/(2*step_size) - abs(result[1])); } void nesterov_update(cox_param &dev_param, int K, int p, numeric weight_old, numeric weight_new, cudaStream_t stream, cublasHandle_t handle) { numeric alpha = (weight_old - 1)/weight_new + 1; numeric beta = (1 - weight_old)/weight_new; cublasSetStream(handle, stream); cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, p, K, &alpha, dev_param.B, p , &beta, dev_param.prev_B, p, dev_param.v, p); } __global__ void max_diff_gpu(numeric *A, numeric *B, numeric *result, int len) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i == 0) { result[0] = 0.0; } numeric local = 0; if(i < len) { local = fabs(A[i] - B[i]); } __shared__ numeric sdata[256]; sdata[threadIdx.x] = local; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x;; if (index < blockDim.x) { sdata[index] = fmax(sdata[index + s], sdata[index]); } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicMax(result, sdata[0]); } } numeric max_diff(cox_param &dev_param, int K, int p) { constexpr int num_thread = 256; int num_block = (K*p + num_thread - 1)/num_thread; max_diff_gpu<<<num_block, num_thread>>>(dev_param.B, dev_param.prev_B, dev_param.change, K*p); numeric result[1]; cudaMemcpy(result, dev_param.change, sizeof(numeric)*1, cudaMemcpyDeviceToHost); return result[0]; } void cublas_copy(cox_param &dev_param, int len, cudaStream_t stream, cublasHandle_t handle) { cublasSetStream(handle, stream); cublasDcopy(handle, len,dev_param.B, 1,dev_param.prev_B, 1); }
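rev_cumsum above turns an inclusive prefix scan into a suffix sum by scanning through reverse iterators. A standalone Thrust example of the same idiom on the default stream; the values are chosen only so the output is easy to check by hand.

// Standalone Thrust sketch of the reverse-cumulative-sum idiom used by rev_cumsum:
// scanning through reverse iterators turns an inclusive prefix sum into a suffix sum.
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main()
{
    thrust::device_vector<double> x(5);
    x[0] = 1; x[1] = 2; x[2] = 3; x[3] = 4; x[4] = 5;
    thrust::device_vector<double> y(5);

    // Suffix sums: y[i] = x[i] + x[i+1] + ... + x[n-1]
    thrust::inclusive_scan(x.rbegin(), x.rend(), y.rbegin());

    for (int i = 0; i < 5; ++i)
        printf("y[%d] = %g\n", i, (double)y[i]); // 15 14 12 9 5
    return 0;
}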
6842020cb04debdebbf89d0d8b7f4d6ecf641019.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ // FILL HERE: translate C-version vectorAdd to CUDA-version kernel code __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int index = threadIdx.x + blockIdx.x * blockDim.x; C[index] = A[index] + B[index]; int warpid = threadIdx.x >> 5; printf("block id = %d,warp id = %d\n",blockIdx.x,warpid); /* for (int i = 0; i < numElements; i++) { C[i] = A[i] + B[i]; } */ } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the vector length to be used, and compute its size int numElements = 1024; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = hipMalloc(&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc(&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc(&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A,h_A,size,hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B,h_B,size,hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel // FILL HERE: call 'vectorAdd' function with // 4 
blocks of 256 threads int blocksPerGrid = 4; int threadsPerBlock = 256; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //vectorAdd(A, B, C, numElements); hipLaunchKernelGGL(( vectorAdd) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A,d_B,d_C,numElements); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C,d_C,size,hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
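Note: the kernel in the sample above writes C[index] with no bounds check, which is only safe because blocksPerGrid * threadsPerBlock (4 * 256) happens to equal numElements (1024). A minimal sketch of a bounds-checked, grid-stride variant that tolerates arbitrary vector lengths; the kernel name vectorAddStride is illustrative and not part of the original sample.

__global__ void vectorAddStride(const float *A, const float *B, float *C, int numElements)
{
    // Grid-stride loop: each thread processes every (blockDim.x * gridDim.x)-th element,
    // so any launch configuration covers any numElements without out-of-bounds accesses.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < numElements;
         i += blockDim.x * gridDim.x)
    {
        C[i] = A[i] + B[i];
    }
}

// Possible launch, in the same style the .hip sample uses:
//   hipLaunchKernelGGL(vectorAddStride, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
//                      d_A, d_B, d_C, numElements);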
6842020cb04debdebbf89d0d8b7f4d6ecf641019.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ // FILL HERE: translate C-version vectorAdd to CUDA-version kernel code __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int index = threadIdx.x + blockIdx.x * blockDim.x; C[index] = A[index] + B[index]; int warpid = threadIdx.x >> 5; printf("block id = %d,warp id = %d\n",blockIdx.x,warpid); /* for (int i = 0; i < numElements; i++) { C[i] = A[i] + B[i]; } */ } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Print the vector length to be used, and compute its size int numElements = 1024; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = cudaMalloc(&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = cudaMalloc(&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = cudaMalloc(&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel // FILL HERE: call 'vectorAdd' function with // 4 blocks of 256 threads int blocksPerGrid = 
4; int threadsPerBlock = 256; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //vectorAdd(A, B, C, numElements); vectorAdd <<<blocksPerGrid, threadsPerBlock>>>(d_A,d_B,d_C,numElements); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C,d_C,size,cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
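For reference, the .cu listing above and the .hip listing before it differ only in the runtime prefix and the kernel-launch syntax; a minimal sketch of the mapping hipify applies in this pair (illustrative comments, not tool output):

// CUDA source:
//   vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
// HIP translation:
//   hipLaunchKernelGGL(vectorAdd, dim3(blocksPerGrid), dim3(threadsPerBlock),
//                      0 /*dynamic shared mem*/, 0 /*stream*/, d_A, d_B, d_C, numElements);
// Runtime symbols are renamed one-to-one, e.g. cudaError_t -> hipError_t,
// cudaMalloc -> hipMalloc, cudaMemcpy -> hipMemcpy, cudaMemcpyHostToDevice ->
// hipMemcpyHostToDevice, cudaGetLastError -> hipGetLastError,
// cudaGetErrorString -> hipGetErrorString, cudaDeviceReset -> hipDeviceReset,
// and <cuda_runtime.h> becomes <hip/hip_runtime.h>.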
cf511d3e942d097a7cb48ae57046d5435480b94f.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <cmath> #ifdef USE_ROCM #include <hip/hip_runtime.h> #endif #include "nbodycuda.h" using std::tie; template<class T> __device__ __host__ constexpr const T& clamp(const T& v, const T& lo, const T& hi) { if (v < lo) return lo; else if (v > hi) return hi; else return v; } __device__ __host__ constexpr float particle_render_kernel( float x1, float y1, float x2, float y2 ) { constexpr float max_value = 250; const float sqdistance = powf(x1 - x2, 2) + powf(y1 - y2, 2); const float value = 1 / powf(sqdistance, 1.7 / 2) * max_value; return value; } __device__ __host__ constexpr inline size_t coords2d_to_1d(size_t row_size, size_t x, size_t y) { return row_size * y + x; } NBodyRenderer::NBodyRenderer(size_t width, size_t height) : m_width{width}, m_height{height} { auto particle_generator = UniformRandomParticleGenerator{ 0.0f, (float)width, 0.0f, (float)height, 15000u, }; vector<tuple<float, float, float, float>> particles = particle_generator.get_particles(); #ifdef USE_ROCM hipMallocManaged(&frame_buffer, m_width * m_height * sizeof(uint32_t)); hipMallocManaged(&particle_x_arr, particles.size() * sizeof(float)); hipMallocManaged(&particle_y_arr, particles.size() * sizeof(float)); hipMallocManaged(&particle_x_speed, particles.size() * sizeof(float)); hipMallocManaged(&particle_y_speed, particles.size() * sizeof(float)); #else frame_buffer = (uint32_t*)malloc(m_width * m_height * sizeof(uint32_t)); particle_x_arr = (float*)malloc(particles.size() * sizeof(float)); particle_y_arr = (float*)malloc(particles.size() * sizeof(float)); particle_x_speed = (float*)malloc(particles.size() * sizeof(float)); particle_y_speed = (float*)malloc(particles.size() * sizeof(float)); #endif particle_count = particles.size(); for (size_t i = 0; i < particle_count; ++i) { particle_x_arr[i] = std::get<0>(particles[i]); particle_y_arr[i] = std::get<1>(particles[i]); particle_x_speed[i] = std::get<2>(particles[i]); particle_y_speed[i] = std::get<3>(particles[i]); } } NBodyRenderer::~NBodyRenderer() { #ifdef USE_ROCM hipFree(frame_buffer); hipFree(particle_x_arr); hipFree(particle_y_arr); hipFree(particle_x_speed); hipFree(particle_y_speed); #else free(frame_buffer); free(particle_x_arr); free(particle_y_arr); free(particle_x_speed); free(particle_y_speed); #endif } void NBodyRenderer::update_software() { // TODO: update particle positions for (size_t pixel_x = 0; pixel_x < m_width; ++pixel_x) { for (size_t pixel_y = 0; pixel_y < m_height; ++pixel_y) { float brightness = 0; for (size_t i = 0; i < particle_count; ++i) { float x = particle_x_arr[i]; float y = particle_y_arr[i]; brightness += particle_render_kernel(pixel_x, pixel_y, x, y); } uint32_t channel = clamp((int)brightness, 0, 255); uint32_t pixel = ( (channel << 16) | (channel << 8) | channel ); frame_buffer[coords2d_to_1d(m_width, pixel_x, pixel_y)] = pixel; } } } __global__ void cuda_render( uint32_t frame_buffer[], size_t width, size_t height, const float particle_x_arr[], const float particle_y_arr[], size_t particle_count ) { const size_t particle_index = blockIdx.x * 10 + threadIdx.z; if (particle_index >= particle_count) return; float px = particle_x_arr[particle_index]; float py = particle_y_arr[particle_index]; ssize_t center_pixel_x = int(px); ssize_t center_pixel_y = int(py); constexpr ssize_t window_size = 9; for (ssize_t shift_x = -4; shift_x <= 4; ++shift_x) { for (ssize_t shift_y = -4; shift_y <= 4; ++shift_y) { ssize_t pixel_x = center_pixel_x + (window_size * 
((ssize_t)threadIdx.x - 2)) + shift_x; ssize_t pixel_y = center_pixel_y + (window_size * ((ssize_t)threadIdx.y - 2)) + shift_y; if (pixel_x < 0 || pixel_x >= width || pixel_y < 0 || pixel_y >= height) { // pixel out of bounds continue; } float dbrightness = particle_render_kernel((float)pixel_x, (float)pixel_y, px, py); int dchannel = clamp((uint32_t)dbrightness, (uint32_t)0, (uint32_t)255); uint32_t dpixel = ( (dchannel << 16) | (dchannel << 8) | dchannel ); uint32_t *pixel_addr = &frame_buffer[coords2d_to_1d(width, pixel_x, pixel_y)]; uint32_t current = *pixel_addr; uint32_t assumed = 0; do { assumed = current; current = atomicCAS( (unsigned int*)pixel_addr, assumed, clamp(assumed + dpixel, 0u, 0x00FFFFFFu) ); } while (assumed != current); } } } __global__ void cuda_accelerate( const float particle_x_arr[], const float particle_y_arr[], float particle_x_speed[], float particle_y_speed[], size_t particle_count ) { // target is one being accelerated, source is one applying force const size_t particle_target = blockIdx.x * 16 + threadIdx.x; const size_t particle_source = blockIdx.y * 16 + threadIdx.y; if (particle_target >= particle_count || particle_source >= particle_count) return; if (particle_target == particle_source) return; const float targetx = particle_x_arr[particle_target]; const float targety = particle_y_arr[particle_target]; const float sourcex = particle_x_arr[particle_source]; const float sourcey = particle_y_arr[particle_source]; constexpr float g = 0.001; constexpr float maxaccel = 0.01; const float dx = sourcex - targetx; const float dy = sourcey - targety; const float sqdistance = dx * dx + dy * dy; float accelx = dx / sqdistance * g; float accely = dy / sqdistance * g; accelx = clamp(accelx, -maxaccel, maxaccel); accely = clamp(accely, -maxaccel, maxaccel); particle_x_speed[particle_target] += accelx; particle_y_speed[particle_target] += accely; } __global__ void cuda_move( float particle_x_arr[], float particle_y_arr[], float particle_x_speed[], float particle_y_speed[], size_t particle_count ) { const size_t particle_index = blockIdx.x * 100 + threadIdx.x; if (particle_index >= particle_count) return; float speedx = particle_x_speed[particle_index]; float speedy = particle_y_speed[particle_index]; atomicAdd(&particle_x_arr[particle_index], speedx); atomicAdd(&particle_y_arr[particle_index], speedy); } void NBodyRenderer::update_cuda() { // clear the frame hipMemset(frame_buffer, 0, m_width * m_height * sizeof(uint32_t)); // accelerate particles dim3 athreads_per_block(16, 16, 1); // 16 particles x 16 particles dim3 ablocks_count(ceil((float)particle_count / 16), ceil((float)particle_count / 16), 1); hipLaunchKernelGGL(( cuda_accelerate), dim3(ablocks_count), dim3(athreads_per_block), 0, 0, particle_x_arr, particle_y_arr, particle_x_speed, particle_y_speed, particle_count ); // move particles dim3 mthreads_per_block(100, 1, 1); dim3 mblocks_count(ceil((float)particle_count / 100), 1, 1); hipLaunchKernelGGL(( cuda_move), dim3(mblocks_count), dim3(mthreads_per_block), 0, 0, particle_x_arr, particle_y_arr, particle_x_speed, particle_y_speed, particle_count ); // render particles dim3 rthreads_per_block(5, 5, 10); // 10 particles x 5 column blocks x 5 rows blocks dim3 rblocks_count(ceil((float)particle_count / 10), 1, 1); hipLaunchKernelGGL(( cuda_render), dim3(rblocks_count), dim3(rthreads_per_block), 0, 0, frame_buffer, m_width, m_height, particle_x_arr, particle_y_arr, particle_count ); hipDeviceSynchronize(); } void NBodyRenderer::update() { #ifdef USE_ROCM 
update_cuda(); #else update_software(); #endif } int NBodyRenderer::width() { return m_width; } int NBodyRenderer::height() { return m_height; } size_t NBodyRenderer::buffer_size() const { return m_width * m_height; } const uint32_t* NBodyRenderer::get_buffer() { return frame_buffer; }
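The atomicCAS loop inside cuda_render implements a saturating add on the packed pixel value. A minimal sketch of that pattern factored into a standalone device helper; the name atomicAddSaturate is hypothetical and the helper is not used by the file above.

__device__ inline uint32_t atomicAddSaturate(uint32_t *addr, uint32_t delta, uint32_t maxval)
{
    uint32_t current = *addr;
    uint32_t assumed;
    do {
        assumed = current;
        // Recompute the clamped sum from the latest observed value; atomicCAS only
        // commits it if no other thread updated the word in the meantime.
        uint32_t sum = assumed + delta;
        uint32_t desired = (sum > maxval) ? maxval : sum;
        current = atomicCAS((unsigned int *)addr, assumed, desired);
    } while (assumed != current);
    return current;  // value observed before this thread's update
}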
cf511d3e942d097a7cb48ae57046d5435480b94f.cu
#include <algorithm> #include <cmath> #ifdef USE_CUDA #include <cuda_runtime.h> #endif #include "nbodycuda.h" using std::tie; template<class T> __device__ __host__ constexpr const T& clamp(const T& v, const T& lo, const T& hi) { if (v < lo) return lo; else if (v > hi) return hi; else return v; } __device__ __host__ constexpr float particle_render_kernel( float x1, float y1, float x2, float y2 ) { constexpr float max_value = 250; const float sqdistance = powf(x1 - x2, 2) + powf(y1 - y2, 2); const float value = 1 / powf(sqdistance, 1.7 / 2) * max_value; return value; } __device__ __host__ constexpr inline size_t coords2d_to_1d(size_t row_size, size_t x, size_t y) { return row_size * y + x; } NBodyRenderer::NBodyRenderer(size_t width, size_t height) : m_width{width}, m_height{height} { auto particle_generator = UniformRandomParticleGenerator{ 0.0f, (float)width, 0.0f, (float)height, 15000u, }; vector<tuple<float, float, float, float>> particles = particle_generator.get_particles(); #ifdef USE_CUDA cudaMallocManaged(&frame_buffer, m_width * m_height * sizeof(uint32_t)); cudaMallocManaged(&particle_x_arr, particles.size() * sizeof(float)); cudaMallocManaged(&particle_y_arr, particles.size() * sizeof(float)); cudaMallocManaged(&particle_x_speed, particles.size() * sizeof(float)); cudaMallocManaged(&particle_y_speed, particles.size() * sizeof(float)); #else frame_buffer = (uint32_t*)malloc(m_width * m_height * sizeof(uint32_t)); particle_x_arr = (float*)malloc(particles.size() * sizeof(float)); particle_y_arr = (float*)malloc(particles.size() * sizeof(float)); particle_x_speed = (float*)malloc(particles.size() * sizeof(float)); particle_y_speed = (float*)malloc(particles.size() * sizeof(float)); #endif particle_count = particles.size(); for (size_t i = 0; i < particle_count; ++i) { particle_x_arr[i] = std::get<0>(particles[i]); particle_y_arr[i] = std::get<1>(particles[i]); particle_x_speed[i] = std::get<2>(particles[i]); particle_y_speed[i] = std::get<3>(particles[i]); } } NBodyRenderer::~NBodyRenderer() { #ifdef USE_CUDA cudaFree(frame_buffer); cudaFree(particle_x_arr); cudaFree(particle_y_arr); cudaFree(particle_x_speed); cudaFree(particle_y_speed); #else free(frame_buffer); free(particle_x_arr); free(particle_y_arr); free(particle_x_speed); free(particle_y_speed); #endif } void NBodyRenderer::update_software() { // TODO: update particle positions for (size_t pixel_x = 0; pixel_x < m_width; ++pixel_x) { for (size_t pixel_y = 0; pixel_y < m_height; ++pixel_y) { float brightness = 0; for (size_t i = 0; i < particle_count; ++i) { float x = particle_x_arr[i]; float y = particle_y_arr[i]; brightness += particle_render_kernel(pixel_x, pixel_y, x, y); } uint32_t channel = clamp((int)brightness, 0, 255); uint32_t pixel = ( (channel << 16) | (channel << 8) | channel ); frame_buffer[coords2d_to_1d(m_width, pixel_x, pixel_y)] = pixel; } } } __global__ void cuda_render( uint32_t frame_buffer[], size_t width, size_t height, const float particle_x_arr[], const float particle_y_arr[], size_t particle_count ) { const size_t particle_index = blockIdx.x * 10 + threadIdx.z; if (particle_index >= particle_count) return; float px = particle_x_arr[particle_index]; float py = particle_y_arr[particle_index]; ssize_t center_pixel_x = int(px); ssize_t center_pixel_y = int(py); constexpr ssize_t window_size = 9; for (ssize_t shift_x = -4; shift_x <= 4; ++shift_x) { for (ssize_t shift_y = -4; shift_y <= 4; ++shift_y) { ssize_t pixel_x = center_pixel_x + (window_size * ((ssize_t)threadIdx.x - 2)) + shift_x; ssize_t 
pixel_y = center_pixel_y + (window_size * ((ssize_t)threadIdx.y - 2)) + shift_y; if (pixel_x < 0 || pixel_x >= width || pixel_y < 0 || pixel_y >= height) { // pixel out of bounds continue; } float dbrightness = particle_render_kernel((float)pixel_x, (float)pixel_y, px, py); int dchannel = clamp((uint32_t)dbrightness, (uint32_t)0, (uint32_t)255); uint32_t dpixel = ( (dchannel << 16) | (dchannel << 8) | dchannel ); uint32_t *pixel_addr = &frame_buffer[coords2d_to_1d(width, pixel_x, pixel_y)]; uint32_t current = *pixel_addr; uint32_t assumed = 0; do { assumed = current; current = atomicCAS( (unsigned int*)pixel_addr, assumed, clamp(assumed + dpixel, 0u, 0x00FFFFFFu) ); } while (assumed != current); } } } __global__ void cuda_accelerate( const float particle_x_arr[], const float particle_y_arr[], float particle_x_speed[], float particle_y_speed[], size_t particle_count ) { // target is one being accelerated, source is one applying force const size_t particle_target = blockIdx.x * 16 + threadIdx.x; const size_t particle_source = blockIdx.y * 16 + threadIdx.y; if (particle_target >= particle_count || particle_source >= particle_count) return; if (particle_target == particle_source) return; const float targetx = particle_x_arr[particle_target]; const float targety = particle_y_arr[particle_target]; const float sourcex = particle_x_arr[particle_source]; const float sourcey = particle_y_arr[particle_source]; constexpr float g = 0.001; constexpr float maxaccel = 0.01; const float dx = sourcex - targetx; const float dy = sourcey - targety; const float sqdistance = dx * dx + dy * dy; float accelx = dx / sqdistance * g; float accely = dy / sqdistance * g; accelx = clamp(accelx, -maxaccel, maxaccel); accely = clamp(accely, -maxaccel, maxaccel); particle_x_speed[particle_target] += accelx; particle_y_speed[particle_target] += accely; } __global__ void cuda_move( float particle_x_arr[], float particle_y_arr[], float particle_x_speed[], float particle_y_speed[], size_t particle_count ) { const size_t particle_index = blockIdx.x * 100 + threadIdx.x; if (particle_index >= particle_count) return; float speedx = particle_x_speed[particle_index]; float speedy = particle_y_speed[particle_index]; atomicAdd(&particle_x_arr[particle_index], speedx); atomicAdd(&particle_y_arr[particle_index], speedy); } void NBodyRenderer::update_cuda() { // clear the frame cudaMemset(frame_buffer, 0, m_width * m_height * sizeof(uint32_t)); // accelerate particles dim3 athreads_per_block(16, 16, 1); // 16 particles x 16 particles dim3 ablocks_count(ceil((float)particle_count / 16), ceil((float)particle_count / 16), 1); cuda_accelerate<<<ablocks_count, athreads_per_block>>>( particle_x_arr, particle_y_arr, particle_x_speed, particle_y_speed, particle_count ); // move particles dim3 mthreads_per_block(100, 1, 1); dim3 mblocks_count(ceil((float)particle_count / 100), 1, 1); cuda_move<<<mblocks_count, mthreads_per_block>>>( particle_x_arr, particle_y_arr, particle_x_speed, particle_y_speed, particle_count ); // render particles dim3 rthreads_per_block(5, 5, 10); // 10 particles x 5 column blocks x 5 rows blocks dim3 rblocks_count(ceil((float)particle_count / 10), 1, 1); cuda_render<<<rblocks_count, rthreads_per_block>>>( frame_buffer, m_width, m_height, particle_x_arr, particle_y_arr, particle_count ); cudaDeviceSynchronize(); } void NBodyRenderer::update() { #ifdef USE_CUDA update_cuda(); #else update_software(); #endif } int NBodyRenderer::width() { return m_width; } int NBodyRenderer::height() { return m_height; } size_t 
NBodyRenderer::buffer_size() const { return m_width * m_height; } const uint32_t* NBodyRenderer::get_buffer() { return frame_buffer; }
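update_cuda launches three kernels without inspecting the error state. A minimal sketch of a launch check that could be called after each <<<...>>> launch; the helper name check_launch and the <cstdio> include are assumptions, not part of the original file.

#include <cstdio>  // assumption: fprintf is not otherwise pulled in by nbodycuda.h

static void check_launch(const char *name)
{
    cudaError_t err = cudaGetLastError();      // configuration / launch errors
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();         // errors raised while the kernel ran
    if (err != cudaSuccess)
        fprintf(stderr, "%s failed: %s\n", name, cudaGetErrorString(err));
}

// Usage sketch, e.g. after the render launch in update_cuda():
//   cuda_render<<<rblocks_count, rthreads_per_block>>>(...);
//   check_launch("cuda_render");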
e19d6f2f99f0dddb0e279f658ae650cdbdaf3466.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if GOOGLE_CUDA #define EIGEN_USE_GPU //#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ const int batch=512; __shared__ float buf[batch*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int k2=0;k2<m;k2+=batch){ int end_k=min(m,k2+batch)-k2; for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){ buf[j]=xyz2[(i*m+k2)*3+j]; } __syncthreads(); for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz[(i*n+j)*3+0]; float y1=xyz[(i*n+j)*3+1]; float z1=xyz[(i*n+j)*3+2]; int best_i=0; float best=0; int end_ka=end_k-(end_k&3); if (end_ka==batch){ for (int k=0;k<batch;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } }else{ for (int k=0;k<end_ka;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } } for (int k=end_ka;k<end_k;k++){ float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } if (k2==0 || result[(i*n+j)]>best){ result[(i*n+j)]=best; result_i[(i*n+j)]=best_i; } } __syncthreads(); } } } void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){ hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,n,xyz,m,xyz2,result,result_i); hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,m,xyz2,n,xyz,result2,result2_i); } __global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz1[(i*n+j)*3+0]; float y1=xyz1[(i*n+j)*3+1]; float z1=xyz1[(i*n+j)*3+2]; int j2=idx1[i*n+j]; float x2=xyz2[(i*m+j2)*3+0]; float y2=xyz2[(i*m+j2)*3+1]; float z2=xyz2[(i*m+j2)*3+2]; float g=grad_dist1[i*n+j]*2; atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2)); atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2))); } } } void NmDistanceGradKernelLauncher(int b,int n,const float * xyz1,int 
m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){ hipMemset(grad_xyz1,0,b*n*3*4); hipMemset(grad_xyz2,0,b*m*3*4); hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2); hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1); } #endif
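A minimal CPU reference for the same one-directional nearest-neighbour search, useful for spot-checking the kernel output on small inputs; the function name NmDistanceCpu is illustrative and not part of the original extension.

// For each of the n points in xyz, find the squared distance to (and index of)
// its nearest neighbour among the m points in xyz2, independently per batch b.
void NmDistanceCpu(int b, int n, const float *xyz, int m, const float *xyz2,
                   float *result, int *result_i)
{
    for (int i = 0; i < b; i++) {
        for (int j = 0; j < n; j++) {
            float x1 = xyz[(i * n + j) * 3 + 0];
            float y1 = xyz[(i * n + j) * 3 + 1];
            float z1 = xyz[(i * n + j) * 3 + 2];
            float best = 0; int best_i = 0;
            for (int k = 0; k < m; k++) {
                float dx = xyz2[(i * m + k) * 3 + 0] - x1;
                float dy = xyz2[(i * m + k) * 3 + 1] - y1;
                float dz = xyz2[(i * m + k) * 3 + 2] - z1;
                float d = dx * dx + dy * dy + dz * dz;
                if (k == 0 || d < best) { best = d; best_i = k; }
            }
            result[i * n + j] = best;
            result_i[i * n + j] = best_i;
        }
    }
}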
e19d6f2f99f0dddb0e279f658ae650cdbdaf3466.cu
#if GOOGLE_CUDA #define EIGEN_USE_GPU //#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ const int batch=512; __shared__ float buf[batch*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int k2=0;k2<m;k2+=batch){ int end_k=min(m,k2+batch)-k2; for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){ buf[j]=xyz2[(i*m+k2)*3+j]; } __syncthreads(); for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz[(i*n+j)*3+0]; float y1=xyz[(i*n+j)*3+1]; float z1=xyz[(i*n+j)*3+2]; int best_i=0; float best=0; int end_ka=end_k-(end_k&3); if (end_ka==batch){ for (int k=0;k<batch;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } }else{ for (int k=0;k<end_ka;k+=4){ { float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } { float x2=buf[k*3+3]-x1; float y2=buf[k*3+4]-y1; float z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1; } } { float x2=buf[k*3+6]-x1; float y2=buf[k*3+7]-y1; float z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2; } } { float x2=buf[k*3+9]-x1; float y2=buf[k*3+10]-y1; float z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3; } } } } for (int k=end_ka;k<end_k;k++){ float x2=buf[k*3+0]-x1; float y2=buf[k*3+1]-y1; float z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2; } } if (k2==0 || result[(i*n+j)]>best){ result[(i*n+j)]=best; result_i[(i*n+j)]=best_i; } } __syncthreads(); } } } void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){ NmDistanceKernel<<<dim3(32,16,1),512>>>(b,n,xyz,m,xyz2,result,result_i); NmDistanceKernel<<<dim3(32,16,1),512>>>(b,m,xyz2,n,xyz,result2,result2_i); } __global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz1[(i*n+j)*3+0]; float y1=xyz1[(i*n+j)*3+1]; float z1=xyz1[(i*n+j)*3+2]; int j2=idx1[i*n+j]; float x2=xyz2[(i*m+j2)*3+0]; float y2=xyz2[(i*m+j2)*3+1]; float z2=xyz2[(i*m+j2)*3+2]; float g=grad_dist1[i*n+j]*2; atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2)); atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2))); } } } void NmDistanceGradKernelLauncher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){ 
cudaMemset(grad_xyz1,0,b*n*3*4); cudaMemset(grad_xyz2,0,b*m*3*4); NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2); NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1); } #endif
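A short check of the backward pass above: with d = (x1-x2)^2 + (y1-y2)^2 + (z1-z2)^2 for the matched pair (j, idx1[j]),

//   dd/dx1 =  2*(x1 - x2)   ->  grad_xyz1 += grad_dist1 * 2 * (x1 - x2)
//   dd/dx2 = -2*(x1 - x2)   ->  grad_xyz2 -= grad_dist1 * 2 * (x1 - x2)
// (and symmetrically for y and z), which is exactly what the atomicAdd calls in
// NmDistanceGradKernel accumulate with g = grad_dist1 * 2.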
875ba4492b2af9483a433645ad22361f044c115d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #include <thrust\transform.h> #include <thrust\transform_reduce.h> #include <thrust\device_ptr.h> #include <thrust\device_vector.h> #include <thrust\host_vector.h> #include <thrust\functional.h> #include <thrust\iterator\counting_iterator.h> #include <thrust\sequence.h> #include "LR_GPU_Functors.cu" //DLL exports extern "C" __declspec(dllexport) int __cdecl Learn(float*, float*, unsigned int, unsigned int, unsigned int, float, float, float*, float*, float*); extern "C" __declspec(dllexport) int __cdecl Predict(float*, unsigned int, unsigned int, float*, float *, float *, float *); // //This method does mean normalization // void NormalizeFeaturesByMeanAndStd(unsigned int trainingDataCount, float * d_trainingData, thrust::device_vector<float> dv_mean, thrust::device_vector<float> dv_std) { //Calculate mean norm: (x - mean) / std unsigned int featureCount = dv_mean.size(); float * dvp_Mean = thrust::raw_pointer_cast( &dv_mean[0] ); float * dvp_Std = thrust::raw_pointer_cast( &dv_std[0] ); FeatureNormalizationgFunctor featureNormalizationgFunctor(dvp_Mean, dvp_Std, featureCount); thrust::device_ptr<float> dvp_trainingData(d_trainingData); thrust::transform(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(trainingDataCount * featureCount), dvp_trainingData, dvp_trainingData, featureNormalizationgFunctor); } // //This method calculates mean, standard deviation and does mean normalization // void NormalizeFeatures(unsigned int featureCount, unsigned int trainingDataCount, float * d_trainingData, float * meanResult, float * stdResult) { //Calculate the mean. One thread per feature. thrust::device_vector<float> dv_mean(featureCount,0); MeanFunctor meanFunctor(d_trainingData, trainingDataCount, featureCount); thrust::transform(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(featureCount), dv_mean.begin(), meanFunctor); //Calculate the standard deviation. One thread per feature. 
thrust::device_vector<float> dv_std(featureCount,0); STDFunctor stdFunctor(d_trainingData, trainingDataCount, featureCount); thrust::transform(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(featureCount), dv_mean.begin(), dv_std.begin(), stdFunctor); //Calculate mean norm: (x - mean) / std NormalizeFeaturesByMeanAndStd(trainingDataCount, d_trainingData, dv_mean, dv_std); thrust::copy(dv_mean.begin(), dv_mean.end(), meanResult); thrust::copy(dv_std.begin(), dv_std.end(), stdResult); } void AddBiasTerm(float * inputData, float * outputData, int dataCount, int featureCount) { //transfer the trainindata by adding also the bias term for(int i = 0; i < dataCount; i++) { outputData[i * featureCount] = 1; for(int f = 1; f < featureCount; f++) outputData[i * featureCount + f] = inputData[(i * (featureCount - 1)) + (f-1)]; } } #define IsValidNumber(x) (x == x && x <= DBL_MAX && x >= -DBL_MAX) // //Learn the hypothesis for the given data // extern int Learn(float* trainingData, float * labelData, unsigned int featureCount, unsigned int trainingDataCount, unsigned int gdIterationCount, float learningRate, float regularizationParam, float * result, float * meanResult, float * stdResult) { featureCount++; //allcate host memory thrust::host_vector<float> hv_hypothesis(featureCount, 0); thrust::host_vector<float> hv_trainingData(trainingDataCount * featureCount); thrust::host_vector<float> hv_labelData(labelData, labelData + trainingDataCount); //transfer the trainindata by adding also the bias term AddBiasTerm(trainingData, &hv_trainingData[0], trainingDataCount, featureCount); //allocate device vector thrust::device_vector<float> dv_hypothesis = hv_hypothesis; thrust::device_vector<float> dv_trainingData = hv_trainingData; thrust::device_vector<float> dv_labelData = hv_labelData; thrust::device_vector<float> dv_costData(trainingDataCount, 0); //Get device vector pointers float * pdv_hypothesis = thrust::raw_pointer_cast( &dv_hypothesis[0] ); float * pdv_trainingData = thrust::raw_pointer_cast( &dv_trainingData[0] ); float * pdv_costData = thrust::raw_pointer_cast( &dv_costData[0] ); //Normalize the features NormalizeFeatures(featureCount, trainingDataCount, pdv_trainingData, meanResult, stdResult); TrainFunctor tf(pdv_trainingData, pdv_hypothesis, featureCount); TrainFunctor2 tf2(pdv_costData, pdv_trainingData, featureCount); //run gdIterationCount of gradient descent iterations for(int i = 0; i < gdIterationCount; i++) { thrust::transform(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(trainingDataCount), dv_labelData.begin(), dv_costData.begin(), tf); //calculate gradient descent iterations for(int featureNumber = 0; featureNumber < featureCount; featureNumber++) { tf2.SetFeatureNumber(featureNumber); float totalCost = thrust::transform_reduce(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(trainingDataCount), tf2, 0.0f, thrust::plus<float>()); if (!IsValidNumber(totalCost)) { i = gdIterationCount; break; } float regularizationTerm = 1 - (learningRate * (regularizationParam / trainingDataCount)); hv_hypothesis[featureNumber] = (hv_hypothesis[featureNumber] * regularizationTerm) - learningRate * (totalCost / trainingDataCount); } //Copy the theta back to the device vector dv_hypothesis = hv_hypothesis; } //copy the hypothesis into the result buffer thrust::copy(hv_hypothesis.begin(), hv_hypothesis.end(), result); return 0; } // //makes prediction for the given test data based on the hypothesis. Also applies feature normalization. 
// extern int Predict(float* testData, unsigned int featureCount, unsigned int testDataCount, float* hypothesis, float * mean, float * std, float * result) { featureCount++; thrust::host_vector<float> hv_testData(testDataCount * featureCount); AddBiasTerm(testData, &hv_testData[0], testDataCount, featureCount); //Allocate device memory thrust::device_vector<float> dv_hypothesis(hypothesis, hypothesis + featureCount); thrust::device_vector<float> dv_testData = hv_testData; thrust::device_vector<float> dv_result(testDataCount); thrust::device_vector<float> dv_mean(mean, mean + featureCount); thrust::device_vector<float> dv_std(std, std + featureCount); //Normalize features float * pdv_hypothesis = thrust::raw_pointer_cast( &dv_hypothesis[0] ); float * pdv_testData = thrust::raw_pointer_cast( &dv_testData[0] ); NormalizeFeaturesByMeanAndStd(testDataCount, pdv_testData, dv_mean, dv_std); //Predict PredictFunctor predictFunctor(pdv_testData, pdv_hypothesis, featureCount); thrust::transform(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(testDataCount), dv_result.begin(), predictFunctor); //copy the result from device memory into the result buffer thrust::copy(dv_result.begin(), dv_result.end(), result); return 0; }
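The per-feature update inside Learn is standard regularized gradient descent. A minimal host-side sketch of the same update for one parameter, with plain floats instead of the thrust functors; the function name update_theta is illustrative, and it assumes totalCost from the transform_reduce over TrainFunctor2 is the usual sum_i (h(x_i) - y_i) * x_ij.

// theta_j := theta_j * (1 - alpha * lambda / m) - alpha * (gradient_sum / m)
float update_theta(float theta_j, float alpha, float lambda, int m, float gradient_sum)
{
    float regularizationTerm = 1.0f - alpha * (lambda / m);
    return theta_j * regularizationTerm - alpha * (gradient_sum / m);
}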
875ba4492b2af9483a433645ad22361f044c115d.cu
#include <stdio.h> #include <iostream> #include <cuda.h> #include "cuda_runtime.h" #include <thrust\transform.h> #include <thrust\transform_reduce.h> #include <thrust\device_ptr.h> #include <thrust\device_vector.h> #include <thrust\host_vector.h> #include <thrust\functional.h> #include <thrust\iterator\counting_iterator.h> #include <thrust\sequence.h> #include "LR_GPU_Functors.cu" //DLL exports extern "C" __declspec(dllexport) int __cdecl Learn(float*, float*, unsigned int, unsigned int, unsigned int, float, float, float*, float*, float*); extern "C" __declspec(dllexport) int __cdecl Predict(float*, unsigned int, unsigned int, float*, float *, float *, float *); // //This method does mean normalization // void NormalizeFeaturesByMeanAndStd(unsigned int trainingDataCount, float * d_trainingData, thrust::device_vector<float> dv_mean, thrust::device_vector<float> dv_std) { //Calculate mean norm: (x - mean) / std unsigned int featureCount = dv_mean.size(); float * dvp_Mean = thrust::raw_pointer_cast( &dv_mean[0] ); float * dvp_Std = thrust::raw_pointer_cast( &dv_std[0] ); FeatureNormalizationgFunctor featureNormalizationgFunctor(dvp_Mean, dvp_Std, featureCount); thrust::device_ptr<float> dvp_trainingData(d_trainingData); thrust::transform(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(trainingDataCount * featureCount), dvp_trainingData, dvp_trainingData, featureNormalizationgFunctor); } // //This method calculates mean, standard deviation and does mean normalization // void NormalizeFeatures(unsigned int featureCount, unsigned int trainingDataCount, float * d_trainingData, float * meanResult, float * stdResult) { //Calculate the mean. One thread per feature. thrust::device_vector<float> dv_mean(featureCount,0); MeanFunctor meanFunctor(d_trainingData, trainingDataCount, featureCount); thrust::transform(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(featureCount), dv_mean.begin(), meanFunctor); //Calculate the standard deviation. One thread per feature. 
thrust::device_vector<float> dv_std(featureCount,0); STDFunctor stdFunctor(d_trainingData, trainingDataCount, featureCount); thrust::transform(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(featureCount), dv_mean.begin(), dv_std.begin(), stdFunctor); //Calculate mean norm: (x - mean) / std NormalizeFeaturesByMeanAndStd(trainingDataCount, d_trainingData, dv_mean, dv_std); thrust::copy(dv_mean.begin(), dv_mean.end(), meanResult); thrust::copy(dv_std.begin(), dv_std.end(), stdResult); } void AddBiasTerm(float * inputData, float * outputData, int dataCount, int featureCount) { //transfer the trainindata by adding also the bias term for(int i = 0; i < dataCount; i++) { outputData[i * featureCount] = 1; for(int f = 1; f < featureCount; f++) outputData[i * featureCount + f] = inputData[(i * (featureCount - 1)) + (f-1)]; } } #define IsValidNumber(x) (x == x && x <= DBL_MAX && x >= -DBL_MAX) // //Learn the hypothesis for the given data // extern int Learn(float* trainingData, float * labelData, unsigned int featureCount, unsigned int trainingDataCount, unsigned int gdIterationCount, float learningRate, float regularizationParam, float * result, float * meanResult, float * stdResult) { featureCount++; //allcate host memory thrust::host_vector<float> hv_hypothesis(featureCount, 0); thrust::host_vector<float> hv_trainingData(trainingDataCount * featureCount); thrust::host_vector<float> hv_labelData(labelData, labelData + trainingDataCount); //transfer the trainindata by adding also the bias term AddBiasTerm(trainingData, &hv_trainingData[0], trainingDataCount, featureCount); //allocate device vector thrust::device_vector<float> dv_hypothesis = hv_hypothesis; thrust::device_vector<float> dv_trainingData = hv_trainingData; thrust::device_vector<float> dv_labelData = hv_labelData; thrust::device_vector<float> dv_costData(trainingDataCount, 0); //Get device vector pointers float * pdv_hypothesis = thrust::raw_pointer_cast( &dv_hypothesis[0] ); float * pdv_trainingData = thrust::raw_pointer_cast( &dv_trainingData[0] ); float * pdv_costData = thrust::raw_pointer_cast( &dv_costData[0] ); //Normalize the features NormalizeFeatures(featureCount, trainingDataCount, pdv_trainingData, meanResult, stdResult); TrainFunctor tf(pdv_trainingData, pdv_hypothesis, featureCount); TrainFunctor2 tf2(pdv_costData, pdv_trainingData, featureCount); //run gdIterationCount of gradient descent iterations for(int i = 0; i < gdIterationCount; i++) { thrust::transform(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(trainingDataCount), dv_labelData.begin(), dv_costData.begin(), tf); //calculate gradient descent iterations for(int featureNumber = 0; featureNumber < featureCount; featureNumber++) { tf2.SetFeatureNumber(featureNumber); float totalCost = thrust::transform_reduce(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(trainingDataCount), tf2, 0.0f, thrust::plus<float>()); if (!IsValidNumber(totalCost)) { i = gdIterationCount; break; } float regularizationTerm = 1 - (learningRate * (regularizationParam / trainingDataCount)); hv_hypothesis[featureNumber] = (hv_hypothesis[featureNumber] * regularizationTerm) - learningRate * (totalCost / trainingDataCount); } //Copy the theta back to the device vector dv_hypothesis = hv_hypothesis; } //copy the hypothesis into the result buffer thrust::copy(hv_hypothesis.begin(), hv_hypothesis.end(), result); return 0; } // //makes prediction for the given test data based on the hypothesis. Also applies feature normalization. 
// extern int Predict(float* testData, unsigned int featureCount, unsigned int testDataCount, float* hypothesis, float * mean, float * std, float * result) { featureCount++; thrust::host_vector<float> hv_testData(testDataCount * featureCount); AddBiasTerm(testData, &hv_testData[0], testDataCount, featureCount); //Allocate device memory thrust::device_vector<float> dv_hypothesis(hypothesis, hypothesis + featureCount); thrust::device_vector<float> dv_testData = hv_testData; thrust::device_vector<float> dv_result(testDataCount); thrust::device_vector<float> dv_mean(mean, mean + featureCount); thrust::device_vector<float> dv_std(std, std + featureCount); //Normalize features float * pdv_hypothesis = thrust::raw_pointer_cast( &dv_hypothesis[0] ); float * pdv_testData = thrust::raw_pointer_cast( &dv_testData[0] ); NormalizeFeaturesByMeanAndStd(testDataCount, pdv_testData, dv_mean, dv_std); //Predict PredictFunctor predictFunctor(pdv_testData, pdv_hypothesis, featureCount); thrust::transform(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(testDataCount), dv_result.begin(), predictFunctor); //copy the result from device memory into the result buffer thrust::copy(dv_result.begin(), dv_result.end(), result); return 0; }
51ad1ab7bf66ec559eed95ad05263ed00b3081bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include "caffe/layers/fast_rcnn_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out) { // f(x) = 0.5 * x^2 if |x| < 1 // |x| - 0.5 otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = 0.5 * val * val; } else { out[index] = abs_val - 0.5; } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_.mutable_gpu_data()); // d := b0 - b1 if (has_weights_) { caffe_gpu_mul( count, bottom[2]->gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data()); // d := w * (b0 - b1) } hipLaunchKernelGGL(( SmoothL1Forward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diff_.gpu_data(), errors_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; Dtype loss; caffe_gpu_asum(count, errors_.gpu_data(), &loss); top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num(); } template <typename Dtype> __global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out) { // f'(x) = x if |x| < 1 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int count = diff_.count(); hipLaunchKernelGGL(( SmoothL1Backward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diff_.gpu_data(), diff_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); caffe_gpu_axpby( bottom[i]->count(), // count alpha, // alpha diff_.gpu_data(), // x Dtype(0), // beta bottom[i]->mutable_gpu_diff()); // y } } } INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer); } // namespace caffe
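A minimal host-side reference of the Smooth L1 forward and backward formulas used by the kernels above, e.g. for unit tests; the helper names are illustrative and not part of Caffe.

#include <cmath>

// f(x)  = 0.5*x^2 if |x| < 1, else |x| - 0.5     (SmoothL1Forward)
// f'(x) = x       if |x| < 1, else sign(x)       (SmoothL1Backward)
inline float smooth_l1(float x)
{
    float a = std::fabs(x);
    return (a < 1.0f) ? 0.5f * x * x : a - 0.5f;
}

inline float smooth_l1_grad(float x)
{
    float a = std::fabs(x);
    return (a < 1.0f) ? x : ((x > 0.0f) - (x < 0.0f));
}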
51ad1ab7bf66ec559eed95ad05263ed00b3081bc.cu
// ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include "caffe/layers/fast_rcnn_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out) { // f(x) = 0.5 * x^2 if |x| < 1 // |x| - 0.5 otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = 0.5 * val * val; } else { out[index] = abs_val - 0.5; } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_.mutable_gpu_data()); // d := b0 - b1 if (has_weights_) { caffe_gpu_mul( count, bottom[2]->gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data()); // d := w * (b0 - b1) } SmoothL1Forward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diff_.gpu_data(), errors_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; Dtype loss; caffe_gpu_asum(count, errors_.gpu_data(), &loss); top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num(); } template <typename Dtype> __global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out) { // f'(x) = x if |x| < 1 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int count = diff_.count(); SmoothL1Backward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diff_.gpu_data(), diff_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); caffe_gpu_axpby( bottom[i]->count(), // count alpha, // alpha diff_.gpu_data(), // x Dtype(0), // beta bottom[i]->mutable_gpu_diff()); // y } } } INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer); } // namespace caffe
38c6e9cb13908909f7dd6f4a3d0c4d6544f65e77.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <rocblas.h> #include <cudnn.h> #include <opencv2/opencv.hpp> #define getMillisecond(start, end) \ (end.tv_sec-start.tv_sec)*1000 + \ (end.tv_usec-start.tv_usec)/1000.0 #define checkCUDA(expression) \ { \ hipError_t status = (expression); \ if (status != hipSuccess) { \ printf("Error on line %d: err code %d (%s)\n", \ __LINE__, status, hipGetErrorString(status)); \ exit(EXIT_FAILURE); \ } \ } #define checkCUBLAS(expression) \ { \ hipblasStatus_t status = (expression); \ if (status != HIPBLAS_STATUS_SUCCESS) { \ printf("Error on line %d: err code %d\n", \ __LINE__, status); \ exit(EXIT_FAILURE); \ } \ } #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ printf("Error on line %d: err code %d (%s)\n", \ __LINE__, status, cudnnGetErrorString(status)); \ exit(EXIT_FAILURE); \ } \ } cv::Mat load_image(const char* image_path) { cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_GRAYSCALE); image.convertTo(image, CV_32FC1); cv::normalize(image, image, 0, 1, cv::NORM_MINMAX); return image; } bool load_weight(float* p_weight, int elemCount, const char* filename) { // Read weights file FILE *fp = fopen(filename, "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", filename); return false; } fread(p_weight, sizeof(float), elemCount, fp); fclose(fp); return true; } int main(int argc, char const *argv[]) { hipblasHandle_t cublas; cudnnHandle_t cudnn; checkCUBLAS(hipblasCreate(&cublas)); checkCUDNN(cudnnCreate(&cudnn)); int batch_size = 1; size_t workspace_bytes = 0; cv::Mat image = load_image("image/input.pgm"); /* Input */ int input_dim = 28; int input_channels = 1; float* d_input{NULL}; int input_bytes = batch_size * input_channels * input_dim * input_dim * sizeof(float); hipMalloc(&d_input, input_bytes); hipMemcpy(d_input, image.ptr<float>(0), input_bytes, hipMemcpyHostToDevice); // Input Tensor cudnnTensorDescriptor_t input_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/batch_size, /*channels=*/input_channels, /*height=*/input_dim, /*width=*/input_dim)); /* Layer 1. 
Convolution */ int l1_kernel_dim = 5; int l1_pad = 0; int l1_stride = 1; int l1_dilation = 1; int l1_out_dim = 24; int l1_out_channels = 20; char* l1_weight_file = "pretrained/conv1.bin"; char* l1_weight_bias_file = "pretrained/conv1.bias.bin"; // Describing Operands cudnnTensorDescriptor_t l1_out_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l1_out_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l1_out_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/batch_size, /*channels=*/l1_out_channels, /*height=*/l1_out_dim, /*width=*/l1_out_dim)); cudnnTensorDescriptor_t l1_kernel_bias_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l1_kernel_bias_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l1_kernel_bias_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/l1_out_channels, /*height=*/1, /*width=*/1)); cudnnFilterDescriptor_t l1_kernel_descriptor; checkCUDNN(cudnnCreateFilterDescriptor(&l1_kernel_descriptor)); checkCUDNN(cudnnSetFilter4dDescriptor(l1_kernel_descriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/l1_out_channels, /*in_channels=*/input_channels, /*kernel_height=*/l1_kernel_dim, /*kernel_width=*/l1_kernel_dim)); // Describing the Convolution Kernel cudnnConvolutionDescriptor_t l1_convolution_descriptor; checkCUDNN(cudnnCreateConvolutionDescriptor(&l1_convolution_descriptor)); checkCUDNN(cudnnSetConvolution2dDescriptor(l1_convolution_descriptor, /*pad_height=*/l1_pad, /*pad_width=*/l1_pad, /*vertical_stride=*/l1_stride, /*horizontal_stride=*/l1_stride, /*dilation_height=*/l1_dilation, /*dilation_width=*/l1_dilation, /*mode=*/CUDNN_CROSS_CORRELATION, /*dataType=*/CUDNN_DATA_FLOAT )); cudnnConvolutionFwdAlgo_t l1_convolution_algorithm; checkCUDNN( cudnnGetConvolutionForwardAlgorithm(cudnn, input_descriptor, l1_kernel_descriptor, l1_convolution_descriptor, l1_out_descriptor, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, /*memoryLimitInBytes=*/0, &l1_convolution_algorithm)); size_t l1_workspace_bytes = 0; checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn, input_descriptor, l1_kernel_descriptor, l1_convolution_descriptor, l1_out_descriptor, l1_convolution_algorithm, &l1_workspace_bytes)); workspace_bytes = max(workspace_bytes, l1_workspace_bytes); /* Allocating Memory for Layer 1 */ int l1_out_bytes = batch_size * l1_out_channels * l1_out_dim * l1_out_dim * sizeof(float); float* d_l1_output{NULL}; hipMalloc(&d_l1_output, l1_out_bytes); hipMemset(d_l1_output, 0, l1_out_bytes); int l1_kernel_bytes = input_channels * l1_out_channels * l1_kernel_dim * l1_kernel_dim * sizeof(float); float* l1_kernel = (float*)malloc (l1_kernel_bytes); float* l1_kernel_bias = (float*)malloc (l1_out_channels * sizeof(float)); // load pretrained weight load_weight(l1_kernel, input_channels * l1_out_channels * l1_kernel_dim * l1_kernel_dim, l1_weight_file); load_weight(l1_kernel_bias, l1_out_channels, l1_weight_bias_file); float* d_l1_kernel{NULL}; hipMalloc(&d_l1_kernel, l1_kernel_bytes); hipMemcpy(d_l1_kernel, l1_kernel, l1_kernel_bytes, hipMemcpyHostToDevice); float* d_l1_kernel_bias{NULL}; hipMalloc(&d_l1_kernel_bias, l1_out_channels * sizeof(float)); hipMemcpy(d_l1_kernel_bias, l1_kernel_bias, l1_out_channels * sizeof(float), hipMemcpyHostToDevice); /* Layer 2. 
Max Pooling */ int l2_pool_dim = 2; int l2_pad = 0; int l2_stride = 2; int l2_out_dim = (l1_out_dim + l2_pad*2) / l2_stride; int l2_out_channels = l1_out_channels; // Describing Operands cudnnTensorDescriptor_t l2_out_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l2_out_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l2_out_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/batch_size, /*channels=*/l2_out_channels, /*height=*/l2_out_dim, /*width=*/l2_out_dim)); cudnnPoolingDescriptor_t l2_pool_descriptor; checkCUDNN(cudnnCreatePoolingDescriptor(&l2_pool_descriptor)); checkCUDNN(cudnnSetPooling2dDescriptor(l2_pool_descriptor, /*poolingMode=*/CUDNN_POOLING_MAX, /*NanPropagationMode=*/CUDNN_PROPAGATE_NAN, l2_pool_dim, l2_pool_dim, l2_pad, l2_pad, l2_stride, l2_stride)); /* Allocating Memory for Layer 2 */ int l2_out_bytes = batch_size * l2_out_channels * l2_out_dim * l2_out_dim * sizeof(float); float* d_l2_output{NULL}; hipMalloc(&d_l2_output, l2_out_bytes); hipMemset(d_l2_output, 0, l2_out_bytes); /* Layer 3. Convolution */ int l3_kernel_dim = int l3_pad = int l3_stride = int l3_dilation = int l3_out_dim = int l3_out_channels = char* l3_weight_file = "pretrained/conv2.bin"; char* l3_weight_bias_file = "pretrained/conv2.bias.bin"; // Describing Operands cudnnTensorDescriptor_t l3_out_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l3_out_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l3_out_descriptor, /*format=*/ , /*dataType=*/ , /*batch_size=*/ , /*channels=*/ , /*height=*/ , /*width=*/ )); cudnnTensorDescriptor_t l3_kernel_bias_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l3_kernel_bias_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l3_kernel_bias_descriptor, /*format=*/ , /*dataType=*/ , /*batch_size=*/ , /*channels=*/ , /*height=*/ , /*width=*/ )); cudnnFilterDescriptor_t l3_kernel_descriptor; checkCUDNN(cudnnCreateFilterDescriptor(&l3_kernel_descriptor)); checkCUDNN(cudnnSetFilter4dDescriptor(l3_kernel_descriptor, /*dataType=*/ , /*format=*/ , /*out_channels=*/ , /*in_channels=*/ , /*kernel_height=*/ , /*kernel_width=*/ )); // Describing the Convolution Kernel cudnnConvolutionDescriptor_t l3_convolution_descriptor; checkCUDNN(cudnnCreateConvolutionDescriptor(&l3_convolution_descriptor)); checkCUDNN(cudnnSetConvolution2dDescriptor(l3_convolution_descriptor, /*pad_height=*/ , /*pad_width=*/ , /*vertical_stride=*/ , /*horizontal_stride=*/ , /*dilation_height=*/ , /*dilation_width=*/ , /*mode=*/ , /*dataType=*/ )); cudnnConvolutionFwdAlgo_t l3_convolution_algorithm; checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnn, , , , , , /*memoryLimitInBytes=*/0, )); size_t l3_workspace_bytes = 0; checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn, , , , , , )); workspace_bytes = max(workspace_bytes, l3_workspace_bytes); /* Allocating Memory for Layer 3 */ int l3_out_bytes = float* d_l3_output{NULL}; hipMalloc(&d_l3_output, l3_out_bytes); hipMemset(d_l3_output, 0, l3_out_bytes); int l3_kernel_bytes = float* l3_kernel = (float*)malloc (l3_kernel_bytes); float* l3_kernel_bias = (float*)malloc ( ); // load pretrained weight load_weight(l3_kernel, , l3_weight_file); load_weight(l3_kernel_bias, , l3_weight_bias_file); float* d_l3_kernel{NULL}; hipMalloc(&d_l3_kernel, l3_kernel_bytes); hipMemcpy(d_l3_kernel, l3_kernel, l3_kernel_bytes, hipMemcpyHostToDevice); float* d_l3_kernel_bias{NULL}; hipMalloc(&d_l3_kernel_bias, ); hipMemcpy(d_l3_kernel_bias, l3_kernel_bias, , hipMemcpyHostToDevice); /* Layer 4. 
Max Pooling */ int l4_pool_dim = int l4_pad = int l4_stride = int l4_out_dim = int l4_out_channels = // Describing Operands cudnnTensorDescriptor_t l4_out_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l4_out_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l4_out_descriptor, /*format=*/ , /*dataType=*/ , /*batch_size=*/ , /*channels=*/ , /*height=*/ , /*width=*/ )); cudnnPoolingDescriptor_t l4_pool_descriptor; checkCUDNN(cudnnCreatePoolingDescriptor(&l4_pool_descriptor)); checkCUDNN(cudnnSetPooling2dDescriptor(l4_pool_descriptor, /*poolingMode=*/CUDNN_POOLING_MAX, /*NanPropagationMode=*/CUDNN_PROPAGATE_NAN, , , , , , )); /* Allocating Memory for Layer 2 */ int l4_out_bytes = float* d_l4_output{NULL}; hipMalloc(&d_l4_output, l4_out_bytes); hipMemset(d_l4_output, 0, l4_out_bytes); /* Layer 5. Fully Connected Layer */ int l5_fc_in_dim = (l4_out_channels * l4_out_dim * l4_out_dim); int l5_fc_out_dim = 500; int l5_fc_neuron_size = l5_fc_in_dim * l5_fc_out_dim; char* l5_weight_file = "pretrained/ip1.bin"; char* l5_weight_bias_file = "pretrained/ip1.bias.bin"; cudnnActivationDescriptor_t l5_fc_activation_descriptor; checkCUDNN(cudnnCreateActivationDescriptor(&l5_fc_activation_descriptor)); checkCUDNN(cudnnSetActivationDescriptor(l5_fc_activation_descriptor, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); cudnnTensorDescriptor_t l5_relu_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l5_relu_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l5_relu_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, l5_fc_out_dim, 1, 1)); /* Allocating memory for Layer 5 */ float* l5_fc_neuron = (float*)malloc (l5_fc_neuron_size * sizeof(float)); load_weight(l5_fc_neuron, l5_fc_neuron_size, l5_weight_file); float* l5_fc_neuron_bias = (float*)malloc (l5_fc_out_dim * sizeof(float)); load_weight(l5_fc_neuron_bias, l5_fc_out_dim, l5_weight_bias_file); float* d_l5_fc_neuron{NULL}; checkCUDA(hipMalloc(&d_l5_fc_neuron, l5_fc_neuron_size * sizeof(float))); checkCUDA(hipMemcpy(d_l5_fc_neuron, l5_fc_neuron, l5_fc_neuron_size * sizeof(float), hipMemcpyHostToDevice)); float* d_l5_fc_neuron_bias{NULL}; checkCUDA(hipMalloc(&d_l5_fc_neuron_bias, l5_fc_out_dim * sizeof(float))); checkCUDA(hipMemcpy(d_l5_fc_neuron_bias, l5_fc_neuron_bias, l5_fc_out_dim * sizeof(float), hipMemcpyHostToDevice)); float* d_l5_fc_output{NULL}; checkCUDA(hipMalloc(&d_l5_fc_output, batch_size * l5_fc_out_dim * sizeof(float))); checkCUDA(hipMemset(d_l5_fc_output, 0, l5_fc_out_dim * sizeof(float))); float* d_l5_relu_output{NULL}; checkCUDA(hipMalloc(&d_l5_relu_output, batch_size * l5_fc_out_dim * sizeof(float))); checkCUDA(hipMemset(d_l5_relu_output, 0, l5_fc_out_dim * sizeof(float))); /* Layer 6. 
Fully Connected Layer */ int l6_fc_in_dim = l5_fc_out_dim; int l6_fc_out_dim = 10; int l6_fc_neuron_size = l6_fc_in_dim * l6_fc_out_dim; char* l6_weight_file = "pretrained/ip2.bin"; char* l6_weight_bias_file = "pretrained/ip2.bias.bin"; cudnnActivationDescriptor_t l6_fc_activation_descriptor; checkCUDNN(cudnnCreateActivationDescriptor(&l6_fc_activation_descriptor)); checkCUDNN(cudnnSetActivationDescriptor(l6_fc_activation_descriptor, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); cudnnTensorDescriptor_t l6_softmax_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l6_softmax_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l6_softmax_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, l6_fc_out_dim, 1, 1)); /* Allocating memory for Layer 6 */ float* l6_fc_neuron = (float*)malloc (l6_fc_neuron_size * sizeof(float)); load_weight(l6_fc_neuron, l6_fc_neuron_size, l6_weight_file); float* l6_fc_neuron_bias = (float*)malloc (l6_fc_out_dim * sizeof(float)); load_weight(l6_fc_neuron_bias, l6_fc_out_dim, l6_weight_bias_file); float* l6_softmax_output = (float*)malloc (l6_fc_out_dim * sizeof(float)); float* d_l6_fc_neuron{NULL}; checkCUDA(hipMalloc(&d_l6_fc_neuron, l6_fc_neuron_size * sizeof(float))); checkCUDA(hipMemcpy(d_l6_fc_neuron, l6_fc_neuron, l6_fc_neuron_size * sizeof(float), hipMemcpyHostToDevice)); float* d_l6_fc_neuron_bias{NULL}; checkCUDA(hipMalloc(&d_l6_fc_neuron_bias, l6_fc_out_dim * sizeof(float))); checkCUDA(hipMemcpy(d_l6_fc_neuron_bias, l6_fc_neuron_bias, l6_fc_out_dim * sizeof(float), hipMemcpyHostToDevice)); float* d_l6_fc_output{NULL}; checkCUDA(hipMalloc(&d_l6_fc_output, batch_size * l6_fc_out_dim * sizeof(float))); checkCUDA(hipMemset(d_l6_fc_output, 0, l6_fc_out_dim * sizeof(float))); float* d_l6_softmax_output{NULL}; checkCUDA(hipMalloc(&d_l6_softmax_output, batch_size * l6_fc_out_dim * sizeof(float))); checkCUDA(hipMemset(d_l6_softmax_output, 0, l6_fc_out_dim * sizeof(float))); /* Forward */ struct timeval start, end; gettimeofday(&start, NULL); // Allocating Memory for Workspace void* d_workspace{NULL}; hipMalloc(&d_workspace, workspace_bytes); // One vector for FC float *d_onevec{NULL}; checkCUDA(hipMalloc(&d_onevec, batch_size * sizeof(float))); checkCUDA(hipMemset(d_onevec, 1, batch_size * sizeof(float))); /* Layer 1. Convolution */ const float alpha = 1, beta = 0; checkCUDNN(cudnnConvolutionForward(cudnn, &alpha, input_descriptor, /*input device mem=*/d_input, l1_kernel_descriptor, /*kernel device mem*/d_l1_kernel, l1_convolution_descriptor, l1_convolution_algorithm, d_workspace, workspace_bytes, &beta, l1_out_descriptor, /*output device mem=*/d_l1_output)); // Add bias checkCUDNN(cudnnAddTensor(cudnn, &alpha, l1_kernel_bias_descriptor, d_l1_kernel_bias, &alpha, l1_out_descriptor, d_l1_output)); /* Layer 2. Max Pooling */ checkCUDNN(cudnnPoolingForward(cudnn, l2_pool_descriptor, &alpha, l1_out_descriptor, d_l1_output, &beta, l2_out_descriptor, d_l2_output)); /* Layer 3. Convolution */ checkCUDNN(cudnnConvolutionForward(cudnn, &alpha, l2_out_descriptor, /*input device mem=*/d_l2_output, l3_kernel_descriptor, /*kernel device mem*/d_l3_kernel, l3_convolution_descriptor, l3_convolution_algorithm, d_workspace, workspace_bytes, &beta, l3_out_descriptor, /*output device mem=*/d_l3_output)); // Add bias checkCUDNN(cudnnAddTensor(cudnn, &alpha, l3_kernel_bias_descriptor, d_l3_kernel_bias, &alpha, l3_out_descriptor, d_l3_output)); /* Layer 4. 
Max Pooling */ checkCUDNN(cudnnPoolingForward(cudnn, l4_pool_descriptor, &alpha, l3_out_descriptor, d_l3_output, &beta, l4_out_descriptor, d_l4_output)); /* Layer 5. Fully Connected */ // FC1 layer // Forward propagate neurons using weights checkCUBLAS(hipblasSgemm(cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, l5_fc_out_dim, batch_size, l5_fc_in_dim, &alpha, d_l5_fc_neuron, l5_fc_in_dim, d_l4_output, l5_fc_in_dim, &beta, d_l5_fc_output, l5_fc_out_dim)); // Add bias using GEMM's "beta" checkCUBLAS(hipblasSgemm(cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, l5_fc_out_dim, batch_size, 1, &alpha, d_l5_fc_neuron_bias, l5_fc_out_dim, d_onevec, 1, &alpha, d_l5_fc_output, l5_fc_out_dim)); // ReLU activation checkCUDNN(cudnnActivationForward(cudnn, l5_fc_activation_descriptor, &alpha, l5_relu_descriptor, d_l5_fc_output, &beta, l5_relu_descriptor, d_l5_relu_output)); /* Layer 6. Fully Connected (Softmax) */ // FC2 layer checkCUBLAS(hipblasSgemm(cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, l6_fc_out_dim, batch_size, l6_fc_in_dim, &alpha, d_l6_fc_neuron, l6_fc_in_dim, d_l5_relu_output, l6_fc_in_dim, &beta, d_l6_fc_output, l6_fc_out_dim)); // Add bias using GEMM's "beta" checkCUBLAS(hipblasSgemm(cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, l6_fc_out_dim, batch_size, 1, &alpha, d_l6_fc_neuron_bias, l6_fc_out_dim, d_onevec, 1, &alpha, d_l6_fc_output, l6_fc_out_dim)); // Softmax loss checkCUDNN(cudnnSoftmaxForward(cudnn, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, l6_softmax_descriptor, d_l6_fc_output, &beta, l6_softmax_descriptor, d_l6_softmax_output)); /* Show result */ checkCUDA(hipMemcpy(l6_softmax_output, d_l6_softmax_output, l6_fc_out_dim * sizeof(float), hipMemcpyDeviceToHost)); gettimeofday(&end, NULL); int i, chosen = 0; for (i = 0; i < l6_fc_out_dim; i++) { printf("%d: %.2f\n", i, l6_softmax_output[i]); if (l6_softmax_output[i] > l6_softmax_output[chosen]) chosen = i; } printf("\nPredict: %d\n", chosen); printf("Time: %f\n", getMillisecond(start, end)); /* Free */ // input cudnnDestroyTensorDescriptor(input_descriptor); hipFree(d_input); // Layer 1 cudnnDestroyTensorDescriptor(l1_out_descriptor); cudnnDestroyFilterDescriptor(l1_kernel_descriptor); cudnnDestroyTensorDescriptor(l1_kernel_bias_descriptor); cudnnDestroyConvolutionDescriptor(l1_convolution_descriptor); hipFree(d_l1_output); hipFree(d_l1_kernel); hipFree(d_l1_kernel_bias); free(l1_kernel); free(l1_kernel_bias); // Layer 2 cudnnDestroyTensorDescriptor(l2_out_descriptor); cudnnDestroyPoolingDescriptor(l2_pool_descriptor); hipFree(d_l2_output); // Layer 3 cudnnDestroyTensorDescriptor(l3_out_descriptor); cudnnDestroyFilterDescriptor(l3_kernel_descriptor); cudnnDestroyTensorDescriptor(l3_kernel_bias_descriptor); cudnnDestroyConvolutionDescriptor(l3_convolution_descriptor); hipFree(d_l3_output); hipFree(d_l3_kernel); hipFree(d_l3_kernel_bias); free(l3_kernel); free(l3_kernel_bias); // Layer 4 cudnnDestroyTensorDescriptor(l4_out_descriptor); cudnnDestroyPoolingDescriptor(l4_pool_descriptor); hipFree(d_l4_output); // Layer 5 checkCUDNN(cudnnDestroyActivationDescriptor(l5_fc_activation_descriptor)); checkCUDNN(cudnnDestroyTensorDescriptor(l5_relu_descriptor)); checkCUDA(hipFree(d_l5_fc_output)); checkCUDA(hipFree(d_l5_fc_neuron)); checkCUDA(hipFree(d_l5_fc_neuron_bias)); checkCUDA(hipFree(d_l5_relu_output)); free(l5_fc_neuron); free(l5_fc_neuron_bias); // Layer 6 checkCUDNN(cudnnDestroyActivationDescriptor(l6_fc_activation_descriptor)); checkCUDNN(cudnnDestroyTensorDescriptor(l6_softmax_descriptor)); checkCUDA(hipFree(d_l6_fc_output)); 
  checkCUDA(hipFree(d_l6_fc_neuron));
  checkCUDA(hipFree(d_l6_fc_neuron_bias));
  checkCUDA(hipFree(d_l6_softmax_output));
  free(l6_fc_neuron);
  free(l6_fc_neuron_bias);
  free(l6_softmax_output);

  // etc
  hipFree(d_onevec);
  hipFree(d_workspace);

  hipblasDestroy(cublas);
  cudnnDestroy(cudnn);

  return 0;
}
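/*
 * Note on the Layer 3 / Layer 4 blanks above: the second convolution is left as a
 * fill-in template.  The pretrained files it loads (pretrained/conv2.bin, and an
 * ip1 layer with 500 outputs) match the Caffe LeNet MNIST example, so a plausible
 * completion -- stated here as an assumption, not as the original author's values --
 * is a 5x5 kernel, 50 output channels, stride 1, no padding, applied to the 12x12
 * maps from Layer 2, giving 8x8 outputs.  The sketch below only fills in the blank
 * declarations and cudnnSet* calls; it reuses the descriptors, batch_size,
 * l2_out_dim / l2_out_channels and the checkCUDNN macro from the template above.
 */
int l3_kernel_dim   = 5;      // assumed: Caffe LeNet conv2 kernel size
int l3_pad          = 0;
int l3_stride       = 1;
int l3_dilation     = 1;
int l3_out_channels = 50;     // assumed: Caffe LeNet conv2 output channels
// With dilation 1: out = (in + 2*pad - kernel)/stride + 1 = (12 - 5)/1 + 1 = 8
int l3_out_dim      = (l2_out_dim + 2*l3_pad - l3_kernel_dim) / l3_stride + 1;

checkCUDNN(cudnnSetTensor4dDescriptor(l3_out_descriptor,
                                      CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                      batch_size, l3_out_channels,
                                      l3_out_dim, l3_out_dim));
checkCUDNN(cudnnSetTensor4dDescriptor(l3_kernel_bias_descriptor,
                                      CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                      1, l3_out_channels, 1, 1));
checkCUDNN(cudnnSetFilter4dDescriptor(l3_kernel_descriptor,
                                      CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
                                      l3_out_channels, l2_out_channels,
                                      l3_kernel_dim, l3_kernel_dim));
checkCUDNN(cudnnSetConvolution2dDescriptor(l3_convolution_descriptor,
                                           l3_pad, l3_pad,
                                           l3_stride, l3_stride,
                                           l3_dilation, l3_dilation,
                                           CUDNN_CROSS_CORRELATION,
                                           CUDNN_DATA_FLOAT));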
38c6e9cb13908909f7dd6f4a3d0c4d6544f65e77.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <cublas_v2.h> #include <cudnn.h> #include <opencv2/opencv.hpp> #define getMillisecond(start, end) \ (end.tv_sec-start.tv_sec)*1000 + \ (end.tv_usec-start.tv_usec)/1000.0 #define checkCUDA(expression) \ { \ cudaError_t status = (expression); \ if (status != cudaSuccess) { \ printf("Error on line %d: err code %d (%s)\n", \ __LINE__, status, cudaGetErrorString(status)); \ exit(EXIT_FAILURE); \ } \ } #define checkCUBLAS(expression) \ { \ cublasStatus_t status = (expression); \ if (status != CUBLAS_STATUS_SUCCESS) { \ printf("Error on line %d: err code %d\n", \ __LINE__, status); \ exit(EXIT_FAILURE); \ } \ } #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ printf("Error on line %d: err code %d (%s)\n", \ __LINE__, status, cudnnGetErrorString(status)); \ exit(EXIT_FAILURE); \ } \ } cv::Mat load_image(const char* image_path) { cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_GRAYSCALE); image.convertTo(image, CV_32FC1); cv::normalize(image, image, 0, 1, cv::NORM_MINMAX); return image; } bool load_weight(float* p_weight, int elemCount, const char* filename) { // Read weights file FILE *fp = fopen(filename, "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", filename); return false; } fread(p_weight, sizeof(float), elemCount, fp); fclose(fp); return true; } int main(int argc, char const *argv[]) { cublasHandle_t cublas; cudnnHandle_t cudnn; checkCUBLAS(cublasCreate(&cublas)); checkCUDNN(cudnnCreate(&cudnn)); int batch_size = 1; size_t workspace_bytes = 0; cv::Mat image = load_image("image/input.pgm"); /* Input */ int input_dim = 28; int input_channels = 1; float* d_input{NULL}; int input_bytes = batch_size * input_channels * input_dim * input_dim * sizeof(float); cudaMalloc(&d_input, input_bytes); cudaMemcpy(d_input, image.ptr<float>(0), input_bytes, cudaMemcpyHostToDevice); // Input Tensor cudnnTensorDescriptor_t input_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/batch_size, /*channels=*/input_channels, /*height=*/input_dim, /*width=*/input_dim)); /* Layer 1. 
Convolution */ int l1_kernel_dim = 5; int l1_pad = 0; int l1_stride = 1; int l1_dilation = 1; int l1_out_dim = 24; int l1_out_channels = 20; char* l1_weight_file = "pretrained/conv1.bin"; char* l1_weight_bias_file = "pretrained/conv1.bias.bin"; // Describing Operands cudnnTensorDescriptor_t l1_out_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l1_out_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l1_out_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/batch_size, /*channels=*/l1_out_channels, /*height=*/l1_out_dim, /*width=*/l1_out_dim)); cudnnTensorDescriptor_t l1_kernel_bias_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l1_kernel_bias_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l1_kernel_bias_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/l1_out_channels, /*height=*/1, /*width=*/1)); cudnnFilterDescriptor_t l1_kernel_descriptor; checkCUDNN(cudnnCreateFilterDescriptor(&l1_kernel_descriptor)); checkCUDNN(cudnnSetFilter4dDescriptor(l1_kernel_descriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/l1_out_channels, /*in_channels=*/input_channels, /*kernel_height=*/l1_kernel_dim, /*kernel_width=*/l1_kernel_dim)); // Describing the Convolution Kernel cudnnConvolutionDescriptor_t l1_convolution_descriptor; checkCUDNN(cudnnCreateConvolutionDescriptor(&l1_convolution_descriptor)); checkCUDNN(cudnnSetConvolution2dDescriptor(l1_convolution_descriptor, /*pad_height=*/l1_pad, /*pad_width=*/l1_pad, /*vertical_stride=*/l1_stride, /*horizontal_stride=*/l1_stride, /*dilation_height=*/l1_dilation, /*dilation_width=*/l1_dilation, /*mode=*/CUDNN_CROSS_CORRELATION, /*dataType=*/CUDNN_DATA_FLOAT )); cudnnConvolutionFwdAlgo_t l1_convolution_algorithm; checkCUDNN( cudnnGetConvolutionForwardAlgorithm(cudnn, input_descriptor, l1_kernel_descriptor, l1_convolution_descriptor, l1_out_descriptor, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, /*memoryLimitInBytes=*/0, &l1_convolution_algorithm)); size_t l1_workspace_bytes = 0; checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn, input_descriptor, l1_kernel_descriptor, l1_convolution_descriptor, l1_out_descriptor, l1_convolution_algorithm, &l1_workspace_bytes)); workspace_bytes = max(workspace_bytes, l1_workspace_bytes); /* Allocating Memory for Layer 1 */ int l1_out_bytes = batch_size * l1_out_channels * l1_out_dim * l1_out_dim * sizeof(float); float* d_l1_output{NULL}; cudaMalloc(&d_l1_output, l1_out_bytes); cudaMemset(d_l1_output, 0, l1_out_bytes); int l1_kernel_bytes = input_channels * l1_out_channels * l1_kernel_dim * l1_kernel_dim * sizeof(float); float* l1_kernel = (float*)malloc (l1_kernel_bytes); float* l1_kernel_bias = (float*)malloc (l1_out_channels * sizeof(float)); // load pretrained weight load_weight(l1_kernel, input_channels * l1_out_channels * l1_kernel_dim * l1_kernel_dim, l1_weight_file); load_weight(l1_kernel_bias, l1_out_channels, l1_weight_bias_file); float* d_l1_kernel{NULL}; cudaMalloc(&d_l1_kernel, l1_kernel_bytes); cudaMemcpy(d_l1_kernel, l1_kernel, l1_kernel_bytes, cudaMemcpyHostToDevice); float* d_l1_kernel_bias{NULL}; cudaMalloc(&d_l1_kernel_bias, l1_out_channels * sizeof(float)); cudaMemcpy(d_l1_kernel_bias, l1_kernel_bias, l1_out_channels * sizeof(float), cudaMemcpyHostToDevice); /* Layer 2. 
Max Pooling */ int l2_pool_dim = 2; int l2_pad = 0; int l2_stride = 2; int l2_out_dim = (l1_out_dim + l2_pad*2) / l2_stride; int l2_out_channels = l1_out_channels; // Describing Operands cudnnTensorDescriptor_t l2_out_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l2_out_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l2_out_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/batch_size, /*channels=*/l2_out_channels, /*height=*/l2_out_dim, /*width=*/l2_out_dim)); cudnnPoolingDescriptor_t l2_pool_descriptor; checkCUDNN(cudnnCreatePoolingDescriptor(&l2_pool_descriptor)); checkCUDNN(cudnnSetPooling2dDescriptor(l2_pool_descriptor, /*poolingMode=*/CUDNN_POOLING_MAX, /*NanPropagationMode=*/CUDNN_PROPAGATE_NAN, l2_pool_dim, l2_pool_dim, l2_pad, l2_pad, l2_stride, l2_stride)); /* Allocating Memory for Layer 2 */ int l2_out_bytes = batch_size * l2_out_channels * l2_out_dim * l2_out_dim * sizeof(float); float* d_l2_output{NULL}; cudaMalloc(&d_l2_output, l2_out_bytes); cudaMemset(d_l2_output, 0, l2_out_bytes); /* Layer 3. Convolution */ int l3_kernel_dim = int l3_pad = int l3_stride = int l3_dilation = int l3_out_dim = int l3_out_channels = char* l3_weight_file = "pretrained/conv2.bin"; char* l3_weight_bias_file = "pretrained/conv2.bias.bin"; // Describing Operands cudnnTensorDescriptor_t l3_out_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l3_out_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l3_out_descriptor, /*format=*/ , /*dataType=*/ , /*batch_size=*/ , /*channels=*/ , /*height=*/ , /*width=*/ )); cudnnTensorDescriptor_t l3_kernel_bias_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l3_kernel_bias_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l3_kernel_bias_descriptor, /*format=*/ , /*dataType=*/ , /*batch_size=*/ , /*channels=*/ , /*height=*/ , /*width=*/ )); cudnnFilterDescriptor_t l3_kernel_descriptor; checkCUDNN(cudnnCreateFilterDescriptor(&l3_kernel_descriptor)); checkCUDNN(cudnnSetFilter4dDescriptor(l3_kernel_descriptor, /*dataType=*/ , /*format=*/ , /*out_channels=*/ , /*in_channels=*/ , /*kernel_height=*/ , /*kernel_width=*/ )); // Describing the Convolution Kernel cudnnConvolutionDescriptor_t l3_convolution_descriptor; checkCUDNN(cudnnCreateConvolutionDescriptor(&l3_convolution_descriptor)); checkCUDNN(cudnnSetConvolution2dDescriptor(l3_convolution_descriptor, /*pad_height=*/ , /*pad_width=*/ , /*vertical_stride=*/ , /*horizontal_stride=*/ , /*dilation_height=*/ , /*dilation_width=*/ , /*mode=*/ , /*dataType=*/ )); cudnnConvolutionFwdAlgo_t l3_convolution_algorithm; checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnn, , , , , , /*memoryLimitInBytes=*/0, )); size_t l3_workspace_bytes = 0; checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn, , , , , , )); workspace_bytes = max(workspace_bytes, l3_workspace_bytes); /* Allocating Memory for Layer 3 */ int l3_out_bytes = float* d_l3_output{NULL}; cudaMalloc(&d_l3_output, l3_out_bytes); cudaMemset(d_l3_output, 0, l3_out_bytes); int l3_kernel_bytes = float* l3_kernel = (float*)malloc (l3_kernel_bytes); float* l3_kernel_bias = (float*)malloc ( ); // load pretrained weight load_weight(l3_kernel, , l3_weight_file); load_weight(l3_kernel_bias, , l3_weight_bias_file); float* d_l3_kernel{NULL}; cudaMalloc(&d_l3_kernel, l3_kernel_bytes); cudaMemcpy(d_l3_kernel, l3_kernel, l3_kernel_bytes, cudaMemcpyHostToDevice); float* d_l3_kernel_bias{NULL}; cudaMalloc(&d_l3_kernel_bias, ); cudaMemcpy(d_l3_kernel_bias, l3_kernel_bias, , cudaMemcpyHostToDevice); /* Layer 4. 
Max Pooling */ int l4_pool_dim = int l4_pad = int l4_stride = int l4_out_dim = int l4_out_channels = // Describing Operands cudnnTensorDescriptor_t l4_out_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l4_out_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l4_out_descriptor, /*format=*/ , /*dataType=*/ , /*batch_size=*/ , /*channels=*/ , /*height=*/ , /*width=*/ )); cudnnPoolingDescriptor_t l4_pool_descriptor; checkCUDNN(cudnnCreatePoolingDescriptor(&l4_pool_descriptor)); checkCUDNN(cudnnSetPooling2dDescriptor(l4_pool_descriptor, /*poolingMode=*/CUDNN_POOLING_MAX, /*NanPropagationMode=*/CUDNN_PROPAGATE_NAN, , , , , , )); /* Allocating Memory for Layer 2 */ int l4_out_bytes = float* d_l4_output{NULL}; cudaMalloc(&d_l4_output, l4_out_bytes); cudaMemset(d_l4_output, 0, l4_out_bytes); /* Layer 5. Fully Connected Layer */ int l5_fc_in_dim = (l4_out_channels * l4_out_dim * l4_out_dim); int l5_fc_out_dim = 500; int l5_fc_neuron_size = l5_fc_in_dim * l5_fc_out_dim; char* l5_weight_file = "pretrained/ip1.bin"; char* l5_weight_bias_file = "pretrained/ip1.bias.bin"; cudnnActivationDescriptor_t l5_fc_activation_descriptor; checkCUDNN(cudnnCreateActivationDescriptor(&l5_fc_activation_descriptor)); checkCUDNN(cudnnSetActivationDescriptor(l5_fc_activation_descriptor, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); cudnnTensorDescriptor_t l5_relu_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l5_relu_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l5_relu_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, l5_fc_out_dim, 1, 1)); /* Allocating memory for Layer 5 */ float* l5_fc_neuron = (float*)malloc (l5_fc_neuron_size * sizeof(float)); load_weight(l5_fc_neuron, l5_fc_neuron_size, l5_weight_file); float* l5_fc_neuron_bias = (float*)malloc (l5_fc_out_dim * sizeof(float)); load_weight(l5_fc_neuron_bias, l5_fc_out_dim, l5_weight_bias_file); float* d_l5_fc_neuron{NULL}; checkCUDA(cudaMalloc(&d_l5_fc_neuron, l5_fc_neuron_size * sizeof(float))); checkCUDA(cudaMemcpy(d_l5_fc_neuron, l5_fc_neuron, l5_fc_neuron_size * sizeof(float), cudaMemcpyHostToDevice)); float* d_l5_fc_neuron_bias{NULL}; checkCUDA(cudaMalloc(&d_l5_fc_neuron_bias, l5_fc_out_dim * sizeof(float))); checkCUDA(cudaMemcpy(d_l5_fc_neuron_bias, l5_fc_neuron_bias, l5_fc_out_dim * sizeof(float), cudaMemcpyHostToDevice)); float* d_l5_fc_output{NULL}; checkCUDA(cudaMalloc(&d_l5_fc_output, batch_size * l5_fc_out_dim * sizeof(float))); checkCUDA(cudaMemset(d_l5_fc_output, 0, l5_fc_out_dim * sizeof(float))); float* d_l5_relu_output{NULL}; checkCUDA(cudaMalloc(&d_l5_relu_output, batch_size * l5_fc_out_dim * sizeof(float))); checkCUDA(cudaMemset(d_l5_relu_output, 0, l5_fc_out_dim * sizeof(float))); /* Layer 6. 
Fully Connected Layer */ int l6_fc_in_dim = l5_fc_out_dim; int l6_fc_out_dim = 10; int l6_fc_neuron_size = l6_fc_in_dim * l6_fc_out_dim; char* l6_weight_file = "pretrained/ip2.bin"; char* l6_weight_bias_file = "pretrained/ip2.bias.bin"; cudnnActivationDescriptor_t l6_fc_activation_descriptor; checkCUDNN(cudnnCreateActivationDescriptor(&l6_fc_activation_descriptor)); checkCUDNN(cudnnSetActivationDescriptor(l6_fc_activation_descriptor, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); cudnnTensorDescriptor_t l6_softmax_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&l6_softmax_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(l6_softmax_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, l6_fc_out_dim, 1, 1)); /* Allocating memory for Layer 6 */ float* l6_fc_neuron = (float*)malloc (l6_fc_neuron_size * sizeof(float)); load_weight(l6_fc_neuron, l6_fc_neuron_size, l6_weight_file); float* l6_fc_neuron_bias = (float*)malloc (l6_fc_out_dim * sizeof(float)); load_weight(l6_fc_neuron_bias, l6_fc_out_dim, l6_weight_bias_file); float* l6_softmax_output = (float*)malloc (l6_fc_out_dim * sizeof(float)); float* d_l6_fc_neuron{NULL}; checkCUDA(cudaMalloc(&d_l6_fc_neuron, l6_fc_neuron_size * sizeof(float))); checkCUDA(cudaMemcpy(d_l6_fc_neuron, l6_fc_neuron, l6_fc_neuron_size * sizeof(float), cudaMemcpyHostToDevice)); float* d_l6_fc_neuron_bias{NULL}; checkCUDA(cudaMalloc(&d_l6_fc_neuron_bias, l6_fc_out_dim * sizeof(float))); checkCUDA(cudaMemcpy(d_l6_fc_neuron_bias, l6_fc_neuron_bias, l6_fc_out_dim * sizeof(float), cudaMemcpyHostToDevice)); float* d_l6_fc_output{NULL}; checkCUDA(cudaMalloc(&d_l6_fc_output, batch_size * l6_fc_out_dim * sizeof(float))); checkCUDA(cudaMemset(d_l6_fc_output, 0, l6_fc_out_dim * sizeof(float))); float* d_l6_softmax_output{NULL}; checkCUDA(cudaMalloc(&d_l6_softmax_output, batch_size * l6_fc_out_dim * sizeof(float))); checkCUDA(cudaMemset(d_l6_softmax_output, 0, l6_fc_out_dim * sizeof(float))); /* Forward */ struct timeval start, end; gettimeofday(&start, NULL); // Allocating Memory for Workspace void* d_workspace{NULL}; cudaMalloc(&d_workspace, workspace_bytes); // One vector for FC float *d_onevec{NULL}; checkCUDA(cudaMalloc(&d_onevec, batch_size * sizeof(float))); checkCUDA(cudaMemset(d_onevec, 1, batch_size * sizeof(float))); /* Layer 1. Convolution */ const float alpha = 1, beta = 0; checkCUDNN(cudnnConvolutionForward(cudnn, &alpha, input_descriptor, /*input device mem=*/d_input, l1_kernel_descriptor, /*kernel device mem*/d_l1_kernel, l1_convolution_descriptor, l1_convolution_algorithm, d_workspace, workspace_bytes, &beta, l1_out_descriptor, /*output device mem=*/d_l1_output)); // Add bias checkCUDNN(cudnnAddTensor(cudnn, &alpha, l1_kernel_bias_descriptor, d_l1_kernel_bias, &alpha, l1_out_descriptor, d_l1_output)); /* Layer 2. Max Pooling */ checkCUDNN(cudnnPoolingForward(cudnn, l2_pool_descriptor, &alpha, l1_out_descriptor, d_l1_output, &beta, l2_out_descriptor, d_l2_output)); /* Layer 3. Convolution */ checkCUDNN(cudnnConvolutionForward(cudnn, &alpha, l2_out_descriptor, /*input device mem=*/d_l2_output, l3_kernel_descriptor, /*kernel device mem*/d_l3_kernel, l3_convolution_descriptor, l3_convolution_algorithm, d_workspace, workspace_bytes, &beta, l3_out_descriptor, /*output device mem=*/d_l3_output)); // Add bias checkCUDNN(cudnnAddTensor(cudnn, &alpha, l3_kernel_bias_descriptor, d_l3_kernel_bias, &alpha, l3_out_descriptor, d_l3_output)); /* Layer 4. 
Max Pooling */ checkCUDNN(cudnnPoolingForward(cudnn, l4_pool_descriptor, &alpha, l3_out_descriptor, d_l3_output, &beta, l4_out_descriptor, d_l4_output)); /* Layer 5. Fully Connected */ // FC1 layer // Forward propagate neurons using weights checkCUBLAS(cublasSgemm(cublas, CUBLAS_OP_T, CUBLAS_OP_N, l5_fc_out_dim, batch_size, l5_fc_in_dim, &alpha, d_l5_fc_neuron, l5_fc_in_dim, d_l4_output, l5_fc_in_dim, &beta, d_l5_fc_output, l5_fc_out_dim)); // Add bias using GEMM's "beta" checkCUBLAS(cublasSgemm(cublas, CUBLAS_OP_N, CUBLAS_OP_N, l5_fc_out_dim, batch_size, 1, &alpha, d_l5_fc_neuron_bias, l5_fc_out_dim, d_onevec, 1, &alpha, d_l5_fc_output, l5_fc_out_dim)); // ReLU activation checkCUDNN(cudnnActivationForward(cudnn, l5_fc_activation_descriptor, &alpha, l5_relu_descriptor, d_l5_fc_output, &beta, l5_relu_descriptor, d_l5_relu_output)); /* Layer 6. Fully Connected (Softmax) */ // FC2 layer checkCUBLAS(cublasSgemm(cublas, CUBLAS_OP_T, CUBLAS_OP_N, l6_fc_out_dim, batch_size, l6_fc_in_dim, &alpha, d_l6_fc_neuron, l6_fc_in_dim, d_l5_relu_output, l6_fc_in_dim, &beta, d_l6_fc_output, l6_fc_out_dim)); // Add bias using GEMM's "beta" checkCUBLAS(cublasSgemm(cublas, CUBLAS_OP_N, CUBLAS_OP_N, l6_fc_out_dim, batch_size, 1, &alpha, d_l6_fc_neuron_bias, l6_fc_out_dim, d_onevec, 1, &alpha, d_l6_fc_output, l6_fc_out_dim)); // Softmax loss checkCUDNN(cudnnSoftmaxForward(cudnn, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, l6_softmax_descriptor, d_l6_fc_output, &beta, l6_softmax_descriptor, d_l6_softmax_output)); /* Show result */ checkCUDA(cudaMemcpy(l6_softmax_output, d_l6_softmax_output, l6_fc_out_dim * sizeof(float), cudaMemcpyDeviceToHost)); gettimeofday(&end, NULL); int i, chosen = 0; for (i = 0; i < l6_fc_out_dim; i++) { printf("%d: %.2f\n", i, l6_softmax_output[i]); if (l6_softmax_output[i] > l6_softmax_output[chosen]) chosen = i; } printf("\nPredict: %d\n", chosen); printf("Time: %f\n", getMillisecond(start, end)); /* Free */ // input cudnnDestroyTensorDescriptor(input_descriptor); cudaFree(d_input); // Layer 1 cudnnDestroyTensorDescriptor(l1_out_descriptor); cudnnDestroyFilterDescriptor(l1_kernel_descriptor); cudnnDestroyTensorDescriptor(l1_kernel_bias_descriptor); cudnnDestroyConvolutionDescriptor(l1_convolution_descriptor); cudaFree(d_l1_output); cudaFree(d_l1_kernel); cudaFree(d_l1_kernel_bias); free(l1_kernel); free(l1_kernel_bias); // Layer 2 cudnnDestroyTensorDescriptor(l2_out_descriptor); cudnnDestroyPoolingDescriptor(l2_pool_descriptor); cudaFree(d_l2_output); // Layer 3 cudnnDestroyTensorDescriptor(l3_out_descriptor); cudnnDestroyFilterDescriptor(l3_kernel_descriptor); cudnnDestroyTensorDescriptor(l3_kernel_bias_descriptor); cudnnDestroyConvolutionDescriptor(l3_convolution_descriptor); cudaFree(d_l3_output); cudaFree(d_l3_kernel); cudaFree(d_l3_kernel_bias); free(l3_kernel); free(l3_kernel_bias); // Layer 4 cudnnDestroyTensorDescriptor(l4_out_descriptor); cudnnDestroyPoolingDescriptor(l4_pool_descriptor); cudaFree(d_l4_output); // Layer 5 checkCUDNN(cudnnDestroyActivationDescriptor(l5_fc_activation_descriptor)); checkCUDNN(cudnnDestroyTensorDescriptor(l5_relu_descriptor)); checkCUDA(cudaFree(d_l5_fc_output)); checkCUDA(cudaFree(d_l5_fc_neuron)); checkCUDA(cudaFree(d_l5_fc_neuron_bias)); checkCUDA(cudaFree(d_l5_relu_output)); free(l5_fc_neuron); free(l5_fc_neuron_bias); // Layer 6 checkCUDNN(cudnnDestroyActivationDescriptor(l6_fc_activation_descriptor)); checkCUDNN(cudnnDestroyTensorDescriptor(l6_softmax_descriptor)); checkCUDA(cudaFree(d_l6_fc_output)); 
  checkCUDA(cudaFree(d_l6_fc_neuron));
  checkCUDA(cudaFree(d_l6_fc_neuron_bias));
  checkCUDA(cudaFree(d_l6_softmax_output));
  free(l6_fc_neuron);
  free(l6_fc_neuron_bias);
  free(l6_softmax_output);

  // etc
  cudaFree(d_onevec);
  cudaFree(d_workspace);

  cublasDestroy(cublas);
  cudnnDestroy(cudnn);

  return 0;
}
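/*
 * The two cublasSgemm calls per fully connected layer use the usual trick of
 * adding the bias by multiplying it with a length-batch_size vector of ones
 * (d_onevec).  Note that cudaMemset(d_onevec, 1, batch_size * sizeof(float))
 * sets every *byte* to 0x01, so each float ends up as roughly 2.4e-38 rather
 * than 1.0f, and the biases are effectively never added.  A minimal way to
 * fill the vector with real ones (h_onevec is a new helper buffer, not part
 * of the original file):
 */
float *h_onevec = (float *)malloc(batch_size * sizeof(float));
for (int n = 0; n < batch_size; n++)
    h_onevec[n] = 1.0f;
checkCUDA(cudaMemcpy(d_onevec, h_onevec, batch_size * sizeof(float),
                     cudaMemcpyHostToDevice));
free(h_onevec);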
9c3953a84018d6c61aed04e08ce1341480d6ad73.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // *********************************************************************** // Numerical discretization on a grid using Gauss-Legendre quadrature // // *********************************************************************** /* MIT License Copyright (c) 2018 Kunal Kumar Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <iostream> #include <math.h> #include <fstream> #include <vector> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/transform.h> using namespace thrust::placeholders; // // Definig the CLOCK for performance testing. // long long wall_clock_time() { #ifdef __linux__ struct timespec tp; clock_gettime(CLOCK_REALTIME, &tp); return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll); #else struct timeval tv; gettimeofday(&tv, NULL); return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll); #endif } // // The parameters to compute the discrete centers (Xn, Yn) of the expansion // functions Psi_n(x,y) are defined here. // The axis limits along the x-axis are given by AXIS_MIN_X and AXIS_MAX_X, the // axis limits along the y-axis are given by AXIS_MIN_Y and AXIS_MAX_Y. // // NOTE: These axis limits are not the limits of integration. The limits of // integration are (Xn - lx/2, Xn + lx/2) and (Yn - ly/2, Yn + ly/2). // // The number of discrete points Xn and Yn are given by NUM_PTS_X and NUM_PTS_Y. // These points can have different sizes and should be a multiple of the // BLOCK_SIZE in the respective dimension. // #define AXIS_MIN_X -1 #define AXIS_MAX_X 1 #define AXIS_MIN_Y -1 #define AXIS_MAX_Y 1 #define NUM_PSI_X 256 #define NUM_PSI_Y 256 // // The CUDA parameters are defined here. // The BLOCK_SIZE parameter for the CUDA x-dimension can be different than the // CUDA y-dimension. // // The Z_BLOCK_SIZE should be a factor of sizeof(Gy)/sizeof(Gy[0]). 
// #define BLOCK_SIZE 16 #define Z_BLOCK_SIZE 4 #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else static __inline__ __device__ double atomicAdd(double *address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; if (val==0.0) return __longlong_as_double(old); do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif // // Define the Gauss-Hermite nodes n_i and weights w_i for // the two integrals. The size of Gy and Gx can be different depending on the // required precision of the quadrature approximation. // __constant__ double Gy[8][2] = {{-0.960289856498,0.10122853629},{-0.796666477414,0.222381034453}, {-0.525532409916,0.313706645878},{-0.183434642496,0.362683783378}, {0.183434642496,0.362683783378},{0.525532409916,0.313706645878}, {0.796666477414,0.222381034453},{0.960289856498,0.10122853629}}; __constant__ double Gx[8][2] = {{-0.960289856498,0.10122853629},{-0.796666477414,0.222381034453}, {-0.525532409916,0.313706645878},{-0.183434642496,0.362683783378}, {0.183434642496,0.362683783378},{0.525532409916,0.313706645878}, {0.796666477414,0.222381034453},{0.960289856498,0.10122853629}}; // // Declare the global vectors Xn, Yn, Cn, and Del here. // thrust::host_vector<double> Del; thrust::host_vector<double> Xn; thrust::host_vector<double> Yn; thrust::host_vector<double> Cn(NUM_PSI_X * NUM_PSI_Y); // // Define the function f(x,y) here. // __device__ double Fun(double x, double y) { return exp(-(pow(x,2) + pow(y,2))/0.5); } // // The inner quadrature sum, with weights wx and nodes nx, is computed here. // __device__ double Sum(double *ptrXn, double *ptrDel, double *ny, int *idx) { double a = ptrXn[*idx] - ptrDel[0]/2; double b = ptrXn[*idx] + ptrDel[0]/2; double C3 = 0.5*(b - a); double C4 = 0.5*(b + a); double nx, wx, Q1 = 0.0f;; int Nx = sizeof(Gx)/sizeof(Gx[0]); for (int k=0; k<Nx; k++) { nx = C4 + C3 * Gx[k][0]; wx = Gx[k][1]; Q1 += wx * Fun(nx, *ny); } return C3*Q1; } // // The CUDA kernel is defined here and the outer quadrature sum, with weights // wy and nodes ny, is computed here. 
// __global__ void Discretization_Kernel(double *ptrXn, double *ptrYn, double *ptrCn, double *ptrDel){ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; int idy = blockIdx.y * BLOCK_SIZE + threadIdx.y; int idz = blockIdx.z * Z_BLOCK_SIZE + threadIdx.z; double c = ptrYn[idy] - ptrDel[1]/2; double d = ptrYn[idy] + ptrDel[1]/2; double C1 = 0.5*(d - c); double C2 = 0.5*(d + c); double ny, wy; int stride_z = blockDim.z * gridDim.z; int Ny = sizeof(Gy)/sizeof(Gy[0]); while (idz < Ny ) { ny = C2 + C1 * Gy[idz][0]; wy = C1 * Gy[idz][1]; atomicAdd( &( ptrCn[idy * NUM_PSI_X + idx]), wy * Sum(ptrXn, ptrDel, &ny, &idx)); idz += stride_z; } } int Kernelcall(){ thrust::device_vector<double> d_Del = Del; thrust::device_vector<double> d_Xn = Xn; thrust::device_vector<double> d_Yn = Yn; thrust::device_vector<double> d_Cn = Cn; double * ptrDel = thrust::raw_pointer_cast(&d_Del[0]); double * ptrXn = thrust::raw_pointer_cast(&d_Xn[0]); double * ptrYn = thrust::raw_pointer_cast(&d_Yn[0]); double * ptrCn = thrust::raw_pointer_cast(&d_Cn[0]); int Ny = sizeof(Gy)/sizeof(Gy[0]); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, Z_BLOCK_SIZE); dim3 dimGrid((Xn.size() + dimBlock.x - 1) / dimBlock.x, (Yn.size() + dimBlock.y - 1) / dimBlock.y, (Ny + dimBlock.z - 1) / dimBlock.z); hipLaunchKernelGGL(( Discretization_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ptrXn, ptrYn, ptrCn, ptrDel); thrust::copy(d_Cn.begin(), d_Cn.end(), Cn.begin()); // // Constant required since <Psi_n(x,y), Psi_m(x,y)> = lx*ly*Delta_nm // double NormSquared = 1/(Del[0]* Del[1]); thrust::transform(Cn.begin(), Cn.end(), Cn.begin(), NormSquared * _1 ); hipError_t rc; rc = hipGetLastError(); if (rc != hipSuccess) printf("Last CUDA error %s\n", hipGetErrorString(rc)); // // Save result to a file // char buffer[32]; // The filename buffer. snprintf(buffer, sizeof(char) * 32, "FILE%i.txt", 0); std::ofstream out(buffer, std::ios_base::app); out.setf(std::ios::scientific); if( !out ) { std::cout << "Couldn't open file." << std::endl; return 1; } for (int i = 0; i < NUM_PSI_Y; i++) { for (int j = 0; j < NUM_PSI_X; j++) { out << Cn[i * NUM_PSI_X + j] <<','; } out <<'\n'; } out.close(); return 0; } // // The main() function. // int main(int argc, char *argv[]){ long long before, after; before = wall_clock_time(); // TIME START double xl = AXIS_MIN_X, xr = AXIS_MAX_X, yl = AXIS_MIN_Y, yr = AXIS_MAX_Y; int xpix = NUM_PSI_X, ypix = NUM_PSI_Y; Del.push_back((xr - xl) / xpix); Del.push_back((yr - yl) / ypix); for(int i=0; i < xpix; i++){ Xn.push_back(xl + Del[0] * (i + 0.5)); } for(int i=0; i < ypix; i++){ Yn.push_back(yl + Del[1] * (i + 0.5)); } Kernelcall(); after = wall_clock_time(); // TIME END fprintf(stderr, "Process took %3.5f seconds ", ((float)(after - before))/1000000000); return 0; }
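/*
 * Each Cn entry is the integral of f(x,y) = exp(-(x^2+y^2)/0.5) over one cell,
 * divided by the cell area Del[0]*Del[1].  Because the integrand separates as
 * exp(-2x^2)*exp(-2y^2), the integral over the full domain [-1,1]^2 has the
 * closed form (sqrt(pi/2)*erf(sqrt(2)))^2, which gives a cheap host-side sanity
 * check of the quadrature.  The helper below is not part of the original
 * program; it only assumes the Cn and Del vectors defined above.
 */
#include <cmath>    // std::erf, std::sqrt, std::fabs (duplicates math.h above, harmless)

static void check_total_integral(const thrust::host_vector<double> &Cn,
                                 double lx, double ly) {
  double total = 0.0;
  for (size_t i = 0; i < Cn.size(); i++)
    total += Cn[i] * lx * ly;              // undo the 1/(lx*ly) normalization

  const double pi    = std::acos(-1.0);
  const double one_d = std::sqrt(pi / 2.0) * std::erf(std::sqrt(2.0));
  const double exact = one_d * one_d;      // separable Gaussian integrand

  printf("quadrature total = %.12f  analytic = %.12f  diff = %.3e\n",
         total, exact, std::fabs(total - exact));
}

// Usage (e.g. right after Kernelcall() in main): check_total_integral(Cn, Del[0], Del[1]);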
9c3953a84018d6c61aed04e08ce1341480d6ad73.cu
// *********************************************************************** // Numerical discretization on a grid using Gauss-Legendre quadrature // // *********************************************************************** /* MIT License Copyright (c) 2018 Kunal Kumar Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <iostream> #include <math.h> #include <fstream> #include <vector> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/transform.h> using namespace thrust::placeholders; // // Definig the CLOCK for performance testing. // long long wall_clock_time() { #ifdef __linux__ struct timespec tp; clock_gettime(CLOCK_REALTIME, &tp); return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll); #else struct timeval tv; gettimeofday(&tv, NULL); return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll); #endif } // // The parameters to compute the discrete centers (Xn, Yn) of the expansion // functions Psi_n(x,y) are defined here. // The axis limits along the x-axis are given by AXIS_MIN_X and AXIS_MAX_X, the // axis limits along the y-axis are given by AXIS_MIN_Y and AXIS_MAX_Y. // // NOTE: These axis limits are not the limits of integration. The limits of // integration are (Xn - lx/2, Xn + lx/2) and (Yn - ly/2, Yn + ly/2). // // The number of discrete points Xn and Yn are given by NUM_PTS_X and NUM_PTS_Y. // These points can have different sizes and should be a multiple of the // BLOCK_SIZE in the respective dimension. // #define AXIS_MIN_X -1 #define AXIS_MAX_X 1 #define AXIS_MIN_Y -1 #define AXIS_MAX_Y 1 #define NUM_PSI_X 256 #define NUM_PSI_Y 256 // // The CUDA parameters are defined here. // The BLOCK_SIZE parameter for the CUDA x-dimension can be different than the // CUDA y-dimension. // // The Z_BLOCK_SIZE should be a factor of sizeof(Gy)/sizeof(Gy[0]). // #define BLOCK_SIZE 16 #define Z_BLOCK_SIZE 4 #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else static __inline__ __device__ double atomicAdd(double *address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; if (val==0.0) return __longlong_as_double(old); do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif // // Define the Gauss-Hermite nodes n_i and weights w_i for // the two integrals. 
The size of Gy and Gx can be different depending on the // required precision of the quadrature approximation. // __constant__ double Gy[8][2] = {{-0.960289856498,0.10122853629},{-0.796666477414,0.222381034453}, {-0.525532409916,0.313706645878},{-0.183434642496,0.362683783378}, {0.183434642496,0.362683783378},{0.525532409916,0.313706645878}, {0.796666477414,0.222381034453},{0.960289856498,0.10122853629}}; __constant__ double Gx[8][2] = {{-0.960289856498,0.10122853629},{-0.796666477414,0.222381034453}, {-0.525532409916,0.313706645878},{-0.183434642496,0.362683783378}, {0.183434642496,0.362683783378},{0.525532409916,0.313706645878}, {0.796666477414,0.222381034453},{0.960289856498,0.10122853629}}; // // Declare the global vectors Xn, Yn, Cn, and Del here. // thrust::host_vector<double> Del; thrust::host_vector<double> Xn; thrust::host_vector<double> Yn; thrust::host_vector<double> Cn(NUM_PSI_X * NUM_PSI_Y); // // Define the function f(x,y) here. // __device__ double Fun(double x, double y) { return exp(-(pow(x,2) + pow(y,2))/0.5); } // // The inner quadrature sum, with weights wx and nodes nx, is computed here. // __device__ double Sum(double *ptrXn, double *ptrDel, double *ny, int *idx) { double a = ptrXn[*idx] - ptrDel[0]/2; double b = ptrXn[*idx] + ptrDel[0]/2; double C3 = 0.5*(b - a); double C4 = 0.5*(b + a); double nx, wx, Q1 = 0.0f;; int Nx = sizeof(Gx)/sizeof(Gx[0]); for (int k=0; k<Nx; k++) { nx = C4 + C3 * Gx[k][0]; wx = Gx[k][1]; Q1 += wx * Fun(nx, *ny); } return C3*Q1; } // // The CUDA kernel is defined here and the outer quadrature sum, with weights // wy and nodes ny, is computed here. // __global__ void Discretization_Kernel(double *ptrXn, double *ptrYn, double *ptrCn, double *ptrDel){ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; int idy = blockIdx.y * BLOCK_SIZE + threadIdx.y; int idz = blockIdx.z * Z_BLOCK_SIZE + threadIdx.z; double c = ptrYn[idy] - ptrDel[1]/2; double d = ptrYn[idy] + ptrDel[1]/2; double C1 = 0.5*(d - c); double C2 = 0.5*(d + c); double ny, wy; int stride_z = blockDim.z * gridDim.z; int Ny = sizeof(Gy)/sizeof(Gy[0]); while (idz < Ny ) { ny = C2 + C1 * Gy[idz][0]; wy = C1 * Gy[idz][1]; atomicAdd( &( ptrCn[idy * NUM_PSI_X + idx]), wy * Sum(ptrXn, ptrDel, &ny, &idx)); idz += stride_z; } } int Kernelcall(){ thrust::device_vector<double> d_Del = Del; thrust::device_vector<double> d_Xn = Xn; thrust::device_vector<double> d_Yn = Yn; thrust::device_vector<double> d_Cn = Cn; double * ptrDel = thrust::raw_pointer_cast(&d_Del[0]); double * ptrXn = thrust::raw_pointer_cast(&d_Xn[0]); double * ptrYn = thrust::raw_pointer_cast(&d_Yn[0]); double * ptrCn = thrust::raw_pointer_cast(&d_Cn[0]); int Ny = sizeof(Gy)/sizeof(Gy[0]); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, Z_BLOCK_SIZE); dim3 dimGrid((Xn.size() + dimBlock.x - 1) / dimBlock.x, (Yn.size() + dimBlock.y - 1) / dimBlock.y, (Ny + dimBlock.z - 1) / dimBlock.z); Discretization_Kernel<<<dimGrid, dimBlock>>>(ptrXn, ptrYn, ptrCn, ptrDel); thrust::copy(d_Cn.begin(), d_Cn.end(), Cn.begin()); // // Constant required since <Psi_n(x,y), Psi_m(x,y)> = lx*ly*Delta_nm // double NormSquared = 1/(Del[0]* Del[1]); thrust::transform(Cn.begin(), Cn.end(), Cn.begin(), NormSquared * _1 ); cudaError_t rc; rc = cudaGetLastError(); if (rc != cudaSuccess) printf("Last CUDA error %s\n", cudaGetErrorString(rc)); // // Save result to a file // char buffer[32]; // The filename buffer. 
  snprintf(buffer, sizeof(char) * 32, "FILE%i.txt", 0);
  std::ofstream out(buffer, std::ios_base::app);
  out.setf(std::ios::scientific);
  if( !out ) {
    std::cout << "Couldn't open file." << std::endl;
    return 1;
  }
  for (int i = 0; i < NUM_PSI_Y; i++) {
    for (int j = 0; j < NUM_PSI_X; j++) {
      out << Cn[i * NUM_PSI_X + j] <<',';
    }
    out <<'\n';
  }
  out.close();

  return 0;
}

//
// The main() function.
//
int main(int argc, char *argv[]){

  long long before, after;
  before = wall_clock_time(); // TIME START

  double xl = AXIS_MIN_X, xr = AXIS_MAX_X, yl = AXIS_MIN_Y, yr = AXIS_MAX_Y;
  int xpix = NUM_PSI_X, ypix = NUM_PSI_Y;

  Del.push_back((xr - xl) / xpix);
  Del.push_back((yr - yl) / ypix);

  for(int i=0; i < xpix; i++){
    Xn.push_back(xl + Del[0] * (i + 0.5));
  }
  for(int i=0; i < ypix; i++){
    Yn.push_back(yl + Del[1] * (i + 0.5));
  }

  Kernelcall();

  after = wall_clock_time(); // TIME END
  fprintf(stderr, "Process took %3.5f seconds ", ((float)(after - before))/1000000000);

  return 0;
}
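/*
 * The comment above the tables calls them "Gauss-Hermite", but the tabulated
 * values (+-0.960289856498 with weight 0.10122853629, ...) are the standard
 * 8-point Gauss-Legendre rule on [-1,1], which is what the file header says the
 * code uses.  Two quick invariants of such a table are that the weights sum to
 * 2 (the length of [-1,1]) and that nodes/weights are symmetric about zero.
 * The host-side check below is not part of the original; it keeps its own copy
 * of the table because __constant__ memory is not readable from host code.
 */
static const double G_host[8][2] = {
  {-0.960289856498, 0.10122853629},  {-0.796666477414, 0.222381034453},
  {-0.525532409916, 0.313706645878}, {-0.183434642496, 0.362683783378},
  { 0.183434642496, 0.362683783378}, { 0.525532409916, 0.313706645878},
  { 0.796666477414, 0.222381034453}, { 0.960289856498, 0.10122853629}};

static bool check_gauss_table() {
  double wsum = 0.0;
  bool symmetric = true;
  for (int i = 0; i < 8; i++) {
    wsum += G_host[i][1];
    // node i must mirror node 7-i, with equal weights
    symmetric = symmetric &&
                fabs(G_host[i][0] + G_host[7 - i][0]) < 1e-12 &&
                fabs(G_host[i][1] - G_host[7 - i][1]) < 1e-12;
  }
  printf("sum of weights = %.12f (expect 2), symmetric = %d\n",
         wsum, (int)symmetric);
  return fabs(wsum - 2.0) < 1e-9 && symmetric;
}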
7fd9e515f3ceaeba73167042f1d10244da6c4525.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Note: //Cara running program ./nama_file mode besar_matrix besar_grid besar_block //Ukuran matrix: besar_matrix x besar matrix //Grid: besar_grid x besar_grid (block per grid) | Max: Mengacu pada NVIDIA Compute Capability dari setiap seri GPU //Block: besar_block x besar_block (thread per block) | Max: Mengacu pada NVIDIA Compute Capability dari setiap seri GPU // mode 2 ketas belum selesai dikerjakan, masih belum sempurna (ukuran matrix harus setara block) // Mode: // 0: Matrix multiplication pada 1 GPU tanpa melihat hasil sekuensial // 1: Matrix multiplication pada 1 GPU dengan hasil sekuensial // 2: Matrix multiplication pada multiple GPU tanpa melihat hasil sekuensial // 3: Matrix multiplication pada multiple GPU dengan hasil sekuensial #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #define sharedsize 32 //Operasi perkalian matrix pada gpu __global__ void matrixmul_kernel(float *gpu_matrixA, float *gpu_matrixB, float *gpu_result, int matrix_size, int grid, int block){ int l, m; float R = 0; //int n, o, displacement; //if(matrix_size > (grid * block)) displacement = matrix_size/(grid * block); //else displacement = 1; __shared__ float SM_A[sharedsize][sharedsize]; __shared__ float SM_B[sharedsize][sharedsize]; int row_index = blockIdx.y * sharedsize + threadIdx.y; int col_index = blockIdx.x * sharedsize + threadIdx.x; int mrow_index; int mcol_index; int max_iter = sharedsize < matrix_size ? sharedsize : matrix_size; // if(row_index < matrix_size && col_index < matrix_size){ //for(n = 0; n < displacement; n++){ //for(o = 0; o < displacement; o++){ <<<<<<< HEAD:CUDA/870/mm_sm.cu for(m = 0; m < (matrix_size + sharedsize - 1)/sharedsize; m++){ ======= for(m = 0; m < (sharedsize + matrix_size - 1)/sharedsize; m++){ >>>>>>> 5bc549dc584051c56f186af6f5235b07bc79bdef:cuda/780/mm_sm.cu mrow_index = row_index * matrix_size + m * sharedsize + threadIdx.x; // if(threadIdx.y + n < sharedsize && threadIdx.y + o < sharedsize){ if(mrow_index < matrix_size * matrix_size) SM_A[threadIdx.y][threadIdx.x] = gpu_matrixA[mrow_index]; else SM_A[threadIdx.y][threadIdx.x] = 0.0; // } mcol_index = (m * sharedsize + threadIdx.y) * matrix_size + col_index; //if(threadIdx.y + n < sharedsize && threadIdx.y + o < sharedsize){ if(mcol_index < matrix_size * matrix_size) SM_B[threadIdx.y][threadIdx.x] = gpu_matrixB[mcol_index]; else SM_B[threadIdx.y][threadIdx.x] = 0.0; //} __syncthreads(); for(l = 0; l < max_iter; l++) R += SM_A[threadIdx.y][l] * SM_B[l][threadIdx.x]; __syncthreads(); } if(row_index < matrix_size && col_index < matrix_size) gpu_result[row_index * matrix_size + col_index] = R; // } // } // } } int main(int argc, char** argv){ srand(time(NULL)); double runtime; struct timespec begin, end; // Inisialisasi parameter dari user input int mode = atoi(argv[1]); int matrix_size = atoi(argv[2]); int igrid = atoi(argv[3]); int iblock = atoi(argv[4]); //Debug print variabel user input //printf("Mode: %d\n", mode); //printf("Size %d x %d\n", matrix_size, matrix_size); //printf("Grid: %d\n", igrid); //printf("Block:%d\n", iblock); // Inisailiasai pada Host //int matrixallsize = matrix_size * matrix_size; int matrixBytes = (matrix_size * matrix_size) * sizeof(float); float *matrixA = (float *)malloc(matrixBytes) ; float *matrixB = (float *)malloc(matrixBytes); float *result = (float *)malloc(matrixBytes); int i, j, k; //Inisialisasi martrix for(i = 0; i < matrix_size * matrix_size; i++){ matrixA[i] = 
rand() % 99 + 1;
        matrixB[i] = rand() % 99 + 1;
    }

    // Single-GPU operation
    //if(mode < 2){
        clock_gettime(CLOCK_REALTIME, &begin);

        // Initialization on the GPU
        float *gpu_matrixA, *gpu_matrixB, *gpu_result;
        hipMalloc((void **) &gpu_matrixA, matrixBytes);
        hipMalloc((void **) &gpu_matrixB, matrixBytes);
        hipMalloc((void **) &gpu_result, matrixBytes);

        hipMemcpy(gpu_matrixA, matrixA, matrixBytes, hipMemcpyHostToDevice);
        hipMemcpy(gpu_matrixB, matrixB, matrixBytes, hipMemcpyHostToDevice);

        //int omg;
        //if(sharedsize > matrix_size){
        //    omg = matrix_size;
        //else omg = sharedsize;

        // Start the operation on the device
        igrid = (matrix_size - 1)/sharedsize + 1;
        iblock = sharedsize;
        //iblock = matrix_size/ igrid;
        //if(iblock < 1) iblock = 1;
        printf("Grid: %d - %d\n", igrid, iblock);

        dim3 grid(igrid, igrid);
        dim3 block(iblock, iblock);
        hipLaunchKernelGGL(( matrixmul_kernel), dim3(grid), dim3(block), 0, 0, gpu_matrixA, gpu_matrixB, gpu_result, matrix_size, igrid, iblock);

        // Copy the multiplication result back to the host
        hipMemcpy(result, gpu_result, matrixBytes, hipMemcpyDeviceToHost);
        hipDeviceSynchronize();

        clock_gettime(CLOCK_REALTIME, &end);
        runtime = (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) / 1000000000.0;
        printf("Running Time: %f\n\n", runtime);
    /*}else{
        // Multi-GPU operation
        // Check devices
        clock_gettime(CLOCK_REALTIME, &begin);
        int device_count;
        hipGetDeviceCount(&device_count);
        printf("Device: %d\n", device_count);
        clock_gettime(CLOCK_REALTIME, &end);
        runtime = (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) / 1000000000.0;
        printf("Running Time: %f\n\n", runtime);
    }*/

    // Sequential verification on the CPU
    if(mode == 1 || mode == 3){
        int right_answer = 0;
        float *seqresult = (float *)malloc(matrixBytes);
        for (i = 0; i < matrix_size; i++){
            for (j = 0; j < matrix_size; j++){
                seqresult[i * matrix_size + j] = 0;
                for (k = 0; k < matrix_size; k++)
                    seqresult[i * matrix_size + j] += matrixA[i * matrix_size + k] * matrixB[k * matrix_size + j];
                if(seqresult[i * matrix_size + j] == result[i * matrix_size + j]) right_answer += 1;
                //printf("%d - %d S: %f, CUDA: %f\n", i * matrix_size, j, seqresult[i * matrix_size + j], result[i * matrix_size + j]);
            }
        }
        if(right_answer == (matrix_size * matrix_size)) printf("The answer is matched.\n");
        free(seqresult);
    }

    // Free device memory
    hipFree(gpu_matrixA);
    hipFree(gpu_matrixB);
    hipFree(gpu_result);

    // Free host memory
    free(matrixA);
    free(matrixB);
    free(result);
}
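/*
 * The tile loop in matrixmul_kernel above still contains unresolved git
 * conflict markers (<<<<<<< HEAD:CUDA/870/mm_sm.cu ... >>>>>>> 5bc549dc...:cuda/780/mm_sm.cu),
 * so the file does not compile as-is.  Both conflict branches compute the same
 * ceiling division, ceil(matrix_size / sharedsize), since the order of the
 * addition does not matter.  A resolved form of the loop header, written
 * against the kernel's existing names (num_tiles is a new local):
 */
int num_tiles = (matrix_size + sharedsize - 1) / sharedsize;   // == (sharedsize + matrix_size - 1) / sharedsize

for (m = 0; m < num_tiles; m++) {
    // ... load the SM_A / SM_B tiles, __syncthreads(), accumulate R,
    //     __syncthreads() -- exactly as in the kernel body above ...
}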
7fd9e515f3ceaeba73167042f1d10244da6c4525.cu
//Note: //Cara running program ./nama_file mode besar_matrix besar_grid besar_block //Ukuran matrix: besar_matrix x besar matrix //Grid: besar_grid x besar_grid (block per grid) | Max: Mengacu pada NVIDIA Compute Capability dari setiap seri GPU //Block: besar_block x besar_block (thread per block) | Max: Mengacu pada NVIDIA Compute Capability dari setiap seri GPU // mode 2 ketas belum selesai dikerjakan, masih belum sempurna (ukuran matrix harus setara block) // Mode: // 0: Matrix multiplication pada 1 GPU tanpa melihat hasil sekuensial // 1: Matrix multiplication pada 1 GPU dengan hasil sekuensial // 2: Matrix multiplication pada multiple GPU tanpa melihat hasil sekuensial // 3: Matrix multiplication pada multiple GPU dengan hasil sekuensial #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #define sharedsize 32 //Operasi perkalian matrix pada gpu __global__ void matrixmul_kernel(float *gpu_matrixA, float *gpu_matrixB, float *gpu_result, int matrix_size, int grid, int block){ int l, m; float R = 0; //int n, o, displacement; //if(matrix_size > (grid * block)) displacement = matrix_size/(grid * block); //else displacement = 1; __shared__ float SM_A[sharedsize][sharedsize]; __shared__ float SM_B[sharedsize][sharedsize]; int row_index = blockIdx.y * sharedsize + threadIdx.y; int col_index = blockIdx.x * sharedsize + threadIdx.x; int mrow_index; int mcol_index; int max_iter = sharedsize < matrix_size ? sharedsize : matrix_size; // if(row_index < matrix_size && col_index < matrix_size){ //for(n = 0; n < displacement; n++){ //for(o = 0; o < displacement; o++){ <<<<<<< HEAD:CUDA/870/mm_sm.cu for(m = 0; m < (matrix_size + sharedsize - 1)/sharedsize; m++){ ======= for(m = 0; m < (sharedsize + matrix_size - 1)/sharedsize; m++){ >>>>>>> 5bc549dc584051c56f186af6f5235b07bc79bdef:cuda/780/mm_sm.cu mrow_index = row_index * matrix_size + m * sharedsize + threadIdx.x; // if(threadIdx.y + n < sharedsize && threadIdx.y + o < sharedsize){ if(mrow_index < matrix_size * matrix_size) SM_A[threadIdx.y][threadIdx.x] = gpu_matrixA[mrow_index]; else SM_A[threadIdx.y][threadIdx.x] = 0.0; // } mcol_index = (m * sharedsize + threadIdx.y) * matrix_size + col_index; //if(threadIdx.y + n < sharedsize && threadIdx.y + o < sharedsize){ if(mcol_index < matrix_size * matrix_size) SM_B[threadIdx.y][threadIdx.x] = gpu_matrixB[mcol_index]; else SM_B[threadIdx.y][threadIdx.x] = 0.0; //} __syncthreads(); for(l = 0; l < max_iter; l++) R += SM_A[threadIdx.y][l] * SM_B[l][threadIdx.x]; __syncthreads(); } if(row_index < matrix_size && col_index < matrix_size) gpu_result[row_index * matrix_size + col_index] = R; // } // } // } } int main(int argc, char** argv){ srand(time(NULL)); double runtime; struct timespec begin, end; // Inisialisasi parameter dari user input int mode = atoi(argv[1]); int matrix_size = atoi(argv[2]); int igrid = atoi(argv[3]); int iblock = atoi(argv[4]); //Debug print variabel user input //printf("Mode: %d\n", mode); //printf("Size %d x %d\n", matrix_size, matrix_size); //printf("Grid: %d\n", igrid); //printf("Block:%d\n", iblock); // Inisailiasai pada Host //int matrixallsize = matrix_size * matrix_size; int matrixBytes = (matrix_size * matrix_size) * sizeof(float); float *matrixA = (float *)malloc(matrixBytes) ; float *matrixB = (float *)malloc(matrixBytes); float *result = (float *)malloc(matrixBytes); int i, j, k; //Inisialisasi martrix for(i = 0; i < matrix_size * matrix_size; i++){ matrixA[i] = rand() % 99 + 1; matrixB[i] = rand() % 99 + 1; } //Operasi dengan 1 GPU //if(mode < 2){ 
clock_gettime(CLOCK_REALTIME, &begin);

        // Initialization on the GPU
        float *gpu_matrixA, *gpu_matrixB, *gpu_result;
        cudaMalloc((void **) &gpu_matrixA, matrixBytes);
        cudaMalloc((void **) &gpu_matrixB, matrixBytes);
        cudaMalloc((void **) &gpu_result, matrixBytes);

        cudaMemcpy(gpu_matrixA, matrixA, matrixBytes, cudaMemcpyHostToDevice);
        cudaMemcpy(gpu_matrixB, matrixB, matrixBytes, cudaMemcpyHostToDevice);

        //int omg;
        //if(sharedsize > matrix_size){
        //    omg = matrix_size;
        //else omg = sharedsize;

        // Start the operation on the device
        igrid = (matrix_size - 1)/sharedsize + 1;
        iblock = sharedsize;
        //iblock = matrix_size/ igrid;
        //if(iblock < 1) iblock = 1;
        printf("Grid: %d - %d\n", igrid, iblock);

        dim3 grid(igrid, igrid);
        dim3 block(iblock, iblock);
        matrixmul_kernel<<<grid, block>>>(gpu_matrixA, gpu_matrixB, gpu_result, matrix_size, igrid, iblock);

        // Copy the multiplication result back to the host
        cudaMemcpy(result, gpu_result, matrixBytes, cudaMemcpyDeviceToHost);
        cudaDeviceSynchronize();

        clock_gettime(CLOCK_REALTIME, &end);
        runtime = (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) / 1000000000.0;
        printf("Running Time: %f\n\n", runtime);
    /*}else{
        // Multi-GPU operation
        // Check devices
        clock_gettime(CLOCK_REALTIME, &begin);
        int device_count;
        cudaGetDeviceCount(&device_count);
        printf("Device: %d\n", device_count);
        clock_gettime(CLOCK_REALTIME, &end);
        runtime = (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) / 1000000000.0;
        printf("Running Time: %f\n\n", runtime);
    }*/

    // Sequential verification on the CPU
    if(mode == 1 || mode == 3){
        int right_answer = 0;
        float *seqresult = (float *)malloc(matrixBytes);
        for (i = 0; i < matrix_size; i++){
            for (j = 0; j < matrix_size; j++){
                seqresult[i * matrix_size + j] = 0;
                for (k = 0; k < matrix_size; k++)
                    seqresult[i * matrix_size + j] += matrixA[i * matrix_size + k] * matrixB[k * matrix_size + j];
                if(seqresult[i * matrix_size + j] == result[i * matrix_size + j]) right_answer += 1;
                //printf("%d - %d S: %f, CUDA: %f\n", i * matrix_size, j, seqresult[i * matrix_size + j], result[i * matrix_size + j]);
            }
        }
        if(right_answer == (matrix_size * matrix_size)) printf("The answer is matched.\n");
        free(seqresult);
    }

    // Free device memory
    cudaFree(gpu_matrixA);
    cudaFree(gpu_matrixB);
    cudaFree(gpu_result);

    // Free host memory
    free(matrixA);
    free(matrixB);
    free(result);
}
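/*
 * The clock_gettime window above also measures cudaMalloc and the two host-to-
 * device copies.  If only the kernel is of interest, CUDA events give a
 * device-side timing; a minimal sketch around the existing launch (the event
 * variables are new, everything else reuses names from main above):
 */
cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);

cudaEventRecord(ev_start);
matrixmul_kernel<<<grid, block>>>(gpu_matrixA, gpu_matrixB, gpu_result,
                                  matrix_size, igrid, iblock);
cudaEventRecord(ev_stop);
cudaEventSynchronize(ev_stop);

float kernel_ms = 0.0f;
cudaEventElapsedTime(&kernel_ms, ev_start, ev_stop);
printf("Kernel Time: %f ms\n", kernel_ms);

cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);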
ee8ed616bea83f12427c7eaf44dd2b2243f6b836.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/native/ScatterGatherChecks.h> #include <ATen/native/ReduceOpsUtils.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/hip/HIPContext.h> #include <THH/THHAtomics.cuh> namespace at { namespace native { // The kernels are implemented on an opaque, // self-aligned type of the correct size, // to avoid redundant kernels for different types // of the same size. template <int N> struct alignas(N) OpaqueType { char data[N]; }; // essentialy rewritten related to legacy::launch_kernel parts template <int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void _scatter_gather_elementwise_kernel(int N, func_t f) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < N) { f(idx); idx += nt; } } } template <int nt, int vt, typename func_t> static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( _scatter_gather_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f); AT_CUDA_CHECK(hipGetLastError()); } template <bool is_scatter_like, typename scalar_t> struct _cuda_scatter_gather_internal_kernel { template <typename func_t> void operator() ( TensorIterator& iter, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()( sub_iter, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* src_ptr = (char*)iter.data_ptr(1); char* index_ptr = (char*)iter.data_ptr(2); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds"); char* self_data = self_ptr + offsets[0]; char* src_data = src_ptr + offsets[1]; f( (scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0), (scalar_t*)src_data + (is_scatter_like ? 
0 : idx_dim * index_stride) ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool is_scatter_like = true, bool cast_to_opaque = true> struct cuda_scatter_gather_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } dim = maybe_wrap_dim(dim, self.dim()); scatter_gather_dtype_check(method_name, self, index, src); if (is_scatter_like) { scatter_shape_check(self, dim, index, src); } else { gather_shape_check(self, dim, index, src); } auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); auto self_strides = ensure_nonempty_vec(self.strides().vec()); auto src_strides = ensure_nonempty_vec(src.strides().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? restride_dim(self, dim, index_sizes) : self.as_strided(index_sizes, self_strides); auto src_restrided = is_scatter_like ? src.as_strided(index_sizes, src_strides) : restride_dim(src, dim, index_sizes); auto iter = TensorIteratorConfig() .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(src_restrided) .add_input(index) .build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? 
self_dim_stride : src_dim_stride; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, f ); } ); } }; // struct cuda_scatter_gather_base_kernel template <typename scalar_t> struct _cuda_scatter_fill_internal_kernel { template <typename func_t> void operator()( TensorIterator& iter, scalar_t src_val, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_fill_internal_kernel<scalar_t>()( sub_iter, src_val, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* index_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds" ); char* self_data = self_ptr + offsets[0]; f( (scalar_t*)self_data + idx_dim * index_stride, &src_val ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool cast_to_opaque = true> struct cuda_scatter_fill_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, Scalar src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } dim = maybe_wrap_dim(dim, self.dim()); scatter_gather_dtype_check(method_name, self, index); scatter_shape_check(self, dim, index); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self such that // self.shape = index.shape and // self.stride[dim] = 0 auto self_restrided = restride_dim(self, dim, index_sizes); auto iter = TensorIteratorConfig() .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(index) .build(); auto index_size = ensure_nonempty_size(self, dim); auto index_stride = ensure_nonempty_stride(self, dim); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; auto src_scalar_val = src.to<scalar_t>(); auto src_val = *(dtype*)&src_scalar_val; _cuda_scatter_fill_internal_kernel<dtype>()( iter, src_val, index_size, index_stride, f ); } ); } }; // struct cuda_scatter_fill_base_kernel void gather_cuda_kernel(Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) { cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()( result, dim, index, self, "gather_out_cuda", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { cuda_scatter_gather_base_kernel<>()( self, dim, index, src, "scatter_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, Scalar src) { cuda_scatter_fill_base_kernel<>()( self, dim, index, src, "scatter_fill_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); 
} void scatter_add_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("scatter_add_cuda_kernel"); cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()( self, dim, index, src, "scatter_add_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { gpuAtomicAddNoReturn(lhs, *rhs); } ); } REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel); REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel); REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel); REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel); }} // namespace at::native
ee8ed616bea83f12427c7eaf44dd2b2243f6b836.cu
#include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/native/ScatterGatherChecks.h> #include <ATen/native/ReduceOpsUtils.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/cuda/CUDAContext.h> #include <THC/THCAtomics.cuh> namespace at { namespace native { // The kernels are implemented on an opaque, // self-aligned type of the correct size, // to avoid redundant kernels for different types // of the same size. template <int N> struct alignas(N) OpaqueType { char data[N]; }; // essentialy rewritten related to legacy::launch_kernel parts template <int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void _scatter_gather_elementwise_kernel(int N, func_t f) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < N) { f(idx); idx += nt; } } } template <int nt, int vt, typename func_t> static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::cuda::getCurrentCUDAStream(); _scatter_gather_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f); AT_CUDA_CHECK(cudaGetLastError()); } template <bool is_scatter_like, typename scalar_t> struct _cuda_scatter_gather_internal_kernel { template <typename func_t> void operator() ( TensorIterator& iter, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()( sub_iter, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* src_ptr = (char*)iter.data_ptr(1); char* index_ptr = (char*)iter.data_ptr(2); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds"); char* self_data = self_ptr + offsets[0]; char* src_data = src_ptr + offsets[1]; f( (scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0), (scalar_t*)src_data + (is_scatter_like ? 
0 : idx_dim * index_stride) ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool is_scatter_like = true, bool cast_to_opaque = true> struct cuda_scatter_gather_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } dim = maybe_wrap_dim(dim, self.dim()); scatter_gather_dtype_check(method_name, self, index, src); if (is_scatter_like) { scatter_shape_check(self, dim, index, src); } else { gather_shape_check(self, dim, index, src); } auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); auto self_strides = ensure_nonempty_vec(self.strides().vec()); auto src_strides = ensure_nonempty_vec(src.strides().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? restride_dim(self, dim, index_sizes) : self.as_strided(index_sizes, self_strides); auto src_restrided = is_scatter_like ? src.as_strided(index_sizes, src_strides) : restride_dim(src, dim, index_sizes); auto iter = TensorIteratorConfig() .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(src_restrided) .add_input(index) .build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? 
self_dim_stride : src_dim_stride; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, f ); } ); } }; // struct cuda_scatter_gather_base_kernel template <typename scalar_t> struct _cuda_scatter_fill_internal_kernel { template <typename func_t> void operator()( TensorIterator& iter, scalar_t src_val, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_fill_internal_kernel<scalar_t>()( sub_iter, src_val, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* index_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds" ); char* self_data = self_ptr + offsets[0]; f( (scalar_t*)self_data + idx_dim * index_stride, &src_val ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool cast_to_opaque = true> struct cuda_scatter_fill_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, Scalar src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } dim = maybe_wrap_dim(dim, self.dim()); scatter_gather_dtype_check(method_name, self, index); scatter_shape_check(self, dim, index); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self such that // self.shape = index.shape and // self.stride[dim] = 0 auto self_restrided = restride_dim(self, dim, index_sizes); auto iter = TensorIteratorConfig() .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(index) .build(); auto index_size = ensure_nonempty_size(self, dim); auto index_stride = ensure_nonempty_stride(self, dim); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; auto src_scalar_val = src.to<scalar_t>(); auto src_val = *(dtype*)&src_scalar_val; _cuda_scatter_fill_internal_kernel<dtype>()( iter, src_val, index_size, index_stride, f ); } ); } }; // struct cuda_scatter_fill_base_kernel void gather_cuda_kernel(Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) { cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()( result, dim, index, self, "gather_out_cuda", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { cuda_scatter_gather_base_kernel<>()( self, dim, index, src, "scatter_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, Scalar src) { cuda_scatter_fill_base_kernel<>()( self, dim, index, src, "scatter_fill_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); 
} void scatter_add_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("scatter_add_cuda_kernel"); cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()( self, dim, index, src, "scatter_add_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { gpuAtomicAddNoReturn(lhs, *rhs); } ); } REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel); REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel); REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel); REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel); }} // namespace at::native
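For orientation, the dispatch stubs registered at the end of this file back the public gather/scatter entry points. The short program below is a hypothetical usage sketch, assuming a libtorch build with a visible CUDA device; the tensor shapes and values are arbitrary and only meant to show which public calls route into gather_cuda_kernel and scatter_add_cuda_kernel.

#include <torch/torch.h>
#include <iostream>

int main() {
  // Assumes libtorch with CUDA; these public calls dispatch (via the stubs
  // registered above) to the CUDA gather / scatter_add kernels.
  auto opts = torch::dtype(torch::kFloat).device(torch::kCUDA);
  auto src = torch::arange(12, opts).reshape({3, 4});
  auto index = torch::tensor({{0, 1, 2, 0}},
                             torch::dtype(torch::kLong).device(torch::kCUDA));

  // gather along dim 0: out[0][j] = src[index[0][j]][j]
  auto gathered = torch::gather(src, /*dim=*/0, index);

  // scatter_add along dim 0: self[index[0][j]][j] += gathered[0][j]
  auto self = torch::zeros({3, 4}, opts);
  self.scatter_add_(/*dim=*/0, index, gathered);

  std::cout << gathered.cpu() << "\n" << self.cpu() << std::endl;
  return 0;
}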
76447359b7b44c97ce87e7c16378f0899ef356a1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* dot product of two vectors: d = <x, y> */
#include "reduction_aux.h"
#include <assert.h>

/* host, add */
FLOAT dot_host(FLOAT *x, FLOAT *y, int N)
{
    int i;
    FLOAT t = 0;

    assert(x != NULL);
    assert(y != NULL);

    for (i = 0; i < N; i++) t += x[i] * y[i];

    return t;
}

__device__ void warpReduce(volatile FLOAT *sdata, int tid)
{
    sdata[tid] += sdata[tid + 32];
    sdata[tid] += sdata[tid + 16];
    sdata[tid] += sdata[tid + 8];
    sdata[tid] += sdata[tid + 4];
    sdata[tid] += sdata[tid + 2];
    sdata[tid] += sdata[tid + 1];
}

// many threads and many blocks, so the reduction is split into three stages
/* partial dot product */
__global__ void dot_stg_1(const FLOAT *x, FLOAT *y, FLOAT *z, int N)
{
    __shared__ FLOAT sdata[256];
    // global index of the thread
    int idx = get_tid();
    // index of the thread within its block
    int tid = threadIdx.x;
    // index of the thread block within the grid
    int bid = get_bid();

    /* load data to shared mem */
    // each thread loads one element into shared memory
    if (idx < N) {
        sdata[tid] = x[idx] * y[idx];
    }
    else {
        sdata[tid] = 0;
    }

    __syncthreads();

    /* reduction using shared mem */
    // two-way intra-block reduction
    if (tid < 128) sdata[tid] += sdata[tid + 128];
    __syncthreads();

    if (tid < 64) sdata[tid] += sdata[tid + 64];
    __syncthreads();

    if (tid < 32) warpReduce(sdata, tid);

    if (tid == 0) z[bid] = sdata[0];
}

/* sum all entries in x and assign to y
 * block dim must be 256 */
__global__ void dot_stg_2(const FLOAT *x, FLOAT *y, int N)
{
    __shared__ FLOAT sdata[256];
    int idx = get_tid();
    int tid = threadIdx.x;
    int bid = get_bid();

    /* load data to shared mem */
    if (idx < N) {
        sdata[tid] = x[idx];
    }
    else {
        sdata[tid] = 0;
    }

    __syncthreads();

    /* reduction using shared mem */
    if (tid < 128) sdata[tid] += sdata[tid + 128];
    __syncthreads();

    if (tid < 64) sdata[tid] += sdata[tid + 64];
    __syncthreads();

    if (tid < 32) warpReduce(sdata, tid);

    if (tid == 0) y[bid] = sdata[0];
}

// stage 3 runs in a single block: fold all values into a 128-entry shared buffer, then reduce
__global__ void dot_stg_3(FLOAT *x, int N)
{
    __shared__ FLOAT sdata[128];
    int tid = threadIdx.x;
    int i;

    sdata[tid] = 0;

    /* load data to shared mem */
    for (i = 0; i < N; i += 128) {
        if (tid + i < N) sdata[tid] += x[i + tid];
    }

    __syncthreads();

    /* reduction using shared mem */
    if (tid < 64) sdata[tid] = sdata[tid] + sdata[tid + 64];
    __syncthreads();

    if (tid < 32) warpReduce(sdata, tid);

    if (tid == 0) x[0] = sdata[0];
}

/* dz and d serve as cache: result stores in d[0] */
void dot_device(FLOAT *dx, FLOAT *dy, FLOAT *dz, FLOAT *d, int N)
{
    /* 1D block */
    int bs = 256;

    /* 2D grid */
    int s = ceil(sqrt((N + bs - 1.) / bs));
    dim3 grid = dim3(s, s);
    int gs = 0;

    /* stage 1 */
    hipLaunchKernelGGL(( dot_stg_1), dim3(grid), dim3(bs), 0, 0, dx, dy, dz, N);

    /* stage 2 */
    {
        /* 1D grid */
        int N2 = (N + bs - 1) / bs;

        int s2 = ceil(sqrt((N2 + bs - 1.) / bs));
        dim3 grid2 = dim3(s2, s2);

        hipLaunchKernelGGL(( dot_stg_2), dim3(grid2), dim3(bs), 0, 0, dz, d, N2);

        /* record gs */
        gs = (N2 + bs - 1.)
/ bs; } /* stage 3 */ hipLaunchKernelGGL(( dot_stg_3), dim3(1), dim3(128), 0, 0, d, gs); } int main(int argc, char **argv) { int N = 10000070; int nbytes = N * sizeof(FLOAT); FLOAT *hx = NULL, *hy = NULL; FLOAT *dx = NULL, *dy = NULL, *dz = NULL, *d = NULL; int i, itr = 20; FLOAT asd = 0, ash; double td, th; if (argc == 2) { int an; an = atoi(argv[1]); if (an > 0) N = an; } /* allocate GPU mem */ hipMalloc((void **)&dx, nbytes); hipMalloc((void **)&dy, nbytes); hipMalloc((void **)&dz, sizeof(FLOAT) * ((N + 255) / 256)); hipMalloc((void **)&d, sizeof(FLOAT) * ((N + 255) / 256)); if (dx == NULL || dy == NULL || dz == NULL || d == NULL) { printf("couldn't allocate GPU memory\n"); return -1; } printf("allocated %e MB on GPU\n", nbytes / (1024.f * 1024.f)); /* alllocate CPU mem */ hx = (FLOAT *) malloc(nbytes); hy = (FLOAT *) malloc(nbytes); if (hx == NULL || hy == NULL) { printf("couldn't allocate CPU memory\n"); return -2; } printf("allocated %e MB on CPU\n", nbytes / (1024.f * 1024.f)); /* init */ for (i = 0; i < N; i++) { hx[i] = 1; hy[i] = 2; } /* copy data to GPU */ hipMemcpy(dx, hx, nbytes, hipMemcpyHostToDevice); hipMemcpy(dy, hy, nbytes, hipMemcpyHostToDevice); /* let dust fall */ hipDeviceSynchronize(); td = get_time(); /* call GPU */ for (i = 0; i < itr; i++) dot_device(dx, dy, dz, d, N); /* let GPU finish */ hipDeviceSynchronize(); td = get_time() - td; th = get_time(); for (i = 0; i < itr; i++) ash = dot_host(hx, hy, N); th = get_time() - th; /* copy data from GPU */ hipMemcpy(&asd, d, sizeof(FLOAT), hipMemcpyDeviceToHost); printf("dot, answer: %d, calculated by GPU:%f, calculated by CPU:%f\n", 2 * N, asd, ash); printf("GPU time: %e, CPU time: %e, speedup: %g\n", td, th, th / td); hipFree(dx); hipFree(dy); hipFree(dz); hipFree(d); free(hx); free(hy); return 0; }
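The only structural change hipify made to this file is the launch syntax: each CUDA triple-chevron launch becomes a hipLaunchKernelGGL call with the grid, block, shared-memory size, and stream passed as leading arguments. The minimal sketch below uses a trivial kernel of its own (not taken from the listing above) purely to show the correspondence.

#include "hip/hip_runtime.h"
#include <stdio.h>

/* trivial illustrative kernel, not part of the listing above */
__global__ void scale(float *x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main(void)
{
    const int n = 1 << 10;
    float *d = NULL;
    hipMalloc((void **)&d, n * sizeof(float));
    hipMemset(d, 0, n * sizeof(float));

    /* CUDA form:              scale<<<dim3((n + 255) / 256), dim3(256), 0, 0>>>(d, 2.0f, n);
     * HIP form (hipify emits): */
    hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, d, 2.0f, n);
    hipDeviceSynchronize();

    hipFree(d);
    printf("launched scale on %d elements\n", n);
    return 0;
}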
76447359b7b44c97ce87e7c16378f0899ef356a1.cu
/* dot product of two vectors: d = <x, y> */
#include "reduction_aux.h"
#include <assert.h>

/* host, add */
FLOAT dot_host(FLOAT *x, FLOAT *y, int N)
{
    int i;
    FLOAT t = 0;

    assert(x != NULL);
    assert(y != NULL);

    for (i = 0; i < N; i++) t += x[i] * y[i];

    return t;
}

__device__ void warpReduce(volatile FLOAT *sdata, int tid)
{
    sdata[tid] += sdata[tid + 32];
    sdata[tid] += sdata[tid + 16];
    sdata[tid] += sdata[tid + 8];
    sdata[tid] += sdata[tid + 4];
    sdata[tid] += sdata[tid + 2];
    sdata[tid] += sdata[tid + 1];
}

// many threads and many blocks, so the reduction is split into three stages
/* partial dot product */
__global__ void dot_stg_1(const FLOAT *x, FLOAT *y, FLOAT *z, int N)
{
    __shared__ FLOAT sdata[256];
    // global index of the thread
    int idx = get_tid();
    // index of the thread within its block
    int tid = threadIdx.x;
    // index of the thread block within the grid
    int bid = get_bid();

    /* load data to shared mem */
    // each thread loads one element into shared memory
    if (idx < N) {
        sdata[tid] = x[idx] * y[idx];
    }
    else {
        sdata[tid] = 0;
    }

    __syncthreads();

    /* reduction using shared mem */
    // two-way intra-block reduction
    if (tid < 128) sdata[tid] += sdata[tid + 128];
    __syncthreads();

    if (tid < 64) sdata[tid] += sdata[tid + 64];
    __syncthreads();

    if (tid < 32) warpReduce(sdata, tid);

    if (tid == 0) z[bid] = sdata[0];
}

/* sum all entries in x and assign to y
 * block dim must be 256 */
__global__ void dot_stg_2(const FLOAT *x, FLOAT *y, int N)
{
    __shared__ FLOAT sdata[256];
    int idx = get_tid();
    int tid = threadIdx.x;
    int bid = get_bid();

    /* load data to shared mem */
    if (idx < N) {
        sdata[tid] = x[idx];
    }
    else {
        sdata[tid] = 0;
    }

    __syncthreads();

    /* reduction using shared mem */
    if (tid < 128) sdata[tid] += sdata[tid + 128];
    __syncthreads();

    if (tid < 64) sdata[tid] += sdata[tid + 64];
    __syncthreads();

    if (tid < 32) warpReduce(sdata, tid);

    if (tid == 0) y[bid] = sdata[0];
}

// stage 3 runs in a single block: fold all values into a 128-entry shared buffer, then reduce
__global__ void dot_stg_3(FLOAT *x, int N)
{
    __shared__ FLOAT sdata[128];
    int tid = threadIdx.x;
    int i;

    sdata[tid] = 0;

    /* load data to shared mem */
    for (i = 0; i < N; i += 128) {
        if (tid + i < N) sdata[tid] += x[i + tid];
    }

    __syncthreads();

    /* reduction using shared mem */
    if (tid < 64) sdata[tid] = sdata[tid] + sdata[tid + 64];
    __syncthreads();

    if (tid < 32) warpReduce(sdata, tid);

    if (tid == 0) x[0] = sdata[0];
}

/* dz and d serve as cache: result stores in d[0] */
void dot_device(FLOAT *dx, FLOAT *dy, FLOAT *dz, FLOAT *d, int N)
{
    /* 1D block */
    int bs = 256;

    /* 2D grid */
    int s = ceil(sqrt((N + bs - 1.) / bs));
    dim3 grid = dim3(s, s);
    int gs = 0;

    /* stage 1 */
    dot_stg_1<<<grid, bs>>>(dx, dy, dz, N);

    /* stage 2 */
    {
        /* 1D grid */
        int N2 = (N + bs - 1) / bs;

        int s2 = ceil(sqrt((N2 + bs - 1.) / bs));
        dim3 grid2 = dim3(s2, s2);

        dot_stg_2<<<grid2, bs>>>(dz, d, N2);

        /* record gs */
        gs = (N2 + bs - 1.)
/ bs; } /* stage 3 */ dot_stg_3<<<1, 128>>>(d, gs); } int main(int argc, char **argv) { int N = 10000070; int nbytes = N * sizeof(FLOAT); FLOAT *hx = NULL, *hy = NULL; FLOAT *dx = NULL, *dy = NULL, *dz = NULL, *d = NULL; int i, itr = 20; FLOAT asd = 0, ash; double td, th; if (argc == 2) { int an; an = atoi(argv[1]); if (an > 0) N = an; } /* allocate GPU mem */ cudaMalloc((void **)&dx, nbytes); cudaMalloc((void **)&dy, nbytes); cudaMalloc((void **)&dz, sizeof(FLOAT) * ((N + 255) / 256)); cudaMalloc((void **)&d, sizeof(FLOAT) * ((N + 255) / 256)); if (dx == NULL || dy == NULL || dz == NULL || d == NULL) { printf("couldn't allocate GPU memory\n"); return -1; } printf("allocated %e MB on GPU\n", nbytes / (1024.f * 1024.f)); /* alllocate CPU mem */ hx = (FLOAT *) malloc(nbytes); hy = (FLOAT *) malloc(nbytes); if (hx == NULL || hy == NULL) { printf("couldn't allocate CPU memory\n"); return -2; } printf("allocated %e MB on CPU\n", nbytes / (1024.f * 1024.f)); /* init */ for (i = 0; i < N; i++) { hx[i] = 1; hy[i] = 2; } /* copy data to GPU */ cudaMemcpy(dx, hx, nbytes, cudaMemcpyHostToDevice); cudaMemcpy(dy, hy, nbytes, cudaMemcpyHostToDevice); /* let dust fall */ cudaThreadSynchronize(); td = get_time(); /* call GPU */ for (i = 0; i < itr; i++) dot_device(dx, dy, dz, d, N); /* let GPU finish */ cudaThreadSynchronize(); td = get_time() - td; th = get_time(); for (i = 0; i < itr; i++) ash = dot_host(hx, hy, N); th = get_time() - th; /* copy data from GPU */ cudaMemcpy(&asd, d, sizeof(FLOAT), cudaMemcpyDeviceToHost); printf("dot, answer: %d, calculated by GPU:%f, calculated by CPU:%f\n", 2 * N, asd, ash); printf("GPU time: %e, CPU time: %e, speedup: %g\n", td, th, th / td); cudaFree(dx); cudaFree(dy); cudaFree(dz); cudaFree(d); free(hx); free(hy); return 0; }
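The comments above describe a three-stage reduction: stage 1 turns the N elementwise products into one partial sum per 256-thread block, stage 2 reduces those partials by another factor of 256, and stage 3 finishes the remainder in a single 128-thread block. The host-only sketch below (print_reduction_plan is an illustrative helper, not part of the file above) just walks through that sizing arithmetic for the default N used in main().

#include <stdio.h>
#include <math.h>

/* host-only illustration of the stage sizing used by dot_device() above */
static void print_reduction_plan(int N, int bs /* = 256 */)
{
    int n1 = (N  + bs - 1) / bs;                    /* partials consumed by stage 2 */
    int n2 = (n1 + bs - 1) / bs;                    /* partials consumed by stage 3 */
    int s  = (int)ceil(sqrt((N + bs - 1.) / bs));   /* 2D grid edge for stage 1 */

    printf("N = %d: stage 1 grid %d x %d (%d partials passed to stage 2), "
           "stage 2 leaves %d partials, stage 3 finishes them in one 128-thread block\n",
           N, s, s, n1, n2);
}

int main(void)
{
    print_reduction_plan(10000070, 256);  /* the default N in main() above */
    return 0;
}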
13644c00c44e7413466749bdf447c4ce8bec139e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <algorithm> #include <cmath> #include <cstdio> #include <iostream> #include <sstream> #include <omp.h> #ifdef HAVE_CUB #include <hipcub/hipcub.hpp> #endif // HAVE_CUB #ifdef USE_NVTX #include <roctracer/roctx.h> const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff}; const int num_colors = sizeof(colors) / sizeof(uint32_t); #define PUSH_RANGE(name, cid) \ { \ int color_id = cid; \ color_id = color_id % num_colors; \ nvtxEventAttributes_t eventAttrib = {0}; \ eventAttrib.version = NVTX_VERSION; \ eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \ eventAttrib.colorType = NVTX_COLOR_ARGB; \ eventAttrib.color = colors[color_id]; \ eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \ eventAttrib.message.ascii = name; \ nvtxRangePushEx(&eventAttrib); \ } #define POP_RANGE roctxRangePop(); #else #define PUSH_RANGE(name, cid) #define POP_RANGE #endif #define CUDA_RT_CALL(call) \ { \ hipError_t cudaStatus = call; \ if (hipSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, hipGetErrorString(cudaStatus), cudaStatus); \ } constexpr int MAX_NUM_DEVICES = 32; typedef float real; constexpr real tol = 1.0e-8; const real PI = 2.0 * std::asin(1.0); __global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a, const real pi, const int offset, const int nx, const int my_ny, const int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) { const real y0 = sin(2.0 * pi * (offset + iy) / (ny - 1)); a[iy * nx + 0] = y0; a[iy * nx + (nx - 1)] = y0; a_new[iy * nx + 0] = y0; a_new[iy * nx + (nx - 1)] = y0; } } template <int BLOCK_DIM_X, int BLOCK_DIM_Y> __global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a, real* __restrict__ const l2_norm, const 
int iy_start, const int iy_end, const int nx, real* __restrict__ const a_new_top, const int top_iy, real* __restrict__ const a_new_bottom, const int bottom_iy) { #ifdef HAVE_CUB typedef hipcub::BlockReduce<real, BLOCK_DIM_X, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; #endif // HAVE_CUB int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start; int ix = blockIdx.x * blockDim.x + threadIdx.x + 1; real local_l2_norm = 0.0; if (iy < iy_end && ix < (nx - 1)) { const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; if (iy_start == iy) { a_new_top[top_iy * nx + ix] = new_val; } if ((iy_end - 1) == iy) { a_new_bottom[bottom_iy * nx + ix] = new_val; } real residue = new_val - a[iy * nx + ix]; local_l2_norm += residue * residue; } #ifdef HAVE_CUB real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm); if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm); #else atomicAdd(l2_norm, local_l2_norm); #endif // HAVE_CUB } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print); template <typename T> T get_argval(char** begin, char** end, const std::string& arg, const T default_val) { T argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, arg); if (itr != end) { return true; } return false; } struct l2_norm_buf { hipEvent_t copy_done; real* d; real* h; }; int main(int argc, char* argv[]) { const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000); const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1); const int nx = get_argval<int>(argv, argv + argc, "-nx", 7168); const int ny = get_argval<int>(argv, argv + argc, "-ny", 7168); const bool csv = get_arg(argv, argv + argc, "-csv"); if (nccheck != 1) { printf("Only nccheck = 1 is supported\n"); exit(1); } real* a_new[MAX_NUM_DEVICES]; real* a_ref_h; real* a_h; double runtime_serial = 0.0; int iy_end[MAX_NUM_DEVICES]; hipEvent_t compute_done[2][MAX_NUM_DEVICES]; hipEvent_t reset_l2_norm_done[2][MAX_NUM_DEVICES]; bool result_correct = true; bool p2p_works = true; int num_devices = 0; CUDA_RT_CALL(hipGetDeviceCount(&num_devices)); real l2_norms[2]; #pragma omp parallel num_threads(num_devices) shared(l2_norms) { real* a; hipStream_t compute_stream; hipStream_t reset_l2_norm_stream; l2_norm_buf l2_norm_bufs[2]; // Ensure correctness if ny%size != 0 int chunk_size = ::ceil((1.0 * (ny - 2)) / num_devices); int dev_id = omp_get_thread_num(); CUDA_RT_CALL(hipSetDevice(dev_id)); CUDA_RT_CALL(hipSetDeviceFlags(hipDeviceScheduleSpin)); CUDA_RT_CALL(hipFree(0)); if (0 == dev_id) { CUDA_RT_CALL(hipHostMalloc(&a_ref_h, nx * ny * sizeof(real))); CUDA_RT_CALL(hipHostMalloc(&a_h, nx * ny * sizeof(real))); runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h, nccheck, !csv); } #pragma omp barrier const int top = dev_id > 0 ? 
dev_id - 1 : (num_devices - 1); const int bottom = (dev_id + 1) % num_devices; if (top != dev_id) { int canAccessPeer = 0; CUDA_RT_CALL(hipDeviceCanAccessPeer(&canAccessPeer, dev_id, top)); if (canAccessPeer) { CUDA_RT_CALL(hipDeviceEnablePeerAccess(top, 0)); } else { std::cerr << "P2P access required from " << dev_id << " to " << top << std::endl; #pragma omp critical { if (p2p_works) p2p_works = false; } } if (top != bottom) { canAccessPeer = 0; CUDA_RT_CALL(hipDeviceCanAccessPeer(&canAccessPeer, dev_id, bottom)); if (canAccessPeer) { CUDA_RT_CALL(hipDeviceEnablePeerAccess(bottom, 0)); } else { std::cerr << "P2P access required from " << dev_id << " to " << bottom << std::endl; #pragma omp critical { if (p2p_works) p2p_works = false; } } } } #pragma omp barrier if (p2p_works) { CUDA_RT_CALL(hipMalloc(&a, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(hipMalloc(a_new + dev_id, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(hipMemset(a, 0, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(hipMemset(a_new[dev_id], 0, nx * (chunk_size + 2) * sizeof(real))); // Calculate local domain boundaries int iy_start_global = dev_id * chunk_size + 1; int iy_end_global = iy_start_global + chunk_size - 1; // Do not process boundaries iy_end_global = ::min(iy_end_global, ny - 2); int iy_start = 1; iy_end[dev_id] = (iy_end_global - iy_start_global + 1) + iy_start; // Set diriclet boundary conditions on left and right boarder hipLaunchKernelGGL(( initialize_boundaries), dim3((ny / num_devices) / 128 + 1), dim3(128), 0, 0, a, a_new[dev_id], PI, iy_start_global - 1, nx, (chunk_size + 2), ny); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipDeviceSynchronize()); CUDA_RT_CALL(hipStreamCreate(&compute_stream)); CUDA_RT_CALL(hipStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL( hipEventCreateWithFlags(compute_done[0] + dev_id, hipEventDisableTiming)); CUDA_RT_CALL( hipEventCreateWithFlags(compute_done[1] + dev_id, hipEventDisableTiming)); CUDA_RT_CALL( hipEventCreateWithFlags(reset_l2_norm_done[0] + dev_id, hipEventDisableTiming)); CUDA_RT_CALL( hipEventCreateWithFlags(reset_l2_norm_done[1] + dev_id, hipEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL( hipEventCreateWithFlags(&l2_norm_bufs[i].copy_done, hipEventDisableTiming)); CUDA_RT_CALL(hipMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(hipMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(hipHostMalloc(&l2_norm_bufs[i].h, sizeof(real))); *(l2_norm_bufs[i].h) = 1.0; } CUDA_RT_CALL(hipDeviceSynchronize()); #pragma omp master { if (!csv) printf( "Jacobi relaxation: %d iterations on %d x %d mesh with " "norm " "check every %d iterations\n", iter_max, ny, nx, nccheck); } constexpr int dim_block_x = 32; constexpr int dim_block_y = 4; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (ny + (num_devices * dim_block_y) - 1) / (num_devices * dim_block_y), 1); int iter = 0; #pragma omp master { for (int i = 0; i < 2; ++i) { l2_norms[i] = 1.0; } } CUDA_RT_CALL(hipDeviceSynchronize()); #pragma omp barrier double start = omp_get_wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; // need to wait for other threads due to sharing of a_new and compute_done // between threads #pragma omp barrier CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, compute_done[prev][top], 0)); 
CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, compute_done[prev][bottom], 0)); CUDA_RT_CALL( hipStreamWaitEvent(compute_stream, reset_l2_norm_done[curr][dev_id], 0)); hipLaunchKernelGGL(( jacobi_kernel<dim_block_x, dim_block_y>) , dim3(dim_grid), dim3({dim_block_x), dim_block_y, 1}, 0, compute_stream, a_new[dev_id], a, l2_norm_bufs[curr].d, iy_start, iy_end[dev_id], nx, a_new[top], iy_end[top], a_new[bottom], 0); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipEventRecord(compute_done[curr][dev_id], compute_stream)); // perform L2 norm calculation if ((iter % nccheck) == 0 || (!csv && (iter % 100) == 0)) { // as soon as computation is complete -> D2H-copy L2 norm CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), hipMemcpyDeviceToHost, compute_stream)); CUDA_RT_CALL(hipEventRecord(l2_norm_bufs[curr].copy_done, compute_stream)); // ensure previous D2H-copy is completed before using the // data for calculation CUDA_RT_CALL(hipEventSynchronize(l2_norm_bufs[prev].copy_done)); /* * using atomics instead of critical sections caused a minimal (100ns / * iteration) performance gain */ #pragma omp atomic l2_norms[prev] += *(l2_norm_bufs[prev].h); #pragma omp barrier const real l2_norm_prev = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norm_prev > tol); if (!csv && (iter % 100) == 0) { #pragma omp single printf("%5d, %0.6f\n", iter, l2_norm_prev); } #pragma omp barrier // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[curr].h, sizeof(real), hipMemcpyHostToDevice, reset_l2_norm_stream)); CUDA_RT_CALL( hipEventRecord(reset_l2_norm_done[prev][dev_id], reset_l2_norm_stream)); } else { #pragma omp barrier } std::swap(a_new[dev_id], a); iter++; } CUDA_RT_CALL(hipDeviceSynchronize()); #pragma omp barrier double stop = omp_get_wtime(); POP_RANGE CUDA_RT_CALL( hipMemcpy(a_h + iy_start_global * nx, a + nx, ::min((ny - iy_start_global) * nx, chunk_size * nx) * sizeof(real), hipMemcpyDeviceToHost)); #pragma omp barrier #pragma omp master { result_correct = true; for (int iy = 1; result_correct && (iy < (ny - 1)); ++iy) { for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) { if (::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) { fprintf(stderr, "ERROR: a[%d * %d + %d] = %f does not " "match %f (reference)\n", iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]); result_correct = false; } } } if (result_correct) { if (csv) { printf( "multi_threaded_p2p_opt, %d, %d, %d, %d, %d, 1, " "%f, %f\n", nx, ny, iter_max, nccheck, num_devices, (stop - start), runtime_serial); } else { printf("Num GPUs: %d.\n", num_devices); printf( "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: " "%8.2f, " "efficiency: %8.2f \n", ny, nx, runtime_serial, num_devices, (stop - start), runtime_serial / (stop - start), runtime_serial / (num_devices * (stop - start)) * 100); } } } for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(hipHostFree(l2_norm_bufs[i].h)); CUDA_RT_CALL(hipFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(hipEventDestroy(l2_norm_bufs[i].copy_done)); } CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[1][dev_id])); CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[0][dev_id])); CUDA_RT_CALL(hipEventDestroy(compute_done[1][dev_id])); CUDA_RT_CALL(hipEventDestroy(compute_done[0][dev_id])); CUDA_RT_CALL(hipStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(hipStreamDestroy(compute_stream)); CUDA_RT_CALL(hipFree(a_new[dev_id])); CUDA_RT_CALL(hipFree(a)); if (0 == dev_id) { 
CUDA_RT_CALL(hipHostFree(a_h)); CUDA_RT_CALL(hipHostFree(a_ref_h)); } } } return result_correct ? 0 : 1; } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print) { real* a; real* a_new; hipStream_t compute_stream; hipStream_t copy_l2_norm_stream; hipStream_t reset_l2_norm_stream; hipEvent_t compute_done; hipEvent_t reset_l2_norm_done[2]; real l2_norms[2]; l2_norm_buf l2_norm_bufs[2]; int iy_start = 1; int iy_end = (ny - 1); CUDA_RT_CALL(hipMalloc(&a, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMalloc(&a_new, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMemset(a, 0, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMemset(a_new, 0, nx * ny * sizeof(real))); // Set diriclet boundary conditions on left and right boarder hipLaunchKernelGGL(( initialize_boundaries), dim3(ny / 128 + 1), dim3(128), 0, 0, a, a_new, PI, 0, nx, ny, ny); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipDeviceSynchronize()); CUDA_RT_CALL(hipStreamCreate(&compute_stream)); CUDA_RT_CALL(hipStreamCreate(&copy_l2_norm_stream)); CUDA_RT_CALL(hipStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL(hipEventCreateWithFlags(&compute_done, hipEventDisableTiming)); CUDA_RT_CALL(hipEventCreateWithFlags(&reset_l2_norm_done[0], hipEventDisableTiming)); CUDA_RT_CALL(hipEventCreateWithFlags(&reset_l2_norm_done[1], hipEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(hipEventCreateWithFlags(&l2_norm_bufs[i].copy_done, hipEventDisableTiming)); CUDA_RT_CALL(hipMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(hipMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(hipHostMalloc(&l2_norm_bufs[i].h, sizeof(real))); *(l2_norm_bufs[i].h) = 1.0; } CUDA_RT_CALL(hipDeviceSynchronize()); if (print) printf( "Single GPU Jacobi relaxation: %d iterations on %d x %d mesh with " "norm " "check every %d iterations\n", iter_max, ny, nx, nccheck); constexpr int dim_block_x = 32; constexpr int dim_block_y = 4; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (ny + dim_block_y - 1) / dim_block_y, 1); int iter = 0; for (int i = 0; i < 2; ++i) { l2_norms[i] = 1.0; } double start = omp_get_wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0)); hipLaunchKernelGGL(( jacobi_kernel<dim_block_x, dim_block_y>) , dim3(dim_grid), dim3({dim_block_x), dim_block_y, 1}, 0, compute_stream, a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx, a_new, iy_end, a_new, (iy_start - 1)); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipEventRecord(compute_done, compute_stream)); // perform L2 norm calculation if ((iter % nccheck) == 0 || (print && (iter % 100) == 0)) { // as soon as computation is complete -> D2H-copy L2 norm CUDA_RT_CALL(hipStreamWaitEvent(copy_l2_norm_stream, compute_done, 0)); CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), hipMemcpyDeviceToHost, copy_l2_norm_stream)); CUDA_RT_CALL(hipEventRecord(l2_norm_bufs[curr].copy_done, copy_l2_norm_stream)); // ensure previous D2H copy is completed before using the data for // calculation CUDA_RT_CALL(hipEventSynchronize(l2_norm_bufs[prev].copy_done)); l2_norms[prev] = *(l2_norm_bufs[prev].h); l2_norms[prev] = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norms[prev] > tol); if (print && 
(iter % 100) == 0) { printf("%5d, %0.6f\n", iter, l2_norms[prev]); } // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[prev].h, sizeof(real), hipMemcpyHostToDevice, reset_l2_norm_stream)); CUDA_RT_CALL(hipEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream)); } std::swap(a_new, a); iter++; } CUDA_RT_CALL(hipDeviceSynchronize()); POP_RANGE double stop = omp_get_wtime(); CUDA_RT_CALL(hipMemcpy(a_ref_h, a, nx * ny * sizeof(real), hipMemcpyDeviceToHost)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(hipHostFree(l2_norm_bufs[i].h)); CUDA_RT_CALL(hipFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(hipEventDestroy(l2_norm_bufs[i].copy_done)); } CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[1])); CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[0])); CUDA_RT_CALL(hipEventDestroy(compute_done)); CUDA_RT_CALL(hipStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(hipStreamDestroy(copy_l2_norm_stream)); CUDA_RT_CALL(hipStreamDestroy(compute_stream)); CUDA_RT_CALL(hipFree(a_new)); CUDA_RT_CALL(hipFree(a)); return (stop - start); }
13644c00c44e7413466749bdf447c4ce8bec139e.cu
/* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <algorithm> #include <cmath> #include <cstdio> #include <iostream> #include <sstream> #include <omp.h> #ifdef HAVE_CUB #include <cub/block/block_reduce.cuh> #endif // HAVE_CUB #ifdef USE_NVTX #include <nvToolsExt.h> const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff}; const int num_colors = sizeof(colors) / sizeof(uint32_t); #define PUSH_RANGE(name, cid) \ { \ int color_id = cid; \ color_id = color_id % num_colors; \ nvtxEventAttributes_t eventAttrib = {0}; \ eventAttrib.version = NVTX_VERSION; \ eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \ eventAttrib.colorType = NVTX_COLOR_ARGB; \ eventAttrib.color = colors[color_id]; \ eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \ eventAttrib.message.ascii = name; \ nvtxRangePushEx(&eventAttrib); \ } #define POP_RANGE nvtxRangePop(); #else #define PUSH_RANGE(name, cid) #define POP_RANGE #endif #define CUDA_RT_CALL(call) \ { \ cudaError_t cudaStatus = call; \ if (cudaSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \ } constexpr int MAX_NUM_DEVICES = 32; typedef float real; constexpr real tol = 1.0e-8; const real PI = 2.0 * std::asin(1.0); __global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a, const real pi, const int offset, const int nx, const int my_ny, const int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) { const real y0 = sin(2.0 * pi * (offset + iy) / (ny - 1)); a[iy * nx + 0] = y0; a[iy * nx + (nx - 1)] = y0; a_new[iy * nx + 0] = y0; a_new[iy * nx + (nx - 1)] = y0; } } template <int BLOCK_DIM_X, int BLOCK_DIM_Y> __global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a, real* __restrict__ const l2_norm, const int iy_start, const int iy_end, const int nx, real* __restrict__ const a_new_top, 
const int top_iy, real* __restrict__ const a_new_bottom, const int bottom_iy) { #ifdef HAVE_CUB typedef cub::BlockReduce<real, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; #endif // HAVE_CUB int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start; int ix = blockIdx.x * blockDim.x + threadIdx.x + 1; real local_l2_norm = 0.0; if (iy < iy_end && ix < (nx - 1)) { const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; if (iy_start == iy) { a_new_top[top_iy * nx + ix] = new_val; } if ((iy_end - 1) == iy) { a_new_bottom[bottom_iy * nx + ix] = new_val; } real residue = new_val - a[iy * nx + ix]; local_l2_norm += residue * residue; } #ifdef HAVE_CUB real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm); if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm); #else atomicAdd(l2_norm, local_l2_norm); #endif // HAVE_CUB } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print); template <typename T> T get_argval(char** begin, char** end, const std::string& arg, const T default_val) { T argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, arg); if (itr != end) { return true; } return false; } struct l2_norm_buf { cudaEvent_t copy_done; real* d; real* h; }; int main(int argc, char* argv[]) { const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000); const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1); const int nx = get_argval<int>(argv, argv + argc, "-nx", 7168); const int ny = get_argval<int>(argv, argv + argc, "-ny", 7168); const bool csv = get_arg(argv, argv + argc, "-csv"); if (nccheck != 1) { printf("Only nccheck = 1 is supported\n"); exit(1); } real* a_new[MAX_NUM_DEVICES]; real* a_ref_h; real* a_h; double runtime_serial = 0.0; int iy_end[MAX_NUM_DEVICES]; cudaEvent_t compute_done[2][MAX_NUM_DEVICES]; cudaEvent_t reset_l2_norm_done[2][MAX_NUM_DEVICES]; bool result_correct = true; bool p2p_works = true; int num_devices = 0; CUDA_RT_CALL(cudaGetDeviceCount(&num_devices)); real l2_norms[2]; #pragma omp parallel num_threads(num_devices) shared(l2_norms) { real* a; cudaStream_t compute_stream; cudaStream_t reset_l2_norm_stream; l2_norm_buf l2_norm_bufs[2]; // Ensure correctness if ny%size != 0 int chunk_size = std::ceil((1.0 * (ny - 2)) / num_devices); int dev_id = omp_get_thread_num(); CUDA_RT_CALL(cudaSetDevice(dev_id)); CUDA_RT_CALL(cudaSetDeviceFlags(cudaDeviceScheduleSpin)); CUDA_RT_CALL(cudaFree(0)); if (0 == dev_id) { CUDA_RT_CALL(cudaMallocHost(&a_ref_h, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMallocHost(&a_h, nx * ny * sizeof(real))); runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h, nccheck, !csv); } #pragma omp barrier const int top = dev_id > 0 ? 
dev_id - 1 : (num_devices - 1); const int bottom = (dev_id + 1) % num_devices; if (top != dev_id) { int canAccessPeer = 0; CUDA_RT_CALL(cudaDeviceCanAccessPeer(&canAccessPeer, dev_id, top)); if (canAccessPeer) { CUDA_RT_CALL(cudaDeviceEnablePeerAccess(top, 0)); } else { std::cerr << "P2P access required from " << dev_id << " to " << top << std::endl; #pragma omp critical { if (p2p_works) p2p_works = false; } } if (top != bottom) { canAccessPeer = 0; CUDA_RT_CALL(cudaDeviceCanAccessPeer(&canAccessPeer, dev_id, bottom)); if (canAccessPeer) { CUDA_RT_CALL(cudaDeviceEnablePeerAccess(bottom, 0)); } else { std::cerr << "P2P access required from " << dev_id << " to " << bottom << std::endl; #pragma omp critical { if (p2p_works) p2p_works = false; } } } } #pragma omp barrier if (p2p_works) { CUDA_RT_CALL(cudaMalloc(&a, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(cudaMalloc(a_new + dev_id, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(cudaMemset(a, 0, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(cudaMemset(a_new[dev_id], 0, nx * (chunk_size + 2) * sizeof(real))); // Calculate local domain boundaries int iy_start_global = dev_id * chunk_size + 1; int iy_end_global = iy_start_global + chunk_size - 1; // Do not process boundaries iy_end_global = std::min(iy_end_global, ny - 2); int iy_start = 1; iy_end[dev_id] = (iy_end_global - iy_start_global + 1) + iy_start; // Set diriclet boundary conditions on left and right boarder initialize_boundaries<<<(ny / num_devices) / 128 + 1, 128>>>( a, a_new[dev_id], PI, iy_start_global - 1, nx, (chunk_size + 2), ny); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaDeviceSynchronize()); CUDA_RT_CALL(cudaStreamCreate(&compute_stream)); CUDA_RT_CALL(cudaStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL( cudaEventCreateWithFlags(compute_done[0] + dev_id, cudaEventDisableTiming)); CUDA_RT_CALL( cudaEventCreateWithFlags(compute_done[1] + dev_id, cudaEventDisableTiming)); CUDA_RT_CALL( cudaEventCreateWithFlags(reset_l2_norm_done[0] + dev_id, cudaEventDisableTiming)); CUDA_RT_CALL( cudaEventCreateWithFlags(reset_l2_norm_done[1] + dev_id, cudaEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL( cudaEventCreateWithFlags(&l2_norm_bufs[i].copy_done, cudaEventDisableTiming)); CUDA_RT_CALL(cudaMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(cudaMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(cudaMallocHost(&l2_norm_bufs[i].h, sizeof(real))); *(l2_norm_bufs[i].h) = 1.0; } CUDA_RT_CALL(cudaDeviceSynchronize()); #pragma omp master { if (!csv) printf( "Jacobi relaxation: %d iterations on %d x %d mesh with " "norm " "check every %d iterations\n", iter_max, ny, nx, nccheck); } constexpr int dim_block_x = 32; constexpr int dim_block_y = 4; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (ny + (num_devices * dim_block_y) - 1) / (num_devices * dim_block_y), 1); int iter = 0; #pragma omp master { for (int i = 0; i < 2; ++i) { l2_norms[i] = 1.0; } } CUDA_RT_CALL(cudaDeviceSynchronize()); #pragma omp barrier double start = omp_get_wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; // need to wait for other threads due to sharing of a_new and compute_done // between threads #pragma omp barrier CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, compute_done[prev][top], 0)); 
CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, compute_done[prev][bottom], 0)); CUDA_RT_CALL( cudaStreamWaitEvent(compute_stream, reset_l2_norm_done[curr][dev_id], 0)); jacobi_kernel<dim_block_x, dim_block_y> <<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>( a_new[dev_id], a, l2_norm_bufs[curr].d, iy_start, iy_end[dev_id], nx, a_new[top], iy_end[top], a_new[bottom], 0); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaEventRecord(compute_done[curr][dev_id], compute_stream)); // perform L2 norm calculation if ((iter % nccheck) == 0 || (!csv && (iter % 100) == 0)) { // as soon as computation is complete -> D2H-copy L2 norm CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), cudaMemcpyDeviceToHost, compute_stream)); CUDA_RT_CALL(cudaEventRecord(l2_norm_bufs[curr].copy_done, compute_stream)); // ensure previous D2H-copy is completed before using the // data for calculation CUDA_RT_CALL(cudaEventSynchronize(l2_norm_bufs[prev].copy_done)); /* * using atomics instead of critical sections caused a minimal (100ns / * iteration) performance gain */ #pragma omp atomic l2_norms[prev] += *(l2_norm_bufs[prev].h); #pragma omp barrier const real l2_norm_prev = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norm_prev > tol); if (!csv && (iter % 100) == 0) { #pragma omp single printf("%5d, %0.6f\n", iter, l2_norm_prev); } #pragma omp barrier // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[curr].h, sizeof(real), cudaMemcpyHostToDevice, reset_l2_norm_stream)); CUDA_RT_CALL( cudaEventRecord(reset_l2_norm_done[prev][dev_id], reset_l2_norm_stream)); } else { #pragma omp barrier } std::swap(a_new[dev_id], a); iter++; } CUDA_RT_CALL(cudaDeviceSynchronize()); #pragma omp barrier double stop = omp_get_wtime(); POP_RANGE CUDA_RT_CALL( cudaMemcpy(a_h + iy_start_global * nx, a + nx, std::min((ny - iy_start_global) * nx, chunk_size * nx) * sizeof(real), cudaMemcpyDeviceToHost)); #pragma omp barrier #pragma omp master { result_correct = true; for (int iy = 1; result_correct && (iy < (ny - 1)); ++iy) { for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) { if (std::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) { fprintf(stderr, "ERROR: a[%d * %d + %d] = %f does not " "match %f (reference)\n", iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]); result_correct = false; } } } if (result_correct) { if (csv) { printf( "multi_threaded_p2p_opt, %d, %d, %d, %d, %d, 1, " "%f, %f\n", nx, ny, iter_max, nccheck, num_devices, (stop - start), runtime_serial); } else { printf("Num GPUs: %d.\n", num_devices); printf( "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: " "%8.2f, " "efficiency: %8.2f \n", ny, nx, runtime_serial, num_devices, (stop - start), runtime_serial / (stop - start), runtime_serial / (num_devices * (stop - start)) * 100); } } } for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaFreeHost(l2_norm_bufs[i].h)); CUDA_RT_CALL(cudaFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(cudaEventDestroy(l2_norm_bufs[i].copy_done)); } CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[1][dev_id])); CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[0][dev_id])); CUDA_RT_CALL(cudaEventDestroy(compute_done[1][dev_id])); CUDA_RT_CALL(cudaEventDestroy(compute_done[0][dev_id])); CUDA_RT_CALL(cudaStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(cudaStreamDestroy(compute_stream)); CUDA_RT_CALL(cudaFree(a_new[dev_id])); CUDA_RT_CALL(cudaFree(a)); if (0 == dev_id) { 
CUDA_RT_CALL(cudaFreeHost(a_h)); CUDA_RT_CALL(cudaFreeHost(a_ref_h)); } } } return result_correct ? 0 : 1; } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print) { real* a; real* a_new; cudaStream_t compute_stream; cudaStream_t copy_l2_norm_stream; cudaStream_t reset_l2_norm_stream; cudaEvent_t compute_done; cudaEvent_t reset_l2_norm_done[2]; real l2_norms[2]; l2_norm_buf l2_norm_bufs[2]; int iy_start = 1; int iy_end = (ny - 1); CUDA_RT_CALL(cudaMalloc(&a, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMalloc(&a_new, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMemset(a, 0, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMemset(a_new, 0, nx * ny * sizeof(real))); // Set diriclet boundary conditions on left and right boarder initialize_boundaries<<<ny / 128 + 1, 128>>>(a, a_new, PI, 0, nx, ny, ny); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaDeviceSynchronize()); CUDA_RT_CALL(cudaStreamCreate(&compute_stream)); CUDA_RT_CALL(cudaStreamCreate(&copy_l2_norm_stream)); CUDA_RT_CALL(cudaStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL(cudaEventCreateWithFlags(&compute_done, cudaEventDisableTiming)); CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[0], cudaEventDisableTiming)); CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[1], cudaEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaEventCreateWithFlags(&l2_norm_bufs[i].copy_done, cudaEventDisableTiming)); CUDA_RT_CALL(cudaMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(cudaMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(cudaMallocHost(&l2_norm_bufs[i].h, sizeof(real))); *(l2_norm_bufs[i].h) = 1.0; } CUDA_RT_CALL(cudaDeviceSynchronize()); if (print) printf( "Single GPU Jacobi relaxation: %d iterations on %d x %d mesh with " "norm " "check every %d iterations\n", iter_max, ny, nx, nccheck); constexpr int dim_block_x = 32; constexpr int dim_block_y = 4; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (ny + dim_block_y - 1) / dim_block_y, 1); int iter = 0; for (int i = 0; i < 2; ++i) { l2_norms[i] = 1.0; } double start = omp_get_wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0)); jacobi_kernel<dim_block_x, dim_block_y> <<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>( a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx, a_new, iy_end, a_new, (iy_start - 1)); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaEventRecord(compute_done, compute_stream)); // perform L2 norm calculation if ((iter % nccheck) == 0 || (print && (iter % 100) == 0)) { // as soon as computation is complete -> D2H-copy L2 norm CUDA_RT_CALL(cudaStreamWaitEvent(copy_l2_norm_stream, compute_done, 0)); CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), cudaMemcpyDeviceToHost, copy_l2_norm_stream)); CUDA_RT_CALL(cudaEventRecord(l2_norm_bufs[curr].copy_done, copy_l2_norm_stream)); // ensure previous D2H copy is completed before using the data for // calculation CUDA_RT_CALL(cudaEventSynchronize(l2_norm_bufs[prev].copy_done)); l2_norms[prev] = *(l2_norm_bufs[prev].h); l2_norms[prev] = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norms[prev] > tol); if (print && (iter % 100) == 0) { 
printf("%5d, %0.6f\n", iter, l2_norms[prev]); } // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[prev].h, sizeof(real), cudaMemcpyHostToDevice, reset_l2_norm_stream)); CUDA_RT_CALL(cudaEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream)); } std::swap(a_new, a); iter++; } CUDA_RT_CALL(cudaDeviceSynchronize()); POP_RANGE double stop = omp_get_wtime(); CUDA_RT_CALL(cudaMemcpy(a_ref_h, a, nx * ny * sizeof(real), cudaMemcpyDeviceToHost)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaFreeHost(l2_norm_bufs[i].h)); CUDA_RT_CALL(cudaFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(cudaEventDestroy(l2_norm_bufs[i].copy_done)); } CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[1])); CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[0])); CUDA_RT_CALL(cudaEventDestroy(compute_done)); CUDA_RT_CALL(cudaStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(cudaStreamDestroy(copy_l2_norm_stream)); CUDA_RT_CALL(cudaStreamDestroy(compute_stream)); CUDA_RT_CALL(cudaFree(a_new)); CUDA_RT_CALL(cudaFree(a)); return (stop - start); }
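The Jacobi solvers above wrap every CUDA runtime call in CUDA_RT_CALL, whose definition sits in an earlier part of the original source and is not reproduced here. A minimal sketch of the kind of check such a macro typically performs (the message format and the decision to abort are assumptions, not the file's actual definition):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical stand-in for CUDA_RT_CALL: evaluate the call once, then abort
// with the decoded error string and the source location if it did not succeed.
#define CUDA_RT_CALL(call)                                                   \
    do {                                                                     \
        cudaError_t status_ = (call);                                        \
        if (status_ != cudaSuccess) {                                        \
            std::fprintf(stderr, "CUDA error %s at %s:%d\n",                 \
                         cudaGetErrorString(status_), __FILE__, __LINE__);   \
            std::exit(EXIT_FAILURE);                                         \
        }                                                                    \
    } while (0)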
81ac30768b44b5bd19289993e40aeff10e1e5877.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "matrixMulGPU.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; hipMalloc(&a, XSIZE*YSIZE); int *b = NULL; hipMalloc(&b, XSIZE*YSIZE); int *c = NULL; hipMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( matrixMulGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( matrixMulGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( matrixMulGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
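hipMalloc, like cudaMalloc, takes its size argument as a byte count, so the harness above allocates XSIZE*YSIZE bytes rather than XSIZE*YSIZE int elements. A sketch of the same allocations sized per element, reusing the harness's names (whether the benchmark deliberately uses byte-sized buffers is an assumption, so this is only an illustration):

// Sketch: device buffers large enough for XSIZE*YSIZE int elements.
// Error handling is omitted, as in the harness itself.
int *a = NULL;
hipMalloc(&a, sizeof(int) * XSIZE * YSIZE);
int *b = NULL;
hipMalloc(&b, sizeof(int) * XSIZE * YSIZE);
int *c = NULL;
hipMalloc(&c, sizeof(int) * XSIZE * YSIZE);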
81ac30768b44b5bd19289993e40aeff10e1e5877.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "matrixMulGPU.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); int *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); int *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); matrixMulGPU<<<gridBlock,threadBlock>>>(a,b,c); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { matrixMulGPU<<<gridBlock,threadBlock>>>(a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { matrixMulGPU<<<gridBlock,threadBlock>>>(a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
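The pair above shows exactly what hipify does to kernel launches: the CUDA triple-chevron syntax becomes an explicit hipLaunchKernelGGL call whose leading arguments are the grid, the block, the dynamic shared-memory size, and the stream, followed by the kernel arguments. A minimal sketch of the correspondence using the benchmark's own launch:

// CUDA form: kernel<<<grid, block, sharedMemBytes, stream>>>(args...);
matrixMulGPU<<<gridBlock, threadBlock>>>(a, b, c);

// HIP form emitted by hipify (0 bytes of dynamic shared memory, default stream):
hipLaunchKernelGGL(matrixMulGPU, gridBlock, threadBlock, 0, 0, a, b, c);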
4be4bed3f4fda1c3a9649b4bf634e1229eb5fe7e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMath.cu" #else THC_API void THCTensor_(fill)(THCState* state, THCTensor *self_, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (!THC_pointwiseApply1( state, self_, TensorFillOp<real>(value))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(zero)(THCState *state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (THCTensor_(isContiguous)(state, self_)) { THCudaCheck(hipMemsetAsync(THCTensor_(data)(state, self_), 0, sizeof(real) * THCTensor_(nElement)(state, self_), THCState_getCurrentStream(state))); } else { if (!THC_pointwiseApply1( state, self_, TensorFillOp<real>(ScalarConvert<int, real>::to(0)))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(zeros)(THCState *state, THCTensor *r_, THLongStorage *size) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THCTensor_(resize)(state, r_, size, NULL); THCTensor_(zero)(state, r_); } THC_API void THCTensor_(ones)(THCState *state, THCTensor *r_, THLongStorage *size) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THCTensor_(resize)(state, r_, size, NULL); THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1)); } THC_API void THCTensor_(reshape)(THCState *state, THCTensor *r_, THCTensor *t, THLongStorage *size) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, t)); THCTensor_(resize)(state, r_, size, NULL); THCTensor_(copy)(state, r_, t); } ptrdiff_t THCTensor_(numel)(THCState *state, THCTensor *t) { return THCTensor_(nElement)(state, t); } void THCTensor_(cat)(THCState *state, THCTensor *result, THCTensor *ta, THCTensor *tb, int dimension) { THCTensor* inputs[2]; inputs[0] = ta; inputs[1] = tb; THCTensor_(catArray)(state, result, inputs, 2, dimension); } void THCTensor_(catArray)(THCState *state, THCTensor *result, THCTensor **inputs, int numInputs, int dimension) { THLongStorage *size; int i, j, cohortMax; int64_t offset; bool hasEmptyInput = false; // Even in the case where dimension is negative (i.e. when we want // to cat along the last dimension), this logic still works, as the // loop below will overwrite the value int maxDim = dimension + 1; // cat_dimension is the actual dimension we cat along int cat_dimension = dimension; for (i = 0; i < numInputs; i++) { int inputDim = THCTensor_(nDimension)(state, inputs[i]); hasEmptyInput |= !inputDim; maxDim = THMax(maxDim, inputDim); } // In the event that the user specified -1 as the concat dimension, then // we want to pick the maxDim as dimension to cat along (and thus maxDim - 1 as the // value due to 0-based indexing). If the maxDim is // 0 (i.e. we are catting all // empty tensors), then we set cat_dimension to be 0 if (dimension + TH_INDEX_BASE == -1) { cat_dimension = maxDim ? (maxDim - 1) : 0; } THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE); size = THLongStorage_newWithSize(maxDim); for(i = 0; i < maxDim; i++) { // dimSize is either the size of the dim if it exists, either 1 if #dim > 0, otherwise 0 int64_t dimSize = i < THCTensor_(nDimension)(state, inputs[0]) ? 
THCTensor_(size)(state, inputs[0], i) : THMin(THCTensor_(nDimension)(state, inputs[0]), 1); if (i == cat_dimension) { for (j = 1; j < numInputs; j++) { // accumulate the size over the dimension we want to cat on. // Empty tensors are allowed dimSize += i < THCTensor_(nDimension)(state, inputs[j]) ? THCTensor_(size)(state, inputs[j], i) : THMin(THCTensor_(nDimension)(state, inputs[j]), 1); } } else { for (j = 1; j < numInputs; j++) { int64_t sz = i < THCTensor_(nDimension)(state, inputs[j]) ? THCTensor_(size)(state, inputs[j], i) : THMin(THCTensor_(nDimension)(state, inputs[j]), 1); // If it's a dimension we're not catting on // Then fail if sizes are different AND > 0 if (dimSize != sz && dimSize && sz) { THLongStorage_free(size); THError("inconsistent tensor sizes"); } else if(!dimSize) { dimSize = sz; } } } size->data[i] = dimSize; } THCTensor_(resize)(state, result, size, NULL); THLongStorage_free(size); // We parallelize the copy if all 6 conditions pass: // // 1. There is more than one input tensor // 2. No empty inputs // 3. The result tensor is 32-bit indexable // 4. The number of dimensions is <= 4 // 5. All input tensors are contiguous (output tensor may be non-contig) // 6. All input tensors can use 32-bit indexing // 7. All input tensors are on the same device if (numInputs > 1 && !hasEmptyInput && THCTensor_(nDimension)(state, result) <= CAT_ARRAY_MAX_INPUT_DIMS && TensorUtils<THCTensor>::canUse32BitIndexMath(state, result) && TensorUtils<THCTensor>::allContiguous(state, inputs, numInputs) && TensorUtils<THCTensor>::all32BitIndexable(state, inputs, numInputs) && TensorUtils<THCTensor>::allSameDevice(state, inputs, numInputs)) { // First, let's set up our kernel parameters. We start with a raw pointer to the storage // for the output Tensor. real *data = THCTensor_(data)(state, result); // Kernel Parameter CatArrInputTensor<real, unsigned int> stackInputs[CAT_ARRAY_BATCH_SIZE]; CatArrInputTensor<real, unsigned int> *d_inputs; // Attempt to re-use stream's scratch space for the input metadata bool usedScratch = false; size_t tensorMetadataSize = sizeof(CatArrInputTensor<real, unsigned int>) * CAT_ARRAY_BATCH_SIZE; if (THCState_getCurrentDeviceScratchSpaceSize(state) > tensorMetadataSize) { void* space = THCState_getCurrentDeviceScratchSpace(state); if (space) { d_inputs = (CatArrInputTensor<real, unsigned int> *) space; usedScratch = true; } } if (!usedScratch) { // Fallback to allocating GPU memory THCudaCheck(THCudaMalloc(state, (void**) &d_inputs, tensorMetadataSize)); } OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param; // Next, let's initialize the size, stride arrays for the output Tensor. for (i = 0; i < maxDim; ++i) { param.outputSize[i] = THCTensor_(size)(state, result, i); param.outputStride[i] = THCTensor_(stride)(state, result, i); } // Template Declarations for dim = 1, 2, 3, 4 #define HANDLE_CASE(DIMS) \ hipLaunchKernelGGL(( CatArrayBatchedCopy<real, unsigned int, DIMS>), dim3(applyGrid), dim3(applyBlock), 0, 0, data, d_inputs, param, cat_dimension, param.outputStride[cat_dimension]); // Now we loop offset = 0; for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) { cohortMax = 0; for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) { int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[i+j]) ? 
THCTensor_(size)(state, inputs[i+j], cat_dimension) : 1; stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]); stackInputs[j].offset = offset; stackInputs[j].dimSize = dimSize; stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]); cohortMax = cohortMax > stackInputs[j].nElements ? cohortMax : stackInputs[j].nElements; // update offset offset += dimSize; } THCudaCheck(hipMemcpy(d_inputs, stackInputs, j * sizeof(CatArrInputTensor<real, unsigned int>), hipMemcpyHostToDevice)); // Next, let's consider how we set our kernel launch parameters. // We borrow from THCApply, which the kernel's internal indexing // is based on. dim3 applyBlock = getApplyBlock(); // We also re-use the applyGrid - but note that we use the maximum number of // elements for a given tensor in this grouping to determine the count dim3 applyGrid; getApplyGrid(state, cohortMax, applyGrid); // Next, we set our grid's y component to be the number of tensors in // the batch. This will allow the kernel to determine which input // tensor it is responsible for copying applyGrid.y = j; switch (maxDim) { case 1: HANDLE_CASE(1); break; case 2: HANDLE_CASE(2); break; case 3: HANDLE_CASE(3); break; case 4: HANDLE_CASE(4); break; } THCudaCheck(hipGetLastError()); } if (!usedScratch) { THCudaCheck(THCudaFree(state, (void *)d_inputs)); } #undef HANDLE_CASE } else { offset = 0; for (j = 0; j < numInputs; j++) { // No reason to copy when input is empty if (!THCTensor_(nDimension)(state, inputs[j])) continue; int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[j]) ? THCTensor_(size)(state, inputs[j], cat_dimension) : 1; THCTensor *nt = THCTensor_(newWithTensor)(state, result); THCTensor_(narrow)(state, nt, NULL, cat_dimension, offset, dimSize); THCTensor_(copy)(state, nt, inputs[j]); THCTensor_(free)(state, nt); offset += dimSize; } } } void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self )); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor)); using namespace thrust::placeholders; THCThrustAllocator thrustAlloc(state); self = THCTensor_(newContiguous)(state, self); thrust::device_ptr<real> self_data(THCTensor_(data)(state, self)); int num_dim = THCTensor_(nDimension)(state, self); int64_t N = THCTensor_(nElement)(state, self); THCudaLongTensor_resize2d(state, tensor, N, num_dim); tensor = THCudaLongTensor_newContiguous(state, tensor); thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor)); thrust::counting_iterator<int64_t> idxfirst(0); thrust::counting_iterator<int64_t> idxlast = idxfirst + N; typedef thrust::device_ptr<int64_t> Iter; strided_range<Iter> strided_tensor(tensor_data, tensor_data+N*num_dim, num_dim); #if TORCH_HIP_VERSION >= 7000 hipStream_t stream = THCState_getCurrentStream(state); #endif strided_range<Iter>::iterator dend = thrust::copy_if( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(stream), #endif idxfirst, idxlast, self_data, strided_tensor.begin(), NonZeroOp<real>() ); int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend); int64_t div = 1; for (int dim = num_dim-1; dim >= 0; dim--) { strided_range<Iter> stride_dim(tensor_data+dim, tensor_data+N*num_dim, num_dim); thrust::transform( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(stream), #endif strided_tensor.begin(), strided_tensor.end(), stride_dim.begin(), idx_functor(div, self->size[dim]) ); div *= self->size[dim]; } THCudaLongTensor_resize2d(state, tensor, 
num_nonzeros, num_dim); THCTensor_(free)(state, self); THCudaLongTensor_free(state, tensor); THCudaCheck(hipGetLastError()); } void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); int nDimension = THCTensor_(nDimension)(state, src_); THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector"); if (nDimension == 2) { int64_t stride0 = THCTensor_(stride)(state, src_, 0); int64_t stride1 = THCTensor_(stride)(state, src_, 1); int64_t size0 = THCTensor_(size)(state, src_, 0); int64_t size1 = THCTensor_(size)(state, src_, 1); int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1); THCTensor_(resize1d)(state, self_, size); int64_t strideSelf = THCTensor_(stride)(state, self_, 0); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x))); int64_t start = (k >= 0 ? k * stride1 : -k * stride0); hipLaunchKernelGGL(( THCTensor_copyFromDiagonal<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf); } else { ptrdiff_t totalElements = THCTensor_(nElement)(state, src_); ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k; int64_t strideSrc = THCTensor_(stride)(state, src_, 0); THCTensor_(resize2d)(state, self_, size, size); THCTensor_(zero)(state, self_); int64_t stride0 = THCTensor_(stride)(state, self_, 0); int64_t stride1 = THCTensor_(stride)(state, self_, 1); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x))); ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0); hipLaunchKernelGGL(( THCTensor_copyToDiagonal<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc); } THCudaCheck(hipGetLastError()); } accreal THCTensor_(trace)(THCState *state, THCTensor *src_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_)); THArgCheck((src_->nDimension == 2), 1, "expected a matrix"); THCTensor *diag = THCTensor_(new)(state); THCTensor_(diag)(state, diag, src_, 0); accreal trace = THCTensor_(sumall)(state, diag); THCTensor_(free)(state, diag); return trace; } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 1) THCTensor_(fill)(state, r_, a); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? 
r_ // if r_ is contiguous we can direct work on it : THCTensor_(newContiguous)(state, r_); real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a), ScalarConvert<int64_t,real>::to(n - 1)); LinspaceOp<real> linspace_method(a, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, linspace_method); if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_ THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(hipGetLastError()); } void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<real>::exp10(a)); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? r_ : THCTensor_(newContiguous)(state, r_); real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a), ScalarConvert<int64_t,real>::to(n - 1)); LogspaceOp<real> logspace_method(a, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, logspace_method); if (!THCTensor_(isContiguous)(state, r_)) { THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(hipGetLastError()); } #endif void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be a non-null number"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound incoherent with step sign"); ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(isContiguous)(state, r_) ? r_ : THCTensor_(newContiguous)(state, r_); LinspaceOp<real,accreal> linspace_method(xmin, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); if (!THCTensor_(isContiguous)(state, r_)) THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(hipGetLastError()); } #endif
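nonzero() above collects the linear indices of the non-zero entries by pushing a counting_iterator through copy_if, with the tensor's values acting as the stencil for the predicate. A self-contained sketch of that index-compaction idea, using the default execution policy and no custom allocator (the THC wrappers and the stream-bound thrust::hip::par policy are left out):

#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/iterator/counting_iterator.h>

struct NonZero {
    __host__ __device__ bool operator()(float v) const { return v != 0.f; }
};

// Writes the index i of every position where vals[i] != 0 into out and returns
// how many indices were written. out must have at least vals.size() elements.
long long nonzero_indices(const thrust::device_vector<float>& vals,
                          thrust::device_vector<long long>& out) {
    thrust::counting_iterator<long long> first(0);
    thrust::counting_iterator<long long> last = first + vals.size();
    // copy_if with a stencil: the predicate inspects vals, but the values that
    // get copied are the indices themselves.
    auto end = thrust::copy_if(first, last, vals.begin(), out.begin(), NonZero());
    return end - out.begin();
}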
4be4bed3f4fda1c3a9649b4bf634e1229eb5fe7e.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMath.cu" #else THC_API void THCTensor_(fill)(THCState* state, THCTensor *self_, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (!THC_pointwiseApply1( state, self_, TensorFillOp<real>(value))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(zero)(THCState *state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (THCTensor_(isContiguous)(state, self_)) { THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_), 0, sizeof(real) * THCTensor_(nElement)(state, self_), THCState_getCurrentStream(state))); } else { if (!THC_pointwiseApply1( state, self_, TensorFillOp<real>(ScalarConvert<int, real>::to(0)))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(zeros)(THCState *state, THCTensor *r_, THLongStorage *size) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THCTensor_(resize)(state, r_, size, NULL); THCTensor_(zero)(state, r_); } THC_API void THCTensor_(ones)(THCState *state, THCTensor *r_, THLongStorage *size) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THCTensor_(resize)(state, r_, size, NULL); THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1)); } THC_API void THCTensor_(reshape)(THCState *state, THCTensor *r_, THCTensor *t, THLongStorage *size) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, t)); THCTensor_(resize)(state, r_, size, NULL); THCTensor_(copy)(state, r_, t); } ptrdiff_t THCTensor_(numel)(THCState *state, THCTensor *t) { return THCTensor_(nElement)(state, t); } void THCTensor_(cat)(THCState *state, THCTensor *result, THCTensor *ta, THCTensor *tb, int dimension) { THCTensor* inputs[2]; inputs[0] = ta; inputs[1] = tb; THCTensor_(catArray)(state, result, inputs, 2, dimension); } void THCTensor_(catArray)(THCState *state, THCTensor *result, THCTensor **inputs, int numInputs, int dimension) { THLongStorage *size; int i, j, cohortMax; int64_t offset; bool hasEmptyInput = false; // Even in the case where dimension is negative (i.e. when we want // to cat along the last dimension), this logic still works, as the // loop below will overwrite the value int maxDim = dimension + 1; // cat_dimension is the actual dimension we cat along int cat_dimension = dimension; for (i = 0; i < numInputs; i++) { int inputDim = THCTensor_(nDimension)(state, inputs[i]); hasEmptyInput |= !inputDim; maxDim = THMax(maxDim, inputDim); } // In the event that the user specified -1 as the concat dimension, then // we want to pick the maxDim as dimension to cat along (and thus maxDim - 1 as the // value due to 0-based indexing). If the maxDim is // 0 (i.e. we are catting all // empty tensors), then we set cat_dimension to be 0 if (dimension + TH_INDEX_BASE == -1) { cat_dimension = maxDim ? (maxDim - 1) : 0; } THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE); size = THLongStorage_newWithSize(maxDim); for(i = 0; i < maxDim; i++) { // dimSize is either the size of the dim if it exists, either 1 if #dim > 0, otherwise 0 int64_t dimSize = i < THCTensor_(nDimension)(state, inputs[0]) ? THCTensor_(size)(state, inputs[0], i) : THMin(THCTensor_(nDimension)(state, inputs[0]), 1); if (i == cat_dimension) { for (j = 1; j < numInputs; j++) { // accumulate the size over the dimension we want to cat on. 
// Empty tensors are allowed dimSize += i < THCTensor_(nDimension)(state, inputs[j]) ? THCTensor_(size)(state, inputs[j], i) : THMin(THCTensor_(nDimension)(state, inputs[j]), 1); } } else { for (j = 1; j < numInputs; j++) { int64_t sz = i < THCTensor_(nDimension)(state, inputs[j]) ? THCTensor_(size)(state, inputs[j], i) : THMin(THCTensor_(nDimension)(state, inputs[j]), 1); // If it's a dimension we're not catting on // Then fail if sizes are different AND > 0 if (dimSize != sz && dimSize && sz) { THLongStorage_free(size); THError("inconsistent tensor sizes"); } else if(!dimSize) { dimSize = sz; } } } size->data[i] = dimSize; } THCTensor_(resize)(state, result, size, NULL); THLongStorage_free(size); // We parallelize the copy if all 6 conditions pass: // // 1. There is more than one input tensor // 2. No empty inputs // 3. The result tensor is 32-bit indexable // 4. The number of dimensions is <= 4 // 5. All input tensors are contiguous (output tensor may be non-contig) // 6. All input tensors can use 32-bit indexing // 7. All input tensors are on the same device if (numInputs > 1 && !hasEmptyInput && THCTensor_(nDimension)(state, result) <= CAT_ARRAY_MAX_INPUT_DIMS && TensorUtils<THCTensor>::canUse32BitIndexMath(state, result) && TensorUtils<THCTensor>::allContiguous(state, inputs, numInputs) && TensorUtils<THCTensor>::all32BitIndexable(state, inputs, numInputs) && TensorUtils<THCTensor>::allSameDevice(state, inputs, numInputs)) { // First, let's set up our kernel parameters. We start with a raw pointer to the storage // for the output Tensor. real *data = THCTensor_(data)(state, result); // Kernel Parameter CatArrInputTensor<real, unsigned int> stackInputs[CAT_ARRAY_BATCH_SIZE]; CatArrInputTensor<real, unsigned int> *d_inputs; // Attempt to re-use stream's scratch space for the input metadata bool usedScratch = false; size_t tensorMetadataSize = sizeof(CatArrInputTensor<real, unsigned int>) * CAT_ARRAY_BATCH_SIZE; if (THCState_getCurrentDeviceScratchSpaceSize(state) > tensorMetadataSize) { void* space = THCState_getCurrentDeviceScratchSpace(state); if (space) { d_inputs = (CatArrInputTensor<real, unsigned int> *) space; usedScratch = true; } } if (!usedScratch) { // Fallback to allocating GPU memory THCudaCheck(THCudaMalloc(state, (void**) &d_inputs, tensorMetadataSize)); } OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param; // Next, let's initialize the size, stride arrays for the output Tensor. for (i = 0; i < maxDim; ++i) { param.outputSize[i] = THCTensor_(size)(state, result, i); param.outputStride[i] = THCTensor_(stride)(state, result, i); } // Template Declarations for dim = 1, 2, 3, 4 #define HANDLE_CASE(DIMS) \ CatArrayBatchedCopy<real, unsigned int, DIMS><<<applyGrid, applyBlock>>>(data, d_inputs, param, cat_dimension, param.outputStride[cat_dimension]); // Now we loop offset = 0; for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) { cohortMax = 0; for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) { int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[i+j]) ? THCTensor_(size)(state, inputs[i+j], cat_dimension) : 1; stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]); stackInputs[j].offset = offset; stackInputs[j].dimSize = dimSize; stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]); cohortMax = cohortMax > stackInputs[j].nElements ? 
cohortMax : stackInputs[j].nElements; // update offset offset += dimSize; } THCudaCheck(cudaMemcpy(d_inputs, stackInputs, j * sizeof(CatArrInputTensor<real, unsigned int>), cudaMemcpyHostToDevice)); // Next, let's consider how we set our kernel launch parameters. // We borrow from THCApply, which the kernel's internal indexing // is based on. dim3 applyBlock = getApplyBlock(); // We also re-use the applyGrid - but note that we use the maximum number of // elements for a given tensor in this grouping to determine the count dim3 applyGrid; getApplyGrid(state, cohortMax, applyGrid); // Next, we set our grid's y component to be the number of tensors in // the batch. This will allow the kernel to determine which input // tensor it is responsible for copying applyGrid.y = j; switch (maxDim) { case 1: HANDLE_CASE(1); break; case 2: HANDLE_CASE(2); break; case 3: HANDLE_CASE(3); break; case 4: HANDLE_CASE(4); break; } THCudaCheck(cudaGetLastError()); } if (!usedScratch) { THCudaCheck(THCudaFree(state, (void *)d_inputs)); } #undef HANDLE_CASE } else { offset = 0; for (j = 0; j < numInputs; j++) { // No reason to copy when input is empty if (!THCTensor_(nDimension)(state, inputs[j])) continue; int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[j]) ? THCTensor_(size)(state, inputs[j], cat_dimension) : 1; THCTensor *nt = THCTensor_(newWithTensor)(state, result); THCTensor_(narrow)(state, nt, NULL, cat_dimension, offset, dimSize); THCTensor_(copy)(state, nt, inputs[j]); THCTensor_(free)(state, nt); offset += dimSize; } } } void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self )); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor)); using namespace thrust::placeholders; THCThrustAllocator thrustAlloc(state); self = THCTensor_(newContiguous)(state, self); thrust::device_ptr<real> self_data(THCTensor_(data)(state, self)); int num_dim = THCTensor_(nDimension)(state, self); int64_t N = THCTensor_(nElement)(state, self); THCudaLongTensor_resize2d(state, tensor, N, num_dim); tensor = THCudaLongTensor_newContiguous(state, tensor); thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor)); thrust::counting_iterator<int64_t> idxfirst(0); thrust::counting_iterator<int64_t> idxlast = idxfirst + N; typedef thrust::device_ptr<int64_t> Iter; strided_range<Iter> strided_tensor(tensor_data, tensor_data+N*num_dim, num_dim); #if CUDA_VERSION >= 7000 cudaStream_t stream = THCState_getCurrentStream(state); #endif strided_range<Iter>::iterator dend = thrust::copy_if( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(stream), #endif idxfirst, idxlast, self_data, strided_tensor.begin(), NonZeroOp<real>() ); int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend); int64_t div = 1; for (int dim = num_dim-1; dim >= 0; dim--) { strided_range<Iter> stride_dim(tensor_data+dim, tensor_data+N*num_dim, num_dim); thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(stream), #endif strided_tensor.begin(), strided_tensor.end(), stride_dim.begin(), idx_functor(div, self->size[dim]) ); div *= self->size[dim]; } THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim); THCTensor_(free)(state, self); THCudaLongTensor_free(state, tensor); THCudaCheck(cudaGetLastError()); } void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); int nDimension = 
THCTensor_(nDimension)(state, src_); THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector"); if (nDimension == 2) { int64_t stride0 = THCTensor_(stride)(state, src_, 0); int64_t stride1 = THCTensor_(stride)(state, src_, 1); int64_t size0 = THCTensor_(size)(state, src_, 0); int64_t size1 = THCTensor_(size)(state, src_, 1); int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1); THCTensor_(resize1d)(state, self_, size); int64_t strideSelf = THCTensor_(stride)(state, self_, 0); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x))); int64_t start = (k >= 0 ? k * stride1 : -k * stride0); THCTensor_copyFromDiagonal<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>> (THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf); } else { ptrdiff_t totalElements = THCTensor_(nElement)(state, src_); ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k; int64_t strideSrc = THCTensor_(stride)(state, src_, 0); THCTensor_(resize2d)(state, self_, size, size); THCTensor_(zero)(state, self_); int64_t stride0 = THCTensor_(stride)(state, self_, 0); int64_t stride1 = THCTensor_(stride)(state, self_, 1); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x))); ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0); THCTensor_copyToDiagonal<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>> (THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc); } THCudaCheck(cudaGetLastError()); } accreal THCTensor_(trace)(THCState *state, THCTensor *src_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_)); THArgCheck((src_->nDimension == 2), 1, "expected a matrix"); THCTensor *diag = THCTensor_(new)(state); THCTensor_(diag)(state, diag, src_, 0); accreal trace = THCTensor_(sumall)(state, diag); THCTensor_(free)(state, diag); return trace; } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 1) THCTensor_(fill)(state, r_, a); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? 
r_ // if r_ is contiguous we can direct work on it : THCTensor_(newContiguous)(state, r_); real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a), ScalarConvert<int64_t,real>::to(n - 1)); LinspaceOp<real> linspace_method(a, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, linspace_method); if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_ THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n); if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<real>::exp10(a)); else { THCTensor *r = THCTensor_(isContiguous)(state, r_) ? r_ : THCTensor_(newContiguous)(state, r_); real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a), ScalarConvert<int64_t,real>::to(n - 1)); LogspaceOp<real> logspace_method(a, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + n, logspace_method); if (!THCTensor_(isContiguous)(state, r_)) { THCTensor_(freeCopyTo)(state, r, r_); } } THCudaCheck(cudaGetLastError()); } #endif void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be a non-null number"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound incoherent with step sign"); ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(isContiguous)(state, r_) ? r_ : THCTensor_(newContiguous)(state, r_); LinspaceOp<real,accreal> linspace_method(xmin, step); thrust::device_ptr<real> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); if (!THCTensor_(isContiguous)(state, r_)) THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(cudaGetLastError()); } #endif
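linspace, logspace, and range above all delegate the fill to thrust::tabulate with a step functor, which is called with each element's index. A self-contained sketch of that idea (LinspaceFill is a stand-in for the Torch LinspaceOp functor, not its actual definition):

#include <thrust/device_vector.h>
#include <thrust/tabulate.h>

// Stand-in step functor: element i receives a + i * step.
struct LinspaceFill {
    float a, step;
    LinspaceFill(float a_, float step_) : a(a_), step(step_) {}
    __host__ __device__ float operator()(int i) const { return a + step * i; }
};

void linspace_device(thrust::device_vector<float>& r, float a, float b) {
    int n = (int)r.size();
    float step = (n > 1) ? (b - a) / (n - 1) : 0.f;
    // tabulate writes op(i) into position i for the whole range.
    thrust::tabulate(r.begin(), r.end(), LinspaceFill(a, step));
}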
ed706cfae08cd1b4ac9e6c71efb35299ce629d95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gauss.h" void gaussCreate(Gauss* gauss) { scanf("%d %d", &gauss->m, &gauss->n); gauss->rank = 0; gauss->mat = (double*)malloc(sizeof(double) * gauss->m * (gauss->n + 1)); gauss->res = (double*)malloc(sizeof(double) * gauss->n); for (int i = 0; i < gauss->m; ++i) for (int j = 0; j < gauss->n; ++j) scanf("%lf", &gauss->mat[gaussOffset(i, j, gauss->m)]); for (int i = 0; i < gauss->m; ++i) scanf("%lf", &gauss->mat[gaussOffset(i, gauss->n, gauss->m)]); } void gaussDelete(Gauss* gauss) { free(gauss->mat); free(gauss->res); } void gaussSolve(Gauss* gauss) { int m = gauss->m; int n = gauss->n; double* dMat; int matSize = sizeof(double) * m * (n + 1); ERR(hipMalloc(&dMat, matSize)); ERR(hipMemcpy(dMat, gauss->mat, matSize, hipMemcpyHostToDevice)); thrust::device_ptr<double> dPtrBase = thrust::device_pointer_cast(dMat); for (int j = 0; j < n; ++j) { thrust::pair<thrust::device_ptr<double>, thrust::device_ptr<double> > dPtrMax = thrust::minmax_element( dPtrBase + gaussOffset(gauss->rank, j, m), dPtrBase + gaussOffset(m, j, m) ); double maxVal1 = fabs(dPtrMax.first[0]); double maxVal2 = fabs(dPtrMax.second[0]); int maxInd = -1; if (maxVal1 > maxVal2) maxInd = (dPtrMax.first - dPtrBase) % m; else maxInd = (dPtrMax.second - dPtrBase) % m; if (fmax(maxVal1, maxVal2) < 1e-7) { gauss->res[j] = 0.0; continue; } else gauss->res[j] = 1.0; hipLaunchKernelGGL(( swapKernel), dim3(32), dim3(32), 0, 0, dMat, m, n, gauss->rank, maxInd); hipLaunchKernelGGL(( transformKernel), dim3(dim3(32, 32)), dim3(dim3(32, 32)), 0, 0, dMat, m, n, gauss->rank, j); ++gauss->rank; if (gauss->rank == m) break; } ERR(hipMemcpy(gauss->mat, dMat, matSize, hipMemcpyDeviceToHost)); ERR(hipFree(dMat)); gaussBackward(gauss); } void gaussBackward(Gauss* gauss) { int m = gauss->m; int n = gauss->n; int rank = gauss->rank - 1; double* mat = gauss->mat; double* res = gauss->res; for (int j = n - 1; j >= 0; --j) { if (fabs(res[j]) < 1e-7) continue; double sum = 0.0; for (int k = j + 1; k < n; ++k) sum += mat[gaussOffset(rank, k, m)] * res[k]; res[j] = (mat[gaussOffset(rank, n, m)] - sum) / mat[gaussOffset(rank, j, m)]; --rank; } } void gaussPrintResult(Gauss* gauss) { for (int j = 0; j < gauss->n; ++j) printf("%.10e ", gauss->res[j]); printf("\n"); } __host__ __device__ int gaussOffset(int row, int col, int m) { return col * m + row; } __global__ void swapKernel(double* mat, int m, int n, int row1, int row2) { int tX = blockDim.x * blockIdx.x + threadIdx.x + row1; int offsetX = gridDim.x * blockDim.x; while (tX <= n) { int offset1 = gaussOffset(row1, tX, m); int offset2 = gaussOffset(row2, tX, m); double tmp = mat[offset1]; mat[offset1] = mat[offset2]; mat[offset2] = tmp; tX += offsetX; } } __global__ void transformKernel(double* mat, int m, int n, int row, int col) { int tX = blockDim.x * blockIdx.x + threadIdx.x + row + 1; int tY = blockDim.y * blockIdx.y + threadIdx.y + col + 1; int offsetX = gridDim.x * blockDim.x; int offsetY = gridDim.y * blockDim.y; for (int j = tY; j <= n; j += offsetY) { for (int i = tX; i < m; i += offsetX) { double ratio = mat[gaussOffset(i, col, m)] / mat[gaussOffset(row, col, m)]; mat[gaussOffset(i, j, m)] -= mat[gaussOffset(row, j, m)] * ratio; } } }
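swapKernel and transformKernel above cover arbitrarily sized rows and columns from a fixed launch by using grid-stride loops: each thread starts at its global index and advances by the total number of launched threads. The bare pattern, stripped of the elimination logic, looks like this (a generic element-wise kernel, not part of the solver):

// Minimal grid-stride loop over a 1D range.
__global__ void scale(double* data, int count, double factor) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    for (; i < count; i += stride) {
        data[i] *= factor;
    }
}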
ed706cfae08cd1b4ac9e6c71efb35299ce629d95.cu
#include "gauss.h" void gaussCreate(Gauss* gauss) { scanf("%d %d", &gauss->m, &gauss->n); gauss->rank = 0; gauss->mat = (double*)malloc(sizeof(double) * gauss->m * (gauss->n + 1)); gauss->res = (double*)malloc(sizeof(double) * gauss->n); for (int i = 0; i < gauss->m; ++i) for (int j = 0; j < gauss->n; ++j) scanf("%lf", &gauss->mat[gaussOffset(i, j, gauss->m)]); for (int i = 0; i < gauss->m; ++i) scanf("%lf", &gauss->mat[gaussOffset(i, gauss->n, gauss->m)]); } void gaussDelete(Gauss* gauss) { free(gauss->mat); free(gauss->res); } void gaussSolve(Gauss* gauss) { int m = gauss->m; int n = gauss->n; double* dMat; int matSize = sizeof(double) * m * (n + 1); ERR(cudaMalloc(&dMat, matSize)); ERR(cudaMemcpy(dMat, gauss->mat, matSize, cudaMemcpyHostToDevice)); thrust::device_ptr<double> dPtrBase = thrust::device_pointer_cast(dMat); for (int j = 0; j < n; ++j) { thrust::pair<thrust::device_ptr<double>, thrust::device_ptr<double> > dPtrMax = thrust::minmax_element( dPtrBase + gaussOffset(gauss->rank, j, m), dPtrBase + gaussOffset(m, j, m) ); double maxVal1 = fabs(dPtrMax.first[0]); double maxVal2 = fabs(dPtrMax.second[0]); int maxInd = -1; if (maxVal1 > maxVal2) maxInd = (dPtrMax.first - dPtrBase) % m; else maxInd = (dPtrMax.second - dPtrBase) % m; if (fmax(maxVal1, maxVal2) < 1e-7) { gauss->res[j] = 0.0; continue; } else gauss->res[j] = 1.0; swapKernel<<<32, 32>>>(dMat, m, n, gauss->rank, maxInd); transformKernel<<<dim3(32, 32), dim3(32, 32)>>>(dMat, m, n, gauss->rank, j); ++gauss->rank; if (gauss->rank == m) break; } ERR(cudaMemcpy(gauss->mat, dMat, matSize, cudaMemcpyDeviceToHost)); ERR(cudaFree(dMat)); gaussBackward(gauss); } void gaussBackward(Gauss* gauss) { int m = gauss->m; int n = gauss->n; int rank = gauss->rank - 1; double* mat = gauss->mat; double* res = gauss->res; for (int j = n - 1; j >= 0; --j) { if (fabs(res[j]) < 1e-7) continue; double sum = 0.0; for (int k = j + 1; k < n; ++k) sum += mat[gaussOffset(rank, k, m)] * res[k]; res[j] = (mat[gaussOffset(rank, n, m)] - sum) / mat[gaussOffset(rank, j, m)]; --rank; } } void gaussPrintResult(Gauss* gauss) { for (int j = 0; j < gauss->n; ++j) printf("%.10e ", gauss->res[j]); printf("\n"); } __host__ __device__ int gaussOffset(int row, int col, int m) { return col * m + row; } __global__ void swapKernel(double* mat, int m, int n, int row1, int row2) { int tX = blockDim.x * blockIdx.x + threadIdx.x + row1; int offsetX = gridDim.x * blockDim.x; while (tX <= n) { int offset1 = gaussOffset(row1, tX, m); int offset2 = gaussOffset(row2, tX, m); double tmp = mat[offset1]; mat[offset1] = mat[offset2]; mat[offset2] = tmp; tX += offsetX; } } __global__ void transformKernel(double* mat, int m, int n, int row, int col) { int tX = blockDim.x * blockIdx.x + threadIdx.x + row + 1; int tY = blockDim.y * blockIdx.y + threadIdx.y + col + 1; int offsetX = gridDim.x * blockDim.x; int offsetY = gridDim.y * blockDim.y; for (int j = tY; j <= n; j += offsetY) { for (int i = tX; i < m; i += offsetX) { double ratio = mat[gaussOffset(i, col, m)] / mat[gaussOffset(row, col, m)]; mat[gaussOffset(i, j, m)] -= mat[gaussOffset(row, j, m)] * ratio; } } }
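gaussSolve above picks the pivot row with thrust::minmax_element, which returns iterators to both the smallest and the largest value in the column; since the entry of largest magnitude can be either extreme, the code compares the absolute values of the two. A stripped-down sketch of that pivot search over a raw device column:

#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include <cmath>

// Returns the 0-based offset within [begin, begin + len) of the entry with the
// largest absolute value. begin points into device memory (one matrix column).
int argmax_abs(thrust::device_ptr<double> begin, int len) {
    thrust::pair<thrust::device_ptr<double>, thrust::device_ptr<double> > mm =
        thrust::minmax_element(begin, begin + len);
    double lo = fabs(mm.first[0]);   // magnitude of the smallest (most negative) value
    double hi = fabs(mm.second[0]);  // magnitude of the largest value
    return (int)((lo > hi ? mm.first : mm.second) - begin);
}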
1ddbb17e8c72cf14dc77246ea7a2fda874f0f83e.hip
// !!! This is a file automatically generated by hipify!!! /** * Hello world from the GPU. * * This sample is a very basic sample that launches a kernel which prints a * message from each GPU thread, then prints a message from the host. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> __global__ void mykernel(void) { printf("hello world from GPU \n"); } /** * Host main routine */ int main(void) { hipLaunchKernelGGL(( mykernel), dim3(1),dim3(10) , 0, 0, ); hipDeviceSynchronize(); printf("hello world \n"); return 0; }
1ddbb17e8c72cf14dc77246ea7a2fda874f0f83e.cu
/** * Hello world from the GPU. * * This sample is a very basic sample that launches a kernel which prints a * message from each GPU thread, then prints a message from the host. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> __global__ void mykernel(void) { printf("hello world from GPU \n"); } /** * Host main routine */ int main(void) { mykernel<<< 1,10 >>>(); cudaDeviceSynchronize(); printf("hello world \n"); return 0; }
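main() above never inspects a return status, so a failed launch or a failed device printf would go unnoticed. A minimal sketch of the same program with launch-time and execution-time checks added (same kernel, same <<<1,10>>> configuration, same includes as above):

// Sketch: check cudaGetLastError() for launch errors and the value returned by
// cudaDeviceSynchronize() for errors raised while the kernel was running.
int main(void) {
    mykernel<<<1, 10>>>();
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("hello world \n");
    return 0;
}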
93139bc31d3fda31ce5d8591ea79194416853d00.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Solves the Panfilov model using an explicit numerical scheme. * Based on code orginally provided by Xing Cai, Simula Research Laboratory * and reimplementation by Scott B. Baden, UCSD * * Modified and restructured by Didem Unat, Koc University * * Refer to "Detailed Numerical Analyses of the Aliev-Panfilov Model on GPGPU" * https://www.simula.no/publications/detailed-numerical-analyses-aliev-panfilov-model-gpgpu * by Xing Cai, Didem Unat and Scott Baden * */ #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <iostream> #include <iomanip> #include <string.h> #include <math.h> #include <sys/time.h> #include <getopt.h> #include <vector> #include <algorithm> #define TILE_DIM 32 using namespace std; // External functions extern "C" void splot(double **E, double T, int niter, int m, int n); void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int&num_threads); // Utilities // // Timer // Make successive calls and take a difference to get the elapsed time. static const double kMicro = 1.0e-6; double getTime() { struct timeval TV; struct timezone TZ; const int RC = gettimeofday(&TV, &TZ); if(RC == -1) { cerr << "ERROR: Bad call to gettimeofday" << endl; return(-1); } return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) ); } // end getTime() // Reports statistics about the computation // These values should not vary (except to within roundoff) // when we use different numbers of processes to solve the problem double stats(vector<double> E, int m, int n, double *_mx){ double mx = -1; double l2norm = 0; int i, j; for (j=1; j<=m; j++) { for (i=1; i<=n; i++) { l2norm += E[j*(n+2) + i]*E[j*(n+2) + i]; if (E[j*(n+2) + i] > mx) mx = E[j*(n+2) + i]; } } *_mx = mx; l2norm /= (double) ((m)*(n)); l2norm = sqrt(l2norm); return l2norm; } // External functions __global__ void mirror_boundaries(double *E_prev, const int n, const int m); __global__ void simulate(double *E, double *E_prev, double *R, const double alpha, const int n, const int m, const double kk, const double dt, const double a, const double epsilon, const double M1,const double M2, const double b); // Main program int main (int argc, char** argv) { /* * Solution arrays * E is the "Excitation" variable, a voltage * R is the "Recovery" variable * E_prev is the Excitation variable for the previous timestep, * and is used in time integration */ // Various constants - these definitions shouldn't change const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5; double T=1000.0; int m=200,n=200; int plot_freq = 0; int px = 1, py = 1; int no_comm = 0; int num_threads=1; cmdLine( argc, argv, T, n,px, py, plot_freq, no_comm, num_threads); m = n; // Allocate contiguous memory for solution arrays // The computational box is defined on [1:m+1,1:n+1] // We pad the arrays in order to facilitate differencing on the // boundaries of the computation box // Initialize Host matrices std::vector<double> h_E((m+2)*(n+2)), h_E_prev((m+2)*(n+2)), h_R((m+2)*(n+2)), h_tmp((m+2)*(n+2)); int i,j; // Initialization for (j=1; j<=m; j++) for (i=1; i<=n; i++) h_E_prev[j*(m+2) + i] = h_R[j*(m+2) + i] = 0; for (j=1; j<=m; j++) for (i=n/2+1; i<=n; i++) h_E_prev[j*(m+2) + i] = 1.0; for (j=m/2+1; j<=m; j++) for (i=1; i<=n; i++) h_R[j*(m+2) + i] = 1.0; // Initialize device matrices double *d_E = 0, *d_E_prev = 0, *d_R = 0, *d_tmp = 0; hipMalloc((void**)&d_E, sizeof(double) * (m+2) * (n+2)); 
hipMalloc((void**)&d_E_prev, sizeof(double) * (m+2) * (n+2)); hipMalloc((void**)&d_R, sizeof(double) * (m+2) * (n+2)); hipMalloc((void**)&d_tmp, sizeof(double) * (m+2) * (n+2)); hipMemcpy(d_E, &h_E[0], sizeof(double) * (m+2) * (n+2), hipMemcpyHostToDevice); hipMemcpy(d_E_prev, &h_E_prev[0], sizeof(double) * (m+2) * (n+2), hipMemcpyHostToDevice); hipMemcpy(d_R, &h_R[0], sizeof(double) * (m+2) * (n+2), hipMemcpyHostToDevice); hipMemcpy(d_tmp, &h_tmp[0], sizeof(double) * (m+2) * (n+2), hipMemcpyHostToDevice); const dim3 thread_size(TILE_DIM,TILE_DIM); // Max thread on one unit const dim3 num_blocks(m/TILE_DIM+1,n/TILE_DIM+1); // Division will take floor. So we add one. We check the boundaries inside kernels. double dx = 1.0/n; // For time integration, these values shouldn't change double rp= kk*(b+1)*(b+1)/4; double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk)); double dtr=1/(epsilon+((M1/M2)*rp)); double dt = (dte<dtr) ? 0.95*dte : 0.95*dtr; double alpha = d*dt/(dx*dx); cout << "Grid Size : " << n << endl; cout << "Duration of Sim : " << T << endl; cout << "Time step dt : " << dt << endl; cout << "Process geometry: " << px << " x " << py << endl; if (no_comm) cout << "Communication : DISABLED" << endl; cout << endl; // Start the timer double t0 = getTime(); // Simulated time is different from the integer timestep number // Simulated time double t = 0.0; // Integer timestep number int niter=0; while (t<T) { t += dt; niter++; hipLaunchKernelGGL(( mirror_boundaries), dim3(num_blocks),dim3(thread_size), 0, 0, d_E_prev, n, m); hipLaunchKernelGGL(( simulate), dim3(num_blocks),dim3(thread_size), 0, 0, d_E, d_E_prev, d_R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b); //swap current E with previous E d_tmp = d_E; d_E = d_E_prev; d_E_prev = d_tmp; // if (plot_freq){ // int k = (int)(t/plot_freq); // if ((t - k * plot_freq) < dt){ // splot(E,t,niter,m+2,n+2); // } // } }//end of while loop double time_elapsed = getTime() - t0; double Gflops = (double)(niter * (1E-9 * n * n ) * 28.0) / time_elapsed ; double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0 ))/time_elapsed; cout << "Number of Iterations : " << niter << endl; cout << "Elapsed Time (sec) : " << time_elapsed << endl; cout << "Sustained Gflops Rate : " << Gflops << endl; cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl; hipMemcpy(&h_E_prev[0], d_E_prev, sizeof(double) * (m+2) * (n+2), hipMemcpyDeviceToHost); double mx; double l2norm = stats(h_E_prev,m,n,&mx); cout << "Max: " << mx << " L2norm: "<< l2norm << endl; if (plot_freq){ cout << "\n\nEnter any input to close the program and the plot..." 
<< endl; getchar(); } hipFree (d_E); hipFree (d_E_prev); hipFree (d_R); hipFree (d_tmp); return 0; } void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int& num_threads){ /// Command line arguments // Default value of the domain sizes static struct option long_options[] = { {"n", required_argument, 0, 'n'}, {"px", required_argument, 0, 'x'}, {"py", required_argument, 0, 'y'}, {"tfinal", required_argument, 0, 't'}, {"plot", required_argument, 0, 'p'}, {"nocomm", no_argument, 0, 'k'}, {"numthreads", required_argument, 0, 'o'}, }; // Process command line arguments int ac; for(ac=1;ac<argc;ac++) { int c; while ((c=getopt_long(argc,argv,"n:x:y:t:kp:o:",long_options,NULL)) != -1){ switch (c) { // Size of the computational box case 'n': n = atoi(optarg); break; // X processor geometry case 'x': px = atoi(optarg); // Y processor geometry case 'y': py = atoi(optarg); // Length of simulation, in simulated time units case 't': T = atof(optarg); break; // Turn off communication case 'k': no_comm = 1; break; // Plot the excitation variable case 'p': plot_freq = atoi(optarg); break; // Plot the excitation variable case 'o': num_threads = atoi(optarg); break; // Error default: printf("Usage: a.out [-n <domain size>] [-t <final time >]\n\t [-p <plot frequency>]\n\t[-px <x processor geometry> [-py <y proc. geometry] [-k turn off communication] [-o <Number of OpenMP threads>]\n"); exit(-1); } } } } /* ********************************************************** * Author : Urvashi R.V. [04/06/2004] * Modified by Didem Unat [03/23/18] *************************************************************/ #include <stdio.h> /* Function to plot the 2D array * 'gnuplot' is instantiated via a pipe and * the values to be plotted are passed through, along * with gnuplot commands */ FILE *gnu=NULL; void splot(double **U, double T, int niter, int m, int n) { int i, j; if(gnu==NULL) gnu = popen("gnuplot","w"); double mx = -1, mn = 32768; for (j=0; j<m; j++) for (i=0; i<n; i++){ if (U[j][i] > mx) mx = U[j][i]; if (U[j][i] < mn) mn = U[j][i]; } fprintf(gnu,"set title \"T = %f [niter = %d]\"\n",T, niter); fprintf(gnu,"set size square\n"); fprintf(gnu,"set key off\n"); fprintf(gnu,"set pm3d map\n"); // Various color schemes fprintf(gnu,"set palette defined (-3 \"blue\", 0 \"white\", 1 \"red\")\n"); // fprintf(gnu,"set palette rgbformulae 22, 13, 31\n"); // fprintf(gnu,"set palette rgbformulae 30, 31, 32\n"); fprintf(gnu,"splot [0:%d] [0:%d][%f:%f] \"-\"\n",m-1,n-1,mn,mx); for (j=0; j<m; j++){ for (i=0; i<n; i++) { fprintf(gnu,"%d %d %f\n", i, j, U[i][j]); } fprintf(gnu,"\n"); } fprintf(gnu,"e\n"); fflush(gnu); return; }
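mirror_boundaries and simulate are only declared in this file; their definitions live in a separate translation unit that is not included here. Purely as an illustrative assumption of what mirroring the ghost cells of the padded (m+2) x (n+2) grid can look like (a guess at the shape of such a kernel, not the project's actual implementation):

// Hypothetical sketch: copy the second interior row/column into the adjacent
// ghost row/column so the 5-point stencil sees zero-flux (mirrored) boundaries.
__global__ void mirror_boundaries_sketch(double* E_prev, const int n, const int m) {
    int i = blockDim.x * blockIdx.x + threadIdx.x + 1;  // column, 1..n
    int j = blockDim.y * blockIdx.y + threadIdx.y + 1;  // row, 1..m
    if (i <= n && j == 1) {  // one row of threads fills the top/bottom ghosts
        E_prev[0 * (n + 2) + i]       = E_prev[2 * (n + 2) + i];
        E_prev[(m + 1) * (n + 2) + i] = E_prev[(m - 1) * (n + 2) + i];
    }
    if (j <= m && i == 1) {  // one column of threads fills the left/right ghosts
        E_prev[j * (n + 2) + 0]       = E_prev[j * (n + 2) + 2];
        E_prev[j * (n + 2) + (n + 1)] = E_prev[j * (n + 2) + (n - 1)];
    }
}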
93139bc31d3fda31ce5d8591ea79194416853d00.cu
/* * Solves the Panfilov model using an explicit numerical scheme. * Based on code orginally provided by Xing Cai, Simula Research Laboratory * and reimplementation by Scott B. Baden, UCSD * * Modified and restructured by Didem Unat, Koc University * * Refer to "Detailed Numerical Analyses of the Aliev-Panfilov Model on GPGPU" * https://www.simula.no/publications/detailed-numerical-analyses-aliev-panfilov-model-gpgpu * by Xing Cai, Didem Unat and Scott Baden * */ #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <iostream> #include <iomanip> #include <string.h> #include <math.h> #include <sys/time.h> #include <getopt.h> #include <vector> #include <algorithm> #define TILE_DIM 32 using namespace std; // External functions extern "C" void splot(double **E, double T, int niter, int m, int n); void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int&num_threads); // Utilities // // Timer // Make successive calls and take a difference to get the elapsed time. static const double kMicro = 1.0e-6; double getTime() { struct timeval TV; struct timezone TZ; const int RC = gettimeofday(&TV, &TZ); if(RC == -1) { cerr << "ERROR: Bad call to gettimeofday" << endl; return(-1); } return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) ); } // end getTime() // Reports statistics about the computation // These values should not vary (except to within roundoff) // when we use different numbers of processes to solve the problem double stats(vector<double> E, int m, int n, double *_mx){ double mx = -1; double l2norm = 0; int i, j; for (j=1; j<=m; j++) { for (i=1; i<=n; i++) { l2norm += E[j*(n+2) + i]*E[j*(n+2) + i]; if (E[j*(n+2) + i] > mx) mx = E[j*(n+2) + i]; } } *_mx = mx; l2norm /= (double) ((m)*(n)); l2norm = sqrt(l2norm); return l2norm; } // External functions __global__ void mirror_boundaries(double *E_prev, const int n, const int m); __global__ void simulate(double *E, double *E_prev, double *R, const double alpha, const int n, const int m, const double kk, const double dt, const double a, const double epsilon, const double M1,const double M2, const double b); // Main program int main (int argc, char** argv) { /* * Solution arrays * E is the "Excitation" variable, a voltage * R is the "Recovery" variable * E_prev is the Excitation variable for the previous timestep, * and is used in time integration */ // Various constants - these definitions shouldn't change const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5; double T=1000.0; int m=200,n=200; int plot_freq = 0; int px = 1, py = 1; int no_comm = 0; int num_threads=1; cmdLine( argc, argv, T, n,px, py, plot_freq, no_comm, num_threads); m = n; // Allocate contiguous memory for solution arrays // The computational box is defined on [1:m+1,1:n+1] // We pad the arrays in order to facilitate differencing on the // boundaries of the computation box // Initialize Host matrices std::vector<double> h_E((m+2)*(n+2)), h_E_prev((m+2)*(n+2)), h_R((m+2)*(n+2)), h_tmp((m+2)*(n+2)); int i,j; // Initialization for (j=1; j<=m; j++) for (i=1; i<=n; i++) h_E_prev[j*(m+2) + i] = h_R[j*(m+2) + i] = 0; for (j=1; j<=m; j++) for (i=n/2+1; i<=n; i++) h_E_prev[j*(m+2) + i] = 1.0; for (j=m/2+1; j<=m; j++) for (i=1; i<=n; i++) h_R[j*(m+2) + i] = 1.0; // Initialize device matrices double *d_E = 0, *d_E_prev = 0, *d_R = 0, *d_tmp = 0; cudaMalloc((void**)&d_E, sizeof(double) * (m+2) * (n+2)); cudaMalloc((void**)&d_E_prev, sizeof(double) * (m+2) * (n+2)); cudaMalloc((void**)&d_R, 
sizeof(double) * (m+2) * (n+2)); cudaMalloc((void**)&d_tmp, sizeof(double) * (m+2) * (n+2)); cudaMemcpy(d_E, &h_E[0], sizeof(double) * (m+2) * (n+2), cudaMemcpyHostToDevice); cudaMemcpy(d_E_prev, &h_E_prev[0], sizeof(double) * (m+2) * (n+2), cudaMemcpyHostToDevice); cudaMemcpy(d_R, &h_R[0], sizeof(double) * (m+2) * (n+2), cudaMemcpyHostToDevice); cudaMemcpy(d_tmp, &h_tmp[0], sizeof(double) * (m+2) * (n+2), cudaMemcpyHostToDevice); const dim3 thread_size(TILE_DIM,TILE_DIM); // Max thread on one unit const dim3 num_blocks(m/TILE_DIM+1,n/TILE_DIM+1); // Division will take floor. So we add one. We check the boundaries inside kernels. double dx = 1.0/n; // For time integration, these values shouldn't change double rp= kk*(b+1)*(b+1)/4; double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk)); double dtr=1/(epsilon+((M1/M2)*rp)); double dt = (dte<dtr) ? 0.95*dte : 0.95*dtr; double alpha = d*dt/(dx*dx); cout << "Grid Size : " << n << endl; cout << "Duration of Sim : " << T << endl; cout << "Time step dt : " << dt << endl; cout << "Process geometry: " << px << " x " << py << endl; if (no_comm) cout << "Communication : DISABLED" << endl; cout << endl; // Start the timer double t0 = getTime(); // Simulated time is different from the integer timestep number // Simulated time double t = 0.0; // Integer timestep number int niter=0; while (t<T) { t += dt; niter++; mirror_boundaries<<<num_blocks,thread_size>>>(d_E_prev, n, m); simulate<<<num_blocks,thread_size>>>(d_E, d_E_prev, d_R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b); //swap current E with previous E d_tmp = d_E; d_E = d_E_prev; d_E_prev = d_tmp; // if (plot_freq){ // int k = (int)(t/plot_freq); // if ((t - k * plot_freq) < dt){ // splot(E,t,niter,m+2,n+2); // } // } }//end of while loop double time_elapsed = getTime() - t0; double Gflops = (double)(niter * (1E-9 * n * n ) * 28.0) / time_elapsed ; double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0 ))/time_elapsed; cout << "Number of Iterations : " << niter << endl; cout << "Elapsed Time (sec) : " << time_elapsed << endl; cout << "Sustained Gflops Rate : " << Gflops << endl; cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl; cudaMemcpy(&h_E_prev[0], d_E_prev, sizeof(double) * (m+2) * (n+2), cudaMemcpyDeviceToHost); double mx; double l2norm = stats(h_E_prev,m,n,&mx); cout << "Max: " << mx << " L2norm: "<< l2norm << endl; if (plot_freq){ cout << "\n\nEnter any input to close the program and the plot..." 
<< endl; getchar(); } cudaFree (d_E); cudaFree (d_E_prev); cudaFree (d_R); cudaFree (d_tmp); return 0; } void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int& num_threads){ /// Command line arguments // Default value of the domain sizes static struct option long_options[] = { {"n", required_argument, 0, 'n'}, {"px", required_argument, 0, 'x'}, {"py", required_argument, 0, 'y'}, {"tfinal", required_argument, 0, 't'}, {"plot", required_argument, 0, 'p'}, {"nocomm", no_argument, 0, 'k'}, {"numthreads", required_argument, 0, 'o'}, }; // Process command line arguments int ac; for(ac=1;ac<argc;ac++) { int c; while ((c=getopt_long(argc,argv,"n:x:y:t:kp:o:",long_options,NULL)) != -1){ switch (c) { // Size of the computational box case 'n': n = atoi(optarg); break; // X processor geometry case 'x': px = atoi(optarg); // Y processor geometry case 'y': py = atoi(optarg); // Length of simulation, in simulated time units case 't': T = atof(optarg); break; // Turn off communication case 'k': no_comm = 1; break; // Plot the excitation variable case 'p': plot_freq = atoi(optarg); break; // Plot the excitation variable case 'o': num_threads = atoi(optarg); break; // Error default: printf("Usage: a.out [-n <domain size>] [-t <final time >]\n\t [-p <plot frequency>]\n\t[-px <x processor geometry> [-py <y proc. geometry] [-k turn off communication] [-o <Number of OpenMP threads>]\n"); exit(-1); } } } } /* ********************************************************** * Author : Urvashi R.V. [04/06/2004] * Modified by Didem Unat [03/23/18] *************************************************************/ #include <stdio.h> /* Function to plot the 2D array * 'gnuplot' is instantiated via a pipe and * the values to be plotted are passed through, along * with gnuplot commands */ FILE *gnu=NULL; void splot(double **U, double T, int niter, int m, int n) { int i, j; if(gnu==NULL) gnu = popen("gnuplot","w"); double mx = -1, mn = 32768; for (j=0; j<m; j++) for (i=0; i<n; i++){ if (U[j][i] > mx) mx = U[j][i]; if (U[j][i] < mn) mn = U[j][i]; } fprintf(gnu,"set title \"T = %f [niter = %d]\"\n",T, niter); fprintf(gnu,"set size square\n"); fprintf(gnu,"set key off\n"); fprintf(gnu,"set pm3d map\n"); // Various color schemes fprintf(gnu,"set palette defined (-3 \"blue\", 0 \"white\", 1 \"red\")\n"); // fprintf(gnu,"set palette rgbformulae 22, 13, 31\n"); // fprintf(gnu,"set palette rgbformulae 30, 31, 32\n"); fprintf(gnu,"splot [0:%d] [0:%d][%f:%f] \"-\"\n",m-1,n-1,mn,mx); for (j=0; j<m; j++){ for (i=0; i<n; i++) { fprintf(gnu,"%d %d %f\n", i, j, U[i][j]); } fprintf(gnu,"\n"); } fprintf(gnu,"e\n"); fflush(gnu); return; }
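The host loop in the Panfilov program above launches mirror_boundaries and simulate, but their definitions live in a separate compilation unit and are not part of this file. The following is a minimal sketch of the boundary kernel only, written to match the declared signature and the (n+2)x(m+2) padded layout used by the host code; the ghost-cell convention (each padding line copies the value two cells inside) is an assumption taken from the usual serial Aliev-Panfilov reference code, not something this file confirms.

// Sketch only: one plausible definition of the declared mirror_boundaries
// kernel. The ghost-cell convention here is an assumption; the project's real
// kernel (in another .cu file) may differ.
__global__ void mirror_boundaries(double *E_prev, const int n, const int m)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;  // 0 .. n+1
    const int row = blockIdx.y * blockDim.y + threadIdx.y;  // 0 .. m+1

    // Left/right ghost columns for interior rows.
    if (row >= 1 && row <= m && col == 0) {
        E_prev[row * (n + 2) + 0]       = E_prev[row * (n + 2) + 2];
        E_prev[row * (n + 2) + (n + 1)] = E_prev[row * (n + 2) + (n - 1)];
    }
    // Top/bottom ghost rows for interior columns.
    if (col >= 1 && col <= n && row == 0) {
        E_prev[0 * (n + 2) + col]       = E_prev[2 * (n + 2) + col];
        E_prev[(m + 1) * (n + 2) + col] = E_prev[(m - 1) * (n + 2) + col];
    }
}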
e90e3d8bf05ae237274051c6e8855e3847cd60c5.hip
// !!! This is a file automatically generated by hipify!!! #include "Particles.h" #include "Alloc.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "MemCpy.h" #define TPB (16*20) // Threads per block /** allocate particle arrays */ void particle_allocate(struct parameters* param, struct particles* part, int is) { // set species ID part->species_ID = is; // number of particles part->nop = param->np[is]; // maximum number of particles part->npmax = param->npMax[is]; // choose a different number of mover iterations for ions and electrons if (param->qom[is] < 0){ //electrons part->NiterMover = param->NiterMover; part->n_sub_cycles = param->n_sub_cycles; } else { // ions: only one iteration part->NiterMover = 1; part->n_sub_cycles = 1; } // particles per cell part->npcelx = param->npcelx[is]; part->npcely = param->npcely[is]; part->npcelz = param->npcelz[is]; part->npcel = part->npcelx*part->npcely*part->npcelz; // cast it to required precision part->qom = (FPpart) param->qom[is]; long npmax = part->npmax; // initialize drift and thermal velocities // drift part->u0 = (FPpart) param->u0[is]; part->v0 = (FPpart) param->v0[is]; part->w0 = (FPpart) param->w0[is]; // thermal part->uth = (FPpart) param->uth[is]; part->vth = (FPpart) param->vth[is]; part->wth = (FPpart) param->wth[is]; ////////////////////////////// /// ALLOCATION PARTICLE ARRAYS ////////////////////////////// part->x = new FPpart[npmax]; part->y = new FPpart[npmax]; part->z = new FPpart[npmax]; // allocate velocity part->u = new FPpart[npmax]; part->v = new FPpart[npmax]; part->w = new FPpart[npmax]; // allocate charge = q * statistical weight part->q = new FPinterp[npmax]; } /** deallocate */ void particle_deallocate(struct particles* part) { // deallocate particle variables delete[] part->x; delete[] part->y; delete[] part->z; delete[] part->u; delete[] part->v; delete[] part->w; delete[] part->q; } /* The kernel makes every thread work on one single particle. */ __global__ void mover_PC_kernel(struct device_part_arrays part, struct EMfield* field, struct grid* grd, struct parameters *param, long nop, int n_sub_cycles, FPpart qom, int NiterMover) { // Find global thread index int part_index = blockIdx.x*blockDim.x + threadIdx.x; // Don't go outside array if(part_index >= nop) return; // auxiliary variables FPpart dt_sub_cycling = (FPpart) param->dt/((double) n_sub_cycles); FPpart dto2 = .5*dt_sub_cycling, qomdt2 = qom*dto2/param->c; FPpart omdtsq, denom, ut, vt, wt, udotb; // local (to the particle) electric and magnetic field FPfield Exl=0.0, Eyl=0.0, Ezl=0.0, Bxl=0.0, Byl=0.0, Bzl=0.0; // interpolation densities int ix,iy,iz; FPfield weight[2][2][2]; FPfield xi[2], eta[2], zeta[2]; // intermediate particle position and velocity FPpart xptilde, yptilde, zptilde, uptilde, vptilde, wptilde; /* I removed the loop going through every particle since each thread handles one particle. 
*/ // start subcycling for (int i_sub=0; i_sub < n_sub_cycles; i_sub++){ // move each particle with new fields xptilde = part.x[part_index]; yptilde = part.y[part_index]; zptilde = part.z[part_index]; // calculate the average velocity iteratively for(int innter=0; innter < NiterMover; innter++){ // interpolation G-->P ix = 2 + int((part.x[part_index] - grd->xStart)*grd->invdx); iy = 2 + int((part.y[part_index] - grd->yStart)*grd->invdy); iz = 2 + int((part.z[part_index] - grd->zStart)*grd->invdz); // calculate weights xi[0] = part.x[part_index] - grd->XN_flat[get_idx(ix-1, iy, iz, grd->nyn, grd->nzn)]; eta[0] = part.y[part_index] - grd->YN_flat[get_idx(ix, iy-1, iz, grd->nyn, grd->nzn)]; zeta[0] = part.z[part_index] - grd->ZN_flat[get_idx(ix, iy, iz-1, grd->nyn, grd->nzn)]; xi[1] = grd->XN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.x[part_index]; eta[1] = grd->YN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.y[part_index]; zeta[1] = grd->ZN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.z[part_index]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) weight[ii][jj][kk] = xi[ii] * eta[jj] * zeta[kk] * grd->invVOL; // set to zero local electric and magnetic field Exl=0.0, Eyl = 0.0, Ezl = 0.0, Bxl = 0.0, Byl = 0.0, Bzl = 0.0; for (int ii=0; ii < 2; ii++) for (int jj=0; jj < 2; jj++) for(int kk=0; kk < 2; kk++){ long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); Exl += weight[ii][jj][kk]*field->Ex_flat[index]; Eyl += weight[ii][jj][kk]*field->Ey_flat[index]; Ezl += weight[ii][jj][kk]*field->Ez_flat[index]; Bxl += weight[ii][jj][kk]*field->Bxn_flat[index]; Byl += weight[ii][jj][kk]*field->Byn_flat[index]; Bzl += weight[ii][jj][kk]*field->Bzn_flat[index]; } // end interpolation omdtsq = qomdt2*qomdt2*(Bxl*Bxl+Byl*Byl+Bzl*Bzl); denom = 1.0/(1.0 + omdtsq); // solve the position equation ut= part.u[part_index] + qomdt2*Exl; vt= part.v[part_index] + qomdt2*Eyl; wt= part.w[part_index] + qomdt2*Ezl; udotb = ut*Bxl + vt*Byl + wt*Bzl; // solve the velocity equation uptilde = (ut+qomdt2*(vt*Bzl -wt*Byl + qomdt2*udotb*Bxl))*denom; // ut, vt, Bzl vptilde = (vt+qomdt2*(wt*Bxl -ut*Bzl + qomdt2*udotb*Byl))*denom; wptilde = (wt+qomdt2*(ut*Byl -vt*Bxl + qomdt2*udotb*Bzl))*denom; // update position part.x[part_index] = xptilde + uptilde*dto2; part.y[part_index] = yptilde + vptilde*dto2; part.z[part_index] = zptilde + wptilde*dto2; } // end of iteration // update the final position and velocity part.u[part_index]= 2.0*uptilde - part.u[part_index]; part.v[part_index]= 2.0*vptilde - part.v[part_index]; part.w[part_index]= 2.0*wptilde - part.w[part_index]; part.x[part_index] = xptilde + uptilde*dt_sub_cycling; part.y[part_index] = yptilde + vptilde*dt_sub_cycling; part.z[part_index] = zptilde + wptilde*dt_sub_cycling; ////////// ////////// ////////// BC // X-DIRECTION: BC particles if (part.x[part_index] > grd->Lx){ if (param->PERIODICX==true){ // PERIODIC part.x[part_index] = part.x[part_index] - grd->Lx; } else { // REFLECTING BC part.u[part_index] = -part.u[part_index]; part.x[part_index] = 2*grd->Lx - part.x[part_index]; } } if (part.x[part_index] < 0){ if (param->PERIODICX==true){ // PERIODIC part.x[part_index] = part.x[part_index] + grd->Lx; } else { // REFLECTING BC part.u[part_index] = -part.u[part_index]; part.x[part_index] = -part.x[part_index]; } } // Y-DIRECTION: BC particles if (part.y[part_index] > grd->Ly){ if (param->PERIODICY==true){ // PERIODIC part.y[part_index] = part.y[part_index] - grd->Ly; } else { // REFLECTING BC 
part.v[part_index] = -part.v[part_index]; part.y[part_index] = 2*grd->Ly - part.y[part_index]; } } if (part.y[part_index] < 0){ if (param->PERIODICY==true){ // PERIODIC part.y[part_index] = part.y[part_index] + grd->Ly; } else { // REFLECTING BC part.v[part_index] = -part.v[part_index]; part.y[part_index] = -part.y[part_index]; } } // Z-DIRECTION: BC particles if (part.z[part_index] > grd->Lz){ if (param->PERIODICZ==true){ // PERIODIC part.z[part_index] = part.z[part_index] - grd->Lz; } else { // REFLECTING BC part.w[part_index] = -part.w[part_index]; part.z[part_index] = 2*grd->Lz - part.z[part_index]; } } if (part.z[part_index] < 0){ if (param->PERIODICZ==true){ // PERIODIC part.z[part_index] = part.z[part_index] + grd->Lz; } else { // REFLECTING BC part.w[part_index] = -part.w[part_index]; part.z[part_index] = -part.z[part_index]; } } } // end of one particle } /** MODIFICATION: New particle mover */ // TODO COMMENT MORE int mover_PC(struct device_part_arrays part, EMfield *field, grid* grd, parameters *param, long nop, int species_ID, int n_sub_cycles, FPpart qom, int NiterMover) { // print species and subcycling std::cout << "*** MOVER with SUBCYCLYING "<< n_sub_cycles << " - species " << species_ID << " ***" << std::endl; int blocks = ((nop + TPB - 1) / TPB); hipLaunchKernelGGL(( mover_PC_kernel), dim3(blocks), dim3(TPB), 0, 0, part, field, grd, param, nop, n_sub_cycles, qom, NiterMover); hipError_t err = hipGetLastError(); if(err != hipSuccess) { std::cout << "Kernel mover_PC error: " << hipGetErrorString(err) << std::endl; exit(0); } return 0; } /** Interpolation Particle --> Grid: This is for species */ // TODO COMMENT WHEN DONE __global__ void interpP2G_kernel(device_part_arrays part, device_ids_arrays ids, grid* grd, long nop) { int part_index = blockIdx.x*blockDim.x + threadIdx.x; if(part_index >= nop) return; // arrays needed for interpolation FPpart weight[2][2][2]; FPpart temp[2][2][2]; FPpart xi[2], eta[2], zeta[2]; // index of the cell int ix, iy, iz; // determine cell: can we change to int()? is it faster? 
ix = 2 + int(floor((part.x[part_index] - grd->xStart) * grd->invdx)); iy = 2 + int(floor((part.y[part_index] - grd->yStart) * grd->invdy)); iz = 2 + int(floor((part.z[part_index] - grd->zStart) * grd->invdz)); // distances from node xi[0] = part.x[part_index] - grd->XN_flat[get_idx(ix-1, iy, iz, grd->nyn, grd->nzn)]; eta[0] = part.y[part_index] - grd->YN_flat[get_idx(ix, iy-1, iz, grd->nyn, grd->nzn)]; zeta[0] = part.z[part_index] - grd->ZN_flat[get_idx(ix, iy, iz-1, grd->nyn, grd->nzn)]; xi[1] = grd->XN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.x[part_index]; eta[1] = grd->YN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.y[part_index]; zeta[1] = grd->ZN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.z[part_index]; // calculate the weights for different nodes for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) weight[ii][jj][kk] = part.q[part_index] * xi[ii] * eta[jj] * zeta[kk] * grd->invVOL; ////////////////////////// // add charge density for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++){ long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.rhon_flat[index]), weight[ii][jj][kk] * grd->invVOL); } //////////////////////////// // add current density - Jx for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.u[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.Jx_flat[index]), temp[ii][jj][kk] * grd->invVOL); } //////////////////////////// // add current density - Jy for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.v[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.Jy_flat[index]), temp[ii][jj][kk] * grd->invVOL); } //////////////////////////// // add current density - Jz for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.w[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.Jz_flat[index]), temp[ii][jj][kk] * grd->invVOL); } //////////////////////////// // add pressure pxx for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.u[part_index] * part.u[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pxx_flat[index]), temp[ii][jj][kk] * grd->invVOL); } //////////////////////////// // add pressure pxy for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.u[part_index] * part.v[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pxy_flat[index]), temp[ii][jj][kk] * grd->invVOL); } ///////////////////////////// // add pressure pxz for (int ii = 0; ii < 2; ii++) for (int jj = 
0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.u[part_index] * part.w[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pxz_flat[index]), temp[ii][jj][kk] * grd->invVOL); } ///////////////////////////// // add pressure pyy for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.v[part_index] * part.v[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pyy_flat[index]), temp[ii][jj][kk] * grd->invVOL); } ///////////////////////////// // add pressure pyz for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.v[part_index] * part.w[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pyz_flat[index]), temp[ii][jj][kk] * grd->invVOL); } ///////////////////////////// // add pressure pzz for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.w[part_index] * part.w[part_index] * weight[ii][jj][kk]; for (int ii=0; ii < 2; ii++) for (int jj=0; jj < 2; jj++) for(int kk=0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pzz_flat[index]), temp[ii][jj][kk] * grd->invVOL); } } // TODO COMMENT WHEN DONE void interpP2G(struct device_part_arrays part, struct device_ids_arrays ids, grid* grd, long nop) { int blocks = (nop + TPB - 1) / TPB; hipLaunchKernelGGL(( interpP2G_kernel), dim3(blocks), dim3(TPB), 0, 0, part, ids, grd, nop); }
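Both kernels in the file above index flattened node arrays through get_idx and read particle data through the device_part_arrays struct, neither of which is defined here (they come from Particles.h / Alloc.h). The sketch below gives plausible shapes for these helpers so the indexing can be followed; the struct field names are taken from how the kernels use them, but the exact flattening order in get_idx and any extra fields are assumptions.

// Hypothetical shapes of helpers the kernels rely on; real definitions live in
// the project headers (Particles.h / Alloc.h), so treat layout as an assumption.
struct device_part_arrays {
    FPpart   *x, *y, *z;   // particle positions (device pointers)
    FPpart   *u, *v, *w;   // particle velocities
    FPinterp *q;           // charge times statistical weight
};

// Row-major flattening of an (nxn, nyn, nzn) node array, consistent with the
// XN_flat / YN_flat / ZN_flat accesses above, which pass only nyn and nzn.
__host__ __device__ inline long get_idx(int ix, int iy, int iz, int nyn, int nzn)
{
    return ((long)ix * nyn + iy) * nzn + iz;
}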
e90e3d8bf05ae237274051c6e8855e3847cd60c5.cu
#include "Particles.h" #include "Alloc.h" #include <cuda.h> #include <cuda_runtime.h> #include "MemCpy.h" #define TPB (16*20) // Threads per block /** allocate particle arrays */ void particle_allocate(struct parameters* param, struct particles* part, int is) { // set species ID part->species_ID = is; // number of particles part->nop = param->np[is]; // maximum number of particles part->npmax = param->npMax[is]; // choose a different number of mover iterations for ions and electrons if (param->qom[is] < 0){ //electrons part->NiterMover = param->NiterMover; part->n_sub_cycles = param->n_sub_cycles; } else { // ions: only one iteration part->NiterMover = 1; part->n_sub_cycles = 1; } // particles per cell part->npcelx = param->npcelx[is]; part->npcely = param->npcely[is]; part->npcelz = param->npcelz[is]; part->npcel = part->npcelx*part->npcely*part->npcelz; // cast it to required precision part->qom = (FPpart) param->qom[is]; long npmax = part->npmax; // initialize drift and thermal velocities // drift part->u0 = (FPpart) param->u0[is]; part->v0 = (FPpart) param->v0[is]; part->w0 = (FPpart) param->w0[is]; // thermal part->uth = (FPpart) param->uth[is]; part->vth = (FPpart) param->vth[is]; part->wth = (FPpart) param->wth[is]; ////////////////////////////// /// ALLOCATION PARTICLE ARRAYS ////////////////////////////// part->x = new FPpart[npmax]; part->y = new FPpart[npmax]; part->z = new FPpart[npmax]; // allocate velocity part->u = new FPpart[npmax]; part->v = new FPpart[npmax]; part->w = new FPpart[npmax]; // allocate charge = q * statistical weight part->q = new FPinterp[npmax]; } /** deallocate */ void particle_deallocate(struct particles* part) { // deallocate particle variables delete[] part->x; delete[] part->y; delete[] part->z; delete[] part->u; delete[] part->v; delete[] part->w; delete[] part->q; } /* The kernel makes every thread work on one single particle. */ __global__ void mover_PC_kernel(struct device_part_arrays part, struct EMfield* field, struct grid* grd, struct parameters *param, long nop, int n_sub_cycles, FPpart qom, int NiterMover) { // Find global thread index int part_index = blockIdx.x*blockDim.x + threadIdx.x; // Don't go outside array if(part_index >= nop) return; // auxiliary variables FPpart dt_sub_cycling = (FPpart) param->dt/((double) n_sub_cycles); FPpart dto2 = .5*dt_sub_cycling, qomdt2 = qom*dto2/param->c; FPpart omdtsq, denom, ut, vt, wt, udotb; // local (to the particle) electric and magnetic field FPfield Exl=0.0, Eyl=0.0, Ezl=0.0, Bxl=0.0, Byl=0.0, Bzl=0.0; // interpolation densities int ix,iy,iz; FPfield weight[2][2][2]; FPfield xi[2], eta[2], zeta[2]; // intermediate particle position and velocity FPpart xptilde, yptilde, zptilde, uptilde, vptilde, wptilde; /* I removed the loop going through every particle since each thread handles one particle. 
*/ // start subcycling for (int i_sub=0; i_sub < n_sub_cycles; i_sub++){ // move each particle with new fields xptilde = part.x[part_index]; yptilde = part.y[part_index]; zptilde = part.z[part_index]; // calculate the average velocity iteratively for(int innter=0; innter < NiterMover; innter++){ // interpolation G-->P ix = 2 + int((part.x[part_index] - grd->xStart)*grd->invdx); iy = 2 + int((part.y[part_index] - grd->yStart)*grd->invdy); iz = 2 + int((part.z[part_index] - grd->zStart)*grd->invdz); // calculate weights xi[0] = part.x[part_index] - grd->XN_flat[get_idx(ix-1, iy, iz, grd->nyn, grd->nzn)]; eta[0] = part.y[part_index] - grd->YN_flat[get_idx(ix, iy-1, iz, grd->nyn, grd->nzn)]; zeta[0] = part.z[part_index] - grd->ZN_flat[get_idx(ix, iy, iz-1, grd->nyn, grd->nzn)]; xi[1] = grd->XN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.x[part_index]; eta[1] = grd->YN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.y[part_index]; zeta[1] = grd->ZN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.z[part_index]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) weight[ii][jj][kk] = xi[ii] * eta[jj] * zeta[kk] * grd->invVOL; // set to zero local electric and magnetic field Exl=0.0, Eyl = 0.0, Ezl = 0.0, Bxl = 0.0, Byl = 0.0, Bzl = 0.0; for (int ii=0; ii < 2; ii++) for (int jj=0; jj < 2; jj++) for(int kk=0; kk < 2; kk++){ long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); Exl += weight[ii][jj][kk]*field->Ex_flat[index]; Eyl += weight[ii][jj][kk]*field->Ey_flat[index]; Ezl += weight[ii][jj][kk]*field->Ez_flat[index]; Bxl += weight[ii][jj][kk]*field->Bxn_flat[index]; Byl += weight[ii][jj][kk]*field->Byn_flat[index]; Bzl += weight[ii][jj][kk]*field->Bzn_flat[index]; } // end interpolation omdtsq = qomdt2*qomdt2*(Bxl*Bxl+Byl*Byl+Bzl*Bzl); denom = 1.0/(1.0 + omdtsq); // solve the position equation ut= part.u[part_index] + qomdt2*Exl; vt= part.v[part_index] + qomdt2*Eyl; wt= part.w[part_index] + qomdt2*Ezl; udotb = ut*Bxl + vt*Byl + wt*Bzl; // solve the velocity equation uptilde = (ut+qomdt2*(vt*Bzl -wt*Byl + qomdt2*udotb*Bxl))*denom; // ut, vt, Bzl vptilde = (vt+qomdt2*(wt*Bxl -ut*Bzl + qomdt2*udotb*Byl))*denom; wptilde = (wt+qomdt2*(ut*Byl -vt*Bxl + qomdt2*udotb*Bzl))*denom; // update position part.x[part_index] = xptilde + uptilde*dto2; part.y[part_index] = yptilde + vptilde*dto2; part.z[part_index] = zptilde + wptilde*dto2; } // end of iteration // update the final position and velocity part.u[part_index]= 2.0*uptilde - part.u[part_index]; part.v[part_index]= 2.0*vptilde - part.v[part_index]; part.w[part_index]= 2.0*wptilde - part.w[part_index]; part.x[part_index] = xptilde + uptilde*dt_sub_cycling; part.y[part_index] = yptilde + vptilde*dt_sub_cycling; part.z[part_index] = zptilde + wptilde*dt_sub_cycling; ////////// ////////// ////////// BC // X-DIRECTION: BC particles if (part.x[part_index] > grd->Lx){ if (param->PERIODICX==true){ // PERIODIC part.x[part_index] = part.x[part_index] - grd->Lx; } else { // REFLECTING BC part.u[part_index] = -part.u[part_index]; part.x[part_index] = 2*grd->Lx - part.x[part_index]; } } if (part.x[part_index] < 0){ if (param->PERIODICX==true){ // PERIODIC part.x[part_index] = part.x[part_index] + grd->Lx; } else { // REFLECTING BC part.u[part_index] = -part.u[part_index]; part.x[part_index] = -part.x[part_index]; } } // Y-DIRECTION: BC particles if (part.y[part_index] > grd->Ly){ if (param->PERIODICY==true){ // PERIODIC part.y[part_index] = part.y[part_index] - grd->Ly; } else { // REFLECTING BC 
part.v[part_index] = -part.v[part_index]; part.y[part_index] = 2*grd->Ly - part.y[part_index]; } } if (part.y[part_index] < 0){ if (param->PERIODICY==true){ // PERIODIC part.y[part_index] = part.y[part_index] + grd->Ly; } else { // REFLECTING BC part.v[part_index] = -part.v[part_index]; part.y[part_index] = -part.y[part_index]; } } // Z-DIRECTION: BC particles if (part.z[part_index] > grd->Lz){ if (param->PERIODICZ==true){ // PERIODIC part.z[part_index] = part.z[part_index] - grd->Lz; } else { // REFLECTING BC part.w[part_index] = -part.w[part_index]; part.z[part_index] = 2*grd->Lz - part.z[part_index]; } } if (part.z[part_index] < 0){ if (param->PERIODICZ==true){ // PERIODIC part.z[part_index] = part.z[part_index] + grd->Lz; } else { // REFLECTING BC part.w[part_index] = -part.w[part_index]; part.z[part_index] = -part.z[part_index]; } } } // end of one particle } /** MODIFICATION: New particle mover */ // TODO COMMENT MORE int mover_PC(struct device_part_arrays part, EMfield *field, grid* grd, parameters *param, long nop, int species_ID, int n_sub_cycles, FPpart qom, int NiterMover) { // print species and subcycling std::cout << "*** MOVER with SUBCYCLYING "<< n_sub_cycles << " - species " << species_ID << " ***" << std::endl; int blocks = ((nop + TPB - 1) / TPB); mover_PC_kernel<<<blocks, TPB>>>(part, field, grd, param, nop, n_sub_cycles, qom, NiterMover); cudaError_t err = cudaGetLastError(); if(err != cudaSuccess) { std::cout << "Kernel mover_PC error: " << cudaGetErrorString(err) << std::endl; exit(0); } return 0; } /** Interpolation Particle --> Grid: This is for species */ // TODO COMMENT WHEN DONE __global__ void interpP2G_kernel(device_part_arrays part, device_ids_arrays ids, grid* grd, long nop) { int part_index = blockIdx.x*blockDim.x + threadIdx.x; if(part_index >= nop) return; // arrays needed for interpolation FPpart weight[2][2][2]; FPpart temp[2][2][2]; FPpart xi[2], eta[2], zeta[2]; // index of the cell int ix, iy, iz; // determine cell: can we change to int()? is it faster? 
ix = 2 + int(floor((part.x[part_index] - grd->xStart) * grd->invdx)); iy = 2 + int(floor((part.y[part_index] - grd->yStart) * grd->invdy)); iz = 2 + int(floor((part.z[part_index] - grd->zStart) * grd->invdz)); // distances from node xi[0] = part.x[part_index] - grd->XN_flat[get_idx(ix-1, iy, iz, grd->nyn, grd->nzn)]; eta[0] = part.y[part_index] - grd->YN_flat[get_idx(ix, iy-1, iz, grd->nyn, grd->nzn)]; zeta[0] = part.z[part_index] - grd->ZN_flat[get_idx(ix, iy, iz-1, grd->nyn, grd->nzn)]; xi[1] = grd->XN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.x[part_index]; eta[1] = grd->YN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.y[part_index]; zeta[1] = grd->ZN_flat[get_idx(ix, iy, iz, grd->nyn, grd->nzn)] - part.z[part_index]; // calculate the weights for different nodes for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) weight[ii][jj][kk] = part.q[part_index] * xi[ii] * eta[jj] * zeta[kk] * grd->invVOL; ////////////////////////// // add charge density for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++){ long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.rhon_flat[index]), weight[ii][jj][kk] * grd->invVOL); } //////////////////////////// // add current density - Jx for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.u[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.Jx_flat[index]), temp[ii][jj][kk] * grd->invVOL); } //////////////////////////// // add current density - Jy for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.v[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.Jy_flat[index]), temp[ii][jj][kk] * grd->invVOL); } //////////////////////////// // add current density - Jz for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.w[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.Jz_flat[index]), temp[ii][jj][kk] * grd->invVOL); } //////////////////////////// // add pressure pxx for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.u[part_index] * part.u[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pxx_flat[index]), temp[ii][jj][kk] * grd->invVOL); } //////////////////////////// // add pressure pxy for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.u[part_index] * part.v[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pxy_flat[index]), temp[ii][jj][kk] * grd->invVOL); } ///////////////////////////// // add pressure pxz for (int ii = 0; ii < 2; ii++) for (int jj = 
0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.u[part_index] * part.w[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pxz_flat[index]), temp[ii][jj][kk] * grd->invVOL); } ///////////////////////////// // add pressure pyy for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.v[part_index] * part.v[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pyy_flat[index]), temp[ii][jj][kk] * grd->invVOL); } ///////////////////////////// // add pressure pyz for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.v[part_index] * part.w[part_index] * weight[ii][jj][kk]; for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pyz_flat[index]), temp[ii][jj][kk] * grd->invVOL); } ///////////////////////////// // add pressure pzz for (int ii = 0; ii < 2; ii++) for (int jj = 0; jj < 2; jj++) for (int kk = 0; kk < 2; kk++) temp[ii][jj][kk] = part.w[part_index] * part.w[part_index] * weight[ii][jj][kk]; for (int ii=0; ii < 2; ii++) for (int jj=0; jj < 2; jj++) for(int kk=0; kk < 2; kk++) { long index = get_idx(ix-ii, iy-jj, iz-kk, grd->nyn, grd->nzn); atomicAdd(&(ids.pzz_flat[index]), temp[ii][jj][kk] * grd->invVOL); } } // TODO COMMENT WHEN DONE void interpP2G(struct device_part_arrays part, struct device_ids_arrays ids, grid* grd, long nop) { int blocks = (nop + TPB - 1) / TPB; interpP2G_kernel<<<blocks, TPB>>>(part, ids, grd, nop); }
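The only substantive difference between this .cu file and its .hip twin above is mechanical: hipify rewrites the cuda* runtime calls to hip* and the triple-chevron launches to hipLaunchKernelGGL. A small portability sketch (not part of this project) that captures the mapping seen in the two mover_PC launches; the LAUNCH_KERNEL macro name is hypothetical.

// Sketch: one launch macro covering both spellings that appear in the .cu and
// .hip versions of mover_PC(). Not part of the project; illustrative only.
#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#define LAUNCH_KERNEL(kernel, grid, block, shmem, stream, ...) \
    hipLaunchKernelGGL((kernel), dim3(grid), dim3(block), (shmem), (stream), __VA_ARGS__)
#else
#include <cuda_runtime.h>
#define LAUNCH_KERNEL(kernel, grid, block, shmem, stream, ...) \
    (kernel)<<<(grid), (block), (shmem), (stream)>>>(__VA_ARGS__)
#endif

// Usage equivalent to the launch in mover_PC():
//   LAUNCH_KERNEL(mover_PC_kernel, blocks, TPB, 0, 0,
//                 part, field, grd, param, nop, n_sub_cycles, qom, NiterMover);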
ba1cc95f6238a505702108c28217a195ce9b3c0a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/local/cuda-convnet2/filter_acts/filter_act_sparse2_y4x32i4f16c4_tex.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ /** * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * -------------------------------------------------------------------------- * * This file has been modified by Megvii ("Megvii Modifications"). * * All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved. * -------------------------------------------------------------------------- */ #include "filter_act_templates.cuh" namespace megdnn { namespace cuda { template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex (FILTER_ACTS_PARAMS) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0); fill_shared_mem<float>((float *)shImages, sizeof(shImages)/sizeof(float), 0); __syncthreads(); const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? 
const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; const int imgOffset = (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; // images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; const int filterOffset = blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX + (conv ? 0 : moduleIdx * numFilterColors * filterPixels * numFilters); // filters +=blockFilterIdx // + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; // if (!conv) { // filters += moduleIdx * numFilterColors * filterPixels * numFilters; // } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; // float fCache[filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } // NOTE: these max/min functions increase register usage as compared to my macros const int imgStartX = max(0, imgLoadModPosX); const int imgStartY = max(0, imgLoadModPosY); const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] int fPidx, iPidx; float imPreload[imgsPerThread]; // [4] float fPreload[colorCache*filtersPerThread/B_X]; // [2] // float fCache[filtersPerThread]; filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { imPreload[i] = tex1Dfetch<float>(images, imgOffset + imgStride * iPidx + i * B_X); } else { imPreload[i] = 0; } } if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage.. #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { fPreload[c*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filterOffset + (c * filterPixels + fPidx) * numFilters); } } for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { // const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { // const int filterPxX = imgX - imgLoadModPosX; // const int p = filterPxY * filterSize + filterPxX; // const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img // setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx); // float* m = &images[imgStride * pixIdx]; const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1; int imgYNext = imgY; int imgXNext = imgX; int fPidxNext, iPidxNext; if (!lastPixel) { imgYNext = imgY + (imgX + 1 == imgEndX); imgXNext = imgX + 1 == imgEndX ? 
imgStartX : imgX + 1; } filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext); for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) // const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)]; // const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)]; int imgOffset2 = imgOffset + imgStride * ((oc + colorCache) * imgPixels + iPidx); int filterOffset2 = filterOffset + numFilters * ((oc + colorCache) * filterPixels + fPidx); if (oc == numFilterColors - colorCache) { filterOffset2 = filterOffset + fPidxNext * numFilters; imgOffset2 = imgOffset + iPidxNext * imgStride; fPidx = fPidxNext; iPidx = iPidxNext; } #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X]; } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! shImages[ty][tx * imgsPerThread + i] = imPreload[i]; } imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 0 * B_X); imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 1 * B_X); imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 2 * B_X); __syncthreads(); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f]; } } fPreload[0] = tex1Dfetch<float>(filters, filterOffset2 + 0); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f]; } } fPreload[1] = tex1Dfetch<float>(filters, filterOffset2 + (B_X/filtersPerThread * filterPixels) * numFilters); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f]; } } imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 
0 : tex1Dfetch<float>(images, imgOffset2 + 3 * B_X); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f]; } } __syncthreads(); } } } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } template __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >(FILTER_ACTS_PARAMS); template __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >(FILTER_ACTS_PARAMS); } // namespace cuda } // namespace megdnn
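The explicit template instantiations at the end of the file fix B_Y=4, B_X=32, imgsPerThread=4, filtersPerThread=16, colorCache=4, and the index arithmetic inside the kernel implies a particular grid shape: blockIdx.x tiles images in chunks of B_X*imgsPerThread and blockIdx.y enumerates module/filter-block pairs. The helper below is a sketch of a launch configuration consistent with that arithmetic; the real dispatcher in filter_acts may compute it differently.

// Sketch: launch dimensions consistent with the kernel's index math for the
// <4, 32, 4, 16, 4, ...> instantiations above. Illustrative, not the project's
// actual dispatch code.
static void filter_acts_launch_dims(int numImages, int numModules, int numFilters,
                                    dim3 *blocks, dim3 *threads)
{
    const int B_X = 32, B_Y = 4, imgsPerThread = 4, filtersPerThread = 16;
    *threads = dim3(B_X, B_Y);
    const int blocksPerModule = numFilters / (B_Y * filtersPerThread);
    *blocks  = dim3((numImages + B_X * imgsPerThread - 1) / (B_X * imgsPerThread),
                    numModules * blocksPerModule);
}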
ba1cc95f6238a505702108c28217a195ce9b3c0a.cu
/** * \file dnn/src/cuda/local/cuda-convnet2/filter_acts/filter_act_sparse2_y4x32i4f16c4_tex.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ /** * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * -------------------------------------------------------------------------- * * This file has been modified by Megvii ("Megvii Modifications"). * * All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved. * -------------------------------------------------------------------------- */ #include "filter_act_templates.cuh" namespace megdnn { namespace cuda { template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex (FILTER_ACTS_PARAMS) { __shared__ float shFilters[colorCache][B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache][B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images fill_shared_mem<float>((float *)shFilters, sizeof(shFilters)/sizeof(float), 0); fill_shared_mem<float>((float *)shImages, sizeof(shImages)/sizeof(float), 0); __syncthreads(); const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; // Another fun insanity: the % B_X makes things faster, even thought threadIdx.x is // in the range 0..31. It appears that this allows the compiler to optimize? const int tx = threadIdx.x % B_X; const int ty = threadIdx.y % B_Y; const int tidx = ty * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; const int imgOffset = (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; // images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; const int filterOffset = blockFilterIdx + shFilterLoadY * numFilters * filterPixels + shFilterLoadX + (conv ? 
0 : moduleIdx * numFilterColors * filterPixels * numFilters); // filters +=blockFilterIdx // + shFilterLoadY * numFilters * filterPixels + shFilterLoadX; // if (!conv) { // filters += moduleIdx * numFilterColors * filterPixels * numFilters; // } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y * filtersPerThread) * numImages * numModules + myImgIdx; float prod[imgsPerThread][filtersPerThread]; // float fCache[filtersPerThread]; #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] = 0; } } // NOTE: these max/min functions increase register usage as compared to my macros const int imgStartX = max(0, imgLoadModPosX); const int imgStartY = max(0, imgLoadModPosY); const int imgEndX = min(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = min(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] int fPidx, iPidx; float imPreload[imgsPerThread]; // [4] float fPreload[colorCache*filtersPerThread/B_X]; // [2] // float fCache[filtersPerThread]; filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgStartY, imgStartX, fPidx, iPidx); #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { imPreload[i] = tex1Dfetch<float>(images, imgOffset + imgStride * iPidx + i * B_X); } else { imPreload[i] = 0; } } if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X/filtersPerThread) { // This if statement reduces reg usage.. #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { fPreload[c*filtersPerThread/B_X] = tex1Dfetch<float>(filters, filterOffset + (c * filterPixels + fPidx) * numFilters); } } for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { // const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { // const int filterPxX = imgX - imgLoadModPosX; // const int p = filterPxY * filterSize + filterPxX; // const int pixIdx = imgY * imgSizeX + imgX;// Pixel index in img // setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgY, imgX, &p, &pixIdx); // float* m = &images[imgStride * pixIdx]; const bool lastPixel = imgY == imgEndY - 1 && imgX == imgEndX - 1; int imgYNext = imgY; int imgXNext = imgX; int fPidxNext, iPidxNext; if (!lastPixel) { imgYNext = imgY + (imgX + 1 == imgEndX); imgXNext = imgX + 1 == imgEndX ? imgStartX : imgX + 1; } filterActs_YxX_sparse2_preload_ty_4_tx_32_f_16_c_4_setPixelCoords(filterSize, imgSizeX, imgLoadModPosY, imgLoadModPosX, imgYNext, imgXNext, fPidxNext, iPidxNext); for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) // const float* ff = &filters[numFilters * ((oc + colorCache) * filterPixels + fPidx)]; // const float* mm = &images[imgStride * ((oc + colorCache) * imgPixels + iPidx)]; int imgOffset2 = imgOffset + imgStride * ((oc + colorCache) * imgPixels + iPidx); int filterOffset2 = filterOffset + numFilters * ((oc + colorCache) * filterPixels + fPidx); if (oc == numFilterColors - colorCache) { filterOffset2 = filterOffset + fPidxNext * numFilters; imgOffset2 = imgOffset + iPidxNext * imgStride; fPidx = fPidxNext; iPidx = iPidxNext; } #pragma unroll for (int c = 0; c < colorCache; c += B_X/filtersPerThread) { shFilters[c + shFilterLoadY][shFilterLoadX] = fPreload[c*filtersPerThread/B_X]; } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! 
shImages[ty][tx * imgsPerThread + i] = imPreload[i]; } imPreload[0] = (checkImgBounds && myImgIdx + 0 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 0 * B_X); imPreload[1] = (checkImgBounds && myImgIdx + 1 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 1 * B_X); imPreload[2] = (checkImgBounds && myImgIdx + 2 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 2 * B_X); __syncthreads(); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[0][threadIdx.x * imgsPerThread + i] * shFilters[0][threadIdx.y * filtersPerThread + f]; } } fPreload[0] = tex1Dfetch<float>(filters, filterOffset2 + 0); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[1][threadIdx.x * imgsPerThread + i] * shFilters[1][threadIdx.y * filtersPerThread + f]; } } fPreload[1] = tex1Dfetch<float>(filters, filterOffset2 + (B_X/filtersPerThread * filterPixels) * numFilters); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[2][threadIdx.x * imgsPerThread + i] * shFilters[2][threadIdx.y * filtersPerThread + f]; } } imPreload[3] = (checkImgBounds && myImgIdx + 3 * B_X >= numImages) ? 0 : tex1Dfetch<float>(images, imgOffset2 + 3 * B_X); #pragma unroll for(int i = 0; i < imgsPerThread; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { prod[i][f] += shImages[3][threadIdx.x * imgsPerThread + i] * shFilters[3][threadIdx.y * filtersPerThread + f]; } } __syncthreads(); } } } if (scale) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleTargets * targets[i * B_X + f * numImages * numModules] + scaleOutputs * prod[i][f]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { targets[i * B_X + f * numImages * numModules] = scaleOutputs * prod[i][f]; } } } } } template __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, false, false >(FILTER_ACTS_PARAMS); template __global__ void filterActs_YxX_sparse2_preload_ty_4_tx_32_i_4_f_16_c_4_tex < 4, 32, 4, 16, 4, true, false >(FILTER_ACTS_PARAMS); } // namespace cuda } // namespace megdnn
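The kernel reads images and filters through tex1Dfetch<float>(), which means FILTER_ACTS_PARAMS must carry texture objects rather than raw pointers for those inputs. Below is a minimal sketch of wrapping an existing linear device buffer in a cudaTextureObject_t so it can be passed to such a kernel; the function and parameter names (make_linear_float_tex, d_images, numImageBytes) are placeholders, not names from this project.

// Sketch: create a texture object over a linear float buffer for use with
// tex1Dfetch<float>(). Placeholder names; error checking omitted for brevity.
#include <cuda_runtime.h>
#include <string.h>

cudaTextureObject_t make_linear_float_tex(const float *d_images, size_t numImageBytes)
{
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType                  = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr        = const_cast<float *>(d_images);
    resDesc.res.linear.desc          = cudaCreateChannelDesc<float>();
    resDesc.res.linear.sizeInBytes   = numImageBytes;

    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.readMode = cudaReadModeElementType;   // return raw float values

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    return tex;
}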
32847f3ad78947ee7830ab96511b8e9884d9a17a.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> /* * This example helps to visualize the relationship between thread/block IDs and * offsets into data. For each CUDA thread, this example displays the * intra-block thread ID, the inter-block block ID, the global coordinate of a * thread, the calculated offset into input data, and the input data at that * offset. */ #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \ exit(-10*error); \ } \ } void initialInt(int *ip, int size) { for (int i = 0; i < size; i++) { ip[i] = i; } } void printMatrix(int *C, const int nx, const int ny) { int *ic = C; printf("\nMatrix: (%d.%d)\n", nx, ny); for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) { printf("%3d", ic[ix]); } ic += nx; printf("\n"); } printf("\n"); return; } __global__ void printThreadIndex(int *A, const int nx, const int ny) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d) global index" " %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]); } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // get device information int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); // set matrix dimension int nx = 8; int ny = 6; int nxy = nx * ny; int nBytes = nxy * sizeof(float); // malloc host memory int *h_A; h_A = (int *)malloc(nBytes); // iniitialize host matrix with integer for (int i = 0; i < nxy; i++) { h_A[i] = i; } printMatrix(h_A, nx, ny); // malloc device memory int *d_MatA; CHECK(hipMalloc((void **)&d_MatA, nBytes)); // transfer data from host to device CHECK(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice)); // set up execution configuration dim3 block(4, 2); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); // invoke the kernel printThreadIndex << <grid, block >> > (d_MatA, nx, ny); CHECK(hipGetLastError()); // free host and devide memory CHECK(hipFree(d_MatA)); free(h_A); // reset device CHECK(hipDeviceReset()); return (0); }
32847f3ad78947ee7830ab96511b8e9884d9a17a.cu
#include <cuda_runtime.h>
#include <stdio.h>

/*
 * This example helps to visualize the relationship between thread/block IDs and
 * offsets into data. For each CUDA thread, this example displays the
 * intra-block thread ID, the inter-block block ID, the global coordinate of a
 * thread, the calculated offset into input data, and the input data at that
 * offset.
 */

#define CHECK(call)                                                            \
{                                                                              \
    const cudaError_t error = call;                                            \
    if (error != cudaSuccess)                                                  \
    {                                                                          \
        printf("Error: %s:%d, ", __FILE__, __LINE__);                          \
        printf("code:%d, reason: %s\n", error, cudaGetErrorString(error));     \
        exit(-10*error);                                                       \
    }                                                                          \
}

void initialInt(int *ip, int size)
{
    for (int i = 0; i < size; i++)
    {
        ip[i] = i;
    }
}

void printMatrix(int *C, const int nx, const int ny)
{
    int *ic = C;
    printf("\nMatrix: (%d.%d)\n", nx, ny);

    for (int iy = 0; iy < ny; iy++)
    {
        for (int ix = 0; ix < nx; ix++)
        {
            printf("%3d", ic[ix]);
        }

        ic += nx;
        printf("\n");
    }

    printf("\n");
    return;
}

__global__ void printThreadIndex(int *A, const int nx, const int ny)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;

    printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d) global index"
           " %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y,
           ix, iy, idx, A[idx]);
}

int main(int argc, char **argv)
{
    printf("%s Starting...\n", argv[0]);

    // get device information
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    // set matrix dimension
    int nx = 8;
    int ny = 6;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(float);   // array holds ints; this relies on sizeof(int) == sizeof(float)

    // malloc host memory
    int *h_A;
    h_A = (int *)malloc(nBytes);

    // initialize host matrix with integers
    for (int i = 0; i < nxy; i++)
    {
        h_A[i] = i;
    }
    printMatrix(h_A, nx, ny);

    // malloc device memory
    int *d_MatA;
    CHECK(cudaMalloc((void **)&d_MatA, nBytes));

    // transfer data from host to device
    CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice));

    // set up execution configuration
    dim3 block(4, 2);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);

    // invoke the kernel
    printThreadIndex<<<grid, block>>>(d_MatA, nx, ny);
    CHECK(cudaGetLastError());

    // free host and device memory
    CHECK(cudaFree(d_MatA));
    free(h_A);

    // reset device
    CHECK(cudaDeviceReset());

    return (0);
}
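A worked example of the index math this program prints: with nx = 8, block = (4,2), and grid = (2,3), thread (1,0) of block (1,2) has ix = 1 + 1*4 = 5 and iy = 0 + 2*2 = 4, so idx = 4*8 + 5 = 37, and since h_A[i] = i the reported ival is 37. The tiny host-side check below reproduces that arithmetic and is separate from the file above.

// Host-side spot check of the kernel's index arithmetic (separate snippet,
// not part of the file above).
#include <assert.h>

static unsigned int global_index(int tx, int ty, int bx, int by,
                                 int bdimx, int bdimy, int nx)
{
    int ix = tx + bx * bdimx;
    int iy = ty + by * bdimy;
    return (unsigned int)(iy * nx + ix);
}

int check_index_example(void)
{
    // thread (1,0) of block (1,2), block dims (4,2), nx = 8 -> element 37
    assert(global_index(1, 0, 1, 2, 4, 2, 8) == 37u);
    return 0;
}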
db886d0ff4f744627e45ea3cb1182cf24b446b38.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************ ******** * MatrixMulti CUDA program. ************************************************************ *********/ #define BLOCK_SIZE 16 #define WIDTH (BLOCK_SIZE * 128) #define HEIGHT (BLOCK_SIZE * 128) #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <hip/hip_runtime.h> //#include <cutil.h> #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_cuda_drvapi.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_cuda_gl.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_cuda.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_functions.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_image.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_math.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_string.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_timer.h" typedef struct { int width; int height; float* elements; } Matrix; /************************************************************ ************/ /* Init CUDA */ /************************************************************ ************/ #if __DEVICE_EMULATION__ bool InitCUDA(void){return true;} #else bool InitCUDA(void) { int count = 0; int i = 0; hipGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } for(i = 0; i < count; i++) { hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA.\n"); return false; } hipSetDevice(i); printf("CUDA initialized.\n"); return true; } #endif // Allocates a matrix with random float entries. 
void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; } /************************************************************ ************/ //Kernel //Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; for (int e = 0; e < A.width; ++e) Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; } /************************************************************ ************/ // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A,d_B,d_C; struct timeval tv1, tv2; size_t size = A.width * A.height * sizeof(float); d_A.width =A.width; d_A.height = A.width; gettimeofday(&tv1, NULL); hipMalloc((void**)&d_A.elements, size); hipMemcpy(d_A.elements,A.elements, size, hipMemcpyHostToDevice); gettimeofday(&tv2, NULL); printf("copying A takes %ld micro seconds\n", (tv2.tv_sec - tv1.tv_sec) * 1000000L + (tv2.tv_usec - tv1.tv_usec)); gettimeofday(&tv1, NULL); d_B.width = B.width; d_B.height = B.height; hipMalloc((void**)&d_B.elements, size); hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice); gettimeofday(&tv2, NULL); printf("copying B takes %ld micro seconds\n", (tv2.tv_sec - tv1.tv_sec) * 1000000L + (tv2.tv_usec - tv1.tv_usec)); // Allocate C in device memory d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); hipMalloc((void**)&d_C.elements, size); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); gettimeofday(&tv1, NULL); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C); hipDeviceSynchronize(); gettimeofday(&tv2, NULL); printf("Invoking kernel takes %ld micro seconds\n", (tv2.tv_sec - tv1.tv_sec) * 1000000L + (tv2.tv_usec - tv1.tv_usec)); gettimeofday(&tv1, NULL); // Read C from device memory hipMemcpy(C.elements, d_C.elements, size,hipMemcpyDeviceToHost); gettimeofday(&tv2, NULL); printf("Copying C takes %ld micro seconds\n", (tv2.tv_sec - tv1.tv_sec) * 1000000L + (tv2.tv_usec - tv1.tv_usec)); // Free device memory hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); } /************************************************************ ************/ /*MAIN */ /************************************************************ ************/ int main(int argc, char* argv[]) { if(!InitCUDA()) { return 0; } // allocate host memory for matrices A and B Matrix h_A,h_B,h_C; h_A.width=WIDTH; h_A.height=HEIGHT; h_B.width=WIDTH; h_B.height=HEIGHT; h_C.width=WIDTH; h_C.height=HEIGHT; unsigned int size = WIDTH*HEIGHT; unsigned int mem_size = sizeof(float) * size; h_A.elements= (float*) malloc(mem_size); h_B.elements= (float*) malloc(mem_size); h_C.elements= (float*) malloc(mem_size); // set seed for rand() srand(2006); // initialize host memory randomInit(h_A.elements, size); randomInit(h_B.elements, size); //invoke MatMul MatMul(h_A,h_B,h_C); return 0; }
db886d0ff4f744627e45ea3cb1182cf24b446b38.cu
/************************************************************ ******** * MatrixMulti CUDA program. ************************************************************ *********/ #define BLOCK_SIZE 16 #define WIDTH (BLOCK_SIZE * 128) #define HEIGHT (BLOCK_SIZE * 128) #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <cuda_runtime.h> //#include <cutil.h> #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_cuda_drvapi.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_cuda_gl.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_cuda.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_functions.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_image.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_math.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_string.h" #include "/home/syma/etc/CUDA_5.0_SAMPLES/common/inc/helper_timer.h" typedef struct { int width; int height; float* elements; } Matrix; /************************************************************ ************/ /* Init CUDA */ /************************************************************ ************/ #if __DEVICE_EMULATION__ bool InitCUDA(void){return true;} #else bool InitCUDA(void) { int count = 0; int i = 0; cudaGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA.\n"); return false; } cudaSetDevice(i); printf("CUDA initialized.\n"); return true; } #endif // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; } /************************************************************ ************/ //Kernel //Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; for (int e = 0; e < A.width; ++e) Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; } /************************************************************ ************/ // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A,d_B,d_C; struct timeval tv1, tv2; size_t size = A.width * A.height * sizeof(float); d_A.width =A.width; d_A.height = A.width; gettimeofday(&tv1, NULL); cudaMalloc((void**)&d_A.elements, size); cudaMemcpy(d_A.elements,A.elements, size, cudaMemcpyHostToDevice); gettimeofday(&tv2, NULL); printf("copying A takes %ld micro seconds\n", (tv2.tv_sec - tv1.tv_sec) * 1000000L + (tv2.tv_usec - tv1.tv_usec)); gettimeofday(&tv1, NULL); d_B.width = B.width; d_B.height = B.height; cudaMalloc((void**)&d_B.elements, size); cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); gettimeofday(&tv2, NULL); printf("copying B takes %ld micro seconds\n", (tv2.tv_sec - tv1.tv_sec) * 1000000L + (tv2.tv_usec - tv1.tv_usec)); // Allocate C in device memory d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); cudaMalloc((void**)&d_C.elements, size); // Invoke 
kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); gettimeofday(&tv1, NULL); MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); cudaThreadSynchronize(); gettimeofday(&tv2, NULL); printf("Invoking kernel takes %ld micro seconds\n", (tv2.tv_sec - tv1.tv_sec) * 1000000L + (tv2.tv_usec - tv1.tv_usec)); gettimeofday(&tv1, NULL); // Read C from device memory cudaMemcpy(C.elements, d_C.elements, size,cudaMemcpyDeviceToHost); gettimeofday(&tv2, NULL); printf("Copying C takes %ld micro seconds\n", (tv2.tv_sec - tv1.tv_sec) * 1000000L + (tv2.tv_usec - tv1.tv_usec)); // Free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } /************************************************************ ************/ /*MAIN */ /************************************************************ ************/ int main(int argc, char* argv[]) { if(!InitCUDA()) { return 0; } // allocate host memory for matrices A and B Matrix h_A,h_B,h_C; h_A.width=WIDTH; h_A.height=HEIGHT; h_B.width=WIDTH; h_B.height=HEIGHT; h_C.width=WIDTH; h_C.height=HEIGHT; unsigned int size = WIDTH*HEIGHT; unsigned int mem_size = sizeof(float) * size; h_A.elements= (float*) malloc(mem_size); h_B.elements= (float*) malloc(mem_size); h_C.elements= (float*) malloc(mem_size); // set seed for rand() srand(2006); // initialize host memory randomInit(h_A.elements, size); randomInit(h_B.elements, size); //invoke MatMul MatMul(h_A,h_B,h_C); return 0; }
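Beyond the renames, the MatMul pair above shows the two structural rewrites hipify performs: the triple-chevron launch MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C) becomes the hipLaunchKernelGGL macro (kernel, grid, block, dynamic shared-memory bytes, stream, then the kernel arguments), and the long-deprecated cudaThreadSynchronize() becomes hipDeviceSynchronize(). A minimal sketch of that launch form, with a hypothetical scale kernel standing in for MatMulKernel, is:

#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void scale(float *data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main()
{
    const int n = 1 << 20;
    float *d_data = NULL;
    hipMalloc((void **)&d_data, n * sizeof(float));
    hipMemset(d_data, 0, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // Equivalent of scale<<<grid, block>>>(d_data, 2.0f, n):
    // arguments 3 and 4 are dynamic shared-memory bytes and the stream.
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_data, 2.0f, n);

    hipDeviceSynchronize();                 // replaces cudaThreadSynchronize()
    printf("launch status: %s\n", hipGetErrorString(hipGetLastError()));

    hipFree(d_data);
    return 0;
}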
9aee2181b77615c27184538218e6a47fae774d41.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "cuda_shape.h" #include "cuda_camera.h" #include "cuda_scene.h" #include "tonemapping.h" #include "render_parameters.h" #include "kernel_globals.h" #include "shader.h" auto constexpr WIDTH = 640; auto constexpr HEIGHT = 480; __global__ void testSimpleScene(glm::u8vec4* img, cudaScene scene, RenderParameters params, unsigned int hashed_N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; unsigned int idy = blockDim.y * blockIdx.y + threadIdx.y; unsigned int offset = idy * scene.camera.imageW + idx; img[offset] = IMG_BLACK; hiprandState_t rng; hiprand_init(hashed_N + offset, 0, 0, &rng); cudaRay ray; scene.camera.GenerateRay(idx, idy, rng, &ray); glm::vec3 L = glm::vec3(0.f, 0.f, 0.f); glm::vec3 T = glm::vec3(1.f, 1.f, 1.f); SurfaceElement se; for(auto k = 0; k < params.rayDepth; ++k) { if(!scene_intersect(scene, ray, se)) { L += T * scene.env_light.GetEnvRadiance(ray.dir, 0.6); break; } L += T * scene.materials[se.matID].emition; switch(scene.materials[se.matID].bsdf_type) { case BSDF_DIFFUSE: diffuse_shading(scene, se, rng, &ray, &T); break; case BSDF_GLASS: refractive_shading(scene, se, rng, &ray, &T); break; case BSDF_GLOSSY: glossy_shading(scene, se, rng, &ray, &T); break; case BSDF_PLASTIC: coat_shading(scene, se, rng, &ray, &T); break; default: break; } //russian roulette if(k >= 3) { float illum = illuminance(T); if(hiprand_uniform(&rng) > illum) break; T /= illum; } } running_estimate(params.hdr_buffer[offset], L, params.iteration_count); L = reinhard_tone_mapping(params.hdr_buffer[offset], params.exposure); img[offset] = glm::u8vec4(fabsf(L.x) * 255, fabsf(L.y) * 255, fabsf(L.z) * 255, 0); } extern "C" void test(glm::u8vec4* img, cudaScene& scene, RenderParameters& params) { dim3 blockSize(16, 16); dim3 gridSize(640 / blockSize.x, 480 / blockSize.y); if(params.iteration_count == 0) { checkCudaErrors(hipMemset(params.hdr_buffer, 0, sizeof(float3) * WIDTH * HEIGHT)); } hipLaunchKernelGGL(( testSimpleScene), dim3(gridSize), dim3(blockSize), 0, 0, img, scene, params, wangHash(params.iteration_count)); checkCudaErrors(hipDeviceSynchronize()); }
9aee2181b77615c27184538218e6a47fae774d41.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include "cuda_shape.h" #include "cuda_camera.h" #include "cuda_scene.h" #include "tonemapping.h" #include "render_parameters.h" #include "kernel_globals.h" #include "shader.h" auto constexpr WIDTH = 640; auto constexpr HEIGHT = 480; __global__ void testSimpleScene(glm::u8vec4* img, cudaScene scene, RenderParameters params, unsigned int hashed_N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; unsigned int idy = blockDim.y * blockIdx.y + threadIdx.y; unsigned int offset = idy * scene.camera.imageW + idx; img[offset] = IMG_BLACK; curandState rng; curand_init(hashed_N + offset, 0, 0, &rng); cudaRay ray; scene.camera.GenerateRay(idx, idy, rng, &ray); glm::vec3 L = glm::vec3(0.f, 0.f, 0.f); glm::vec3 T = glm::vec3(1.f, 1.f, 1.f); SurfaceElement se; for(auto k = 0; k < params.rayDepth; ++k) { if(!scene_intersect(scene, ray, se)) { L += T * scene.env_light.GetEnvRadiance(ray.dir, 0.6); break; } L += T * scene.materials[se.matID].emition; switch(scene.materials[se.matID].bsdf_type) { case BSDF_DIFFUSE: diffuse_shading(scene, se, rng, &ray, &T); break; case BSDF_GLASS: refractive_shading(scene, se, rng, &ray, &T); break; case BSDF_GLOSSY: glossy_shading(scene, se, rng, &ray, &T); break; case BSDF_PLASTIC: coat_shading(scene, se, rng, &ray, &T); break; default: break; } //russian roulette if(k >= 3) { float illum = illuminance(T); if(curand_uniform(&rng) > illum) break; T /= illum; } } running_estimate(params.hdr_buffer[offset], L, params.iteration_count); L = reinhard_tone_mapping(params.hdr_buffer[offset], params.exposure); img[offset] = glm::u8vec4(fabsf(L.x) * 255, fabsf(L.y) * 255, fabsf(L.z) * 255, 0); } extern "C" void test(glm::u8vec4* img, cudaScene& scene, RenderParameters& params) { dim3 blockSize(16, 16); dim3 gridSize(640 / blockSize.x, 480 / blockSize.y); if(params.iteration_count == 0) { checkCudaErrors(cudaMemset(params.hdr_buffer, 0, sizeof(float3) * WIDTH * HEIGHT)); } testSimpleScene<<<gridSize, blockSize>>>(img, scene, params, wangHash(params.iteration_count)); checkCudaErrors(cudaDeviceSynchronize()); }
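The path-tracer pair above additionally swaps the device-side RNG: curandState, curand_init and curand_uniform become hiprandState_t, hiprand_init and hiprand_uniform. The sketch below shows that per-thread seeding pattern in isolation; the kernel is illustrative, and the exact hipRAND device-header path may differ between ROCm releases.

#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>   // provides hiprandState_t and the device RNG calls
#include <stdio.h>

// Each thread seeds its own generator from a hashed sequence number,
// mirroring the hiprand_init(hashed_N + offset, 0, 0, &rng) pattern above.
__global__ void sampleUniform(float *out, unsigned int seed, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;

    hiprandState_t rng;
    hiprand_init(seed + i, 0, 0, &rng);
    out[i] = hiprand_uniform(&rng);    // uniform sample in (0, 1]
}

int main()
{
    const int n = 1024;
    float *d_out = NULL;
    hipMalloc((void **)&d_out, n * sizeof(float));

    sampleUniform<<<(n + 255) / 256, 256>>>(d_out, 1234u, n);

    float h_out[4];
    hipMemcpy(h_out, d_out, sizeof(h_out), hipMemcpyDeviceToHost);
    printf("%f %f %f %f\n", h_out[0], h_out[1], h_out[2], h_out[3]);

    hipFree(d_out);
    return 0;
}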
39ebcd532c3703899aecd22d851675049d4fdf07.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/fill.hpp> #include <cudf/detail/gather.cuh> #include <cudf/detail/gather.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/scatter.cuh> #include <cudf/detail/scatter.hpp> #include <cudf/detail/stream_compaction.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/lists/list_view.cuh> #include <cudf/stream_compaction.hpp> #include <cudf/strings/detail/scatter.cuh> #include <cudf/strings/string_view.cuh> #include <cudf/structs/struct_view.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/utilities/traits.hpp> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> #include <numeric> namespace cudf { namespace detail { namespace { struct dispatch_map_type { template <typename MapType, std::enable_if_t<is_index_type<MapType>()>* = nullptr> std::unique_ptr<table> operator()(table_view const& source, column_view const& scatter_map, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { return detail::scatter(source, scatter_map.begin<MapType>(), scatter_map.end<MapType>(), target, check_bounds, mr, stream); } template <typename MapType, std::enable_if_t<not is_index_type<MapType>()>* = nullptr> std::unique_ptr<table> operator()(table_view const& source, column_view const& scatter_map, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { CUDF_FAIL("Scatter map column must be an integral, non-boolean type"); } }; template <bool mark_true, typename MapIterator> __global__ void marking_bitmask_kernel(mutable_column_device_view destination, MapIterator scatter_map, size_type num_scatter_rows) { size_type row = threadIdx.x + blockIdx.x * blockDim.x; while (row < num_scatter_rows) { size_type const output_row = scatter_map[row]; if (mark_true) { destination.set_valid(output_row); } else { destination.set_null(output_row); } row += blockDim.x * gridDim.x; } } template <typename MapIterator> void scatter_scalar_bitmask(std::vector<std::unique_ptr<scalar>> const& source, MapIterator scatter_map, size_type num_scatter_rows, std::vector<std::unique_ptr<column>>& target, rmm::mr::device_memory_resource* mr, hipStream_t stream) { constexpr size_type block_size = 256; size_type const grid_size = grid_1d(num_scatter_rows, block_size).num_blocks; for (size_t i = 0; i < target.size(); ++i) { auto const source_is_valid = source[i]->is_valid(stream); if (target[i]->nullable() or not source_is_valid) { if (not target[i]->nullable()) { // Target must have a null mask if the source is not valid auto mask = create_null_mask(target[i]->size(), mask_state::ALL_VALID, stream, mr); target[i]->set_null_mask(std::move(mask), 0); } auto target_view = 
mutable_column_device_view::create(target[i]->mutable_view(), stream); auto bitmask_kernel = source_is_valid ? marking_bitmask_kernel<true, decltype(scatter_map)> : marking_bitmask_kernel<false, decltype(scatter_map)>; hipLaunchKernelGGL(( bitmask_kernel), dim3(grid_size), dim3(block_size), 0, stream, *target_view, scatter_map, num_scatter_rows); } } } template <typename Element, typename MapIterator> struct column_scalar_scatterer_impl { std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { auto result = std::make_unique<column>(target, stream, mr); auto result_view = result->mutable_view(); // Use permutation iterator with constant index to dereference scalar data auto scalar_impl = static_cast<scalar_type_t<Element>*>(source.get()); auto scalar_iter = thrust::make_permutation_iterator(scalar_impl->data(), thrust::make_constant_iterator(0)); thrust::scatter(rmm::exec_policy(stream)->on(stream), scalar_iter, scalar_iter + scatter_rows, scatter_iter, result_view.begin<Element>()); return result; } }; template <typename MapIterator> struct column_scalar_scatterer_impl<string_view, MapIterator> { std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { auto const scalar_impl = static_cast<string_scalar*>(source.get()); auto const source_view = string_view(scalar_impl->data(), scalar_impl->size()); auto const begin = thrust::make_constant_iterator(source_view); auto const end = begin + scatter_rows; return strings::detail::scatter(begin, end, scatter_iter, target, mr, stream); } }; template <typename MapIterator> struct column_scalar_scatterer_impl<dictionary32, MapIterator> { std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { CUDF_FAIL("scatter scalar to dictionary not implemented"); } }; template <typename MapIterator> struct column_scalar_scatterer_impl<list_view, MapIterator> { std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { CUDF_FAIL("scatter scalar to list_view not implemented"); } }; template <typename MapIterator> struct column_scalar_scatterer_impl<struct_view, MapIterator> { std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { CUDF_FAIL("scatter scalar to struct_view not implemented"); } }; template <typename MapIterator> struct column_scalar_scatterer { template <typename Element> std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { column_scalar_scatterer_impl<Element, MapIterator> scatterer{}; return scatterer(source, scatter_iter, scatter_rows, target, mr, stream); } }; struct scatter_scalar_impl { template < typename T, std::enable_if_t<std::is_integral<T>::value and not std::is_same<T, bool>::value>* = nullptr> 
std::unique_ptr<table> operator()(std::vector<std::unique_ptr<scalar>> const& source, column_view const& indices, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { if (check_bounds) { auto const begin = -target.num_rows(); auto const end = target.num_rows(); auto bounds = bounds_checker<T>{begin, end}; CUDF_EXPECTS( indices.size() == thrust::count_if( rmm::exec_policy(stream)->on(stream), indices.begin<T>(), indices.end<T>(), bounds), "Scatter map index out of bounds"); } // Transform negative indices to index + target size auto scatter_rows = indices.size(); auto scatter_iter = thrust::make_transform_iterator(indices.begin<T>(), index_converter<T>{target.num_rows()}); // Second dispatch over data type per column auto result = std::vector<std::unique_ptr<column>>(target.num_columns()); auto scatter_functor = column_scalar_scatterer<decltype(scatter_iter)>{}; std::transform(source.begin(), source.end(), target.begin(), result.begin(), [=](auto const& source_scalar, auto const& target_col) { return type_dispatcher(source_scalar->type(), scatter_functor, source_scalar, scatter_iter, scatter_rows, target_col, mr, stream); }); scatter_scalar_bitmask(source, scatter_iter, scatter_rows, result, mr, stream); return std::make_unique<table>(std::move(result)); } template < typename T, std::enable_if_t<not std::is_integral<T>::value or std::is_same<T, bool>::value>* = nullptr> std::unique_ptr<table> operator()(std::vector<std::unique_ptr<scalar>> const& source, column_view const& indices, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, hipStream_t stream) const { CUDF_FAIL("Scatter index column must be an integral, non-boolean type"); } }; } // namespace std::unique_ptr<table> scatter(table_view const& source, column_view const& scatter_map, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_EXPECTS(source.num_columns() == target.num_columns(), "Number of columns in source and target not equal"); CUDF_EXPECTS(scatter_map.size() <= source.num_rows(), "Size of scatter map must be equal to or less than source rows"); CUDF_EXPECTS(std::equal(source.begin(), source.end(), target.begin(), [](auto const& col1, auto const& col2) { return col1.type().id() == col2.type().id(); }), "Column types do not match between source and target"); CUDF_EXPECTS(scatter_map.has_nulls() == false, "Scatter map contains nulls"); if (scatter_map.size() == 0) { return std::make_unique<table>(target, stream, mr); } // First dispatch for scatter map index type return type_dispatcher( scatter_map.type(), dispatch_map_type{}, source, scatter_map, target, check_bounds, mr, stream); } std::unique_ptr<table> scatter(std::vector<std::unique_ptr<scalar>> const& source, column_view const& indices, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_EXPECTS(source.size() == static_cast<size_t>(target.num_columns()), "Number of columns in source and target not equal"); CUDF_EXPECTS(std::equal(source.begin(), source.end(), target.begin(), [](auto const& scalar, auto const& col) { return scalar->type().id() == col.type().id(); }), "Column types do not match between source and target"); CUDF_EXPECTS(indices.has_nulls() == false, "indices contains nulls"); if (indices.size() == 0) { return std::make_unique<table>(target, stream, mr); } // First dispatch for scatter index type return type_dispatcher( indices.type(), scatter_scalar_impl{}, 
source, indices, target, check_bounds, mr, stream); } std::unique_ptr<column> boolean_mask_scatter(column_view const& input, column_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr, hipStream_t stream) { auto indices = cudf::make_numeric_column( data_type{type_id::INT32}, target.size(), mask_state::UNALLOCATED, stream); auto mutable_indices = indices->mutable_view(); thrust::sequence(rmm::exec_policy(stream)->on(stream), mutable_indices.begin<size_type>(), mutable_indices.end<size_type>(), 0); // The scatter map is actually a table with only one column, which is scatter map. auto scatter_map = detail::apply_boolean_mask( table_view{{indices->view()}}, boolean_mask, rmm::mr::get_current_device_resource(), stream); auto output_table = detail::scatter(table_view{{input}}, scatter_map->get_column(0).view(), table_view{{target}}, false, mr, stream); // There is only one column in output_table return std::make_unique<column>(std::move(output_table->get_column(0))); } std::unique_ptr<column> boolean_mask_scatter(scalar const& input, column_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr, hipStream_t stream) { return detail::copy_if_else(input, target, boolean_mask, mr, stream); } std::unique_ptr<table> boolean_mask_scatter(table_view const& input, table_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_EXPECTS(input.num_columns() == target.num_columns(), "Mismatch in number of input columns and target columns"); CUDF_EXPECTS(boolean_mask.size() == target.num_rows(), "Boolean mask size and number of target rows mismatch"); CUDF_EXPECTS(boolean_mask.type().id() == type_id::BOOL8, "Mask must be of Boolean type"); // Count valid pair of input and columns as per type at each column index i CUDF_EXPECTS( std::all_of(thrust::counting_iterator<size_type>(0), thrust::counting_iterator<size_type>(target.num_columns()), [&input, &target](auto index) { return ((input.column(index).type().id()) == (target.column(index).type().id())); }), "Type mismatch in input column and target column"); if (target.num_rows() != 0) { std::vector<std::unique_ptr<column>> out_columns(target.num_columns()); std::transform( input.begin(), input.end(), target.begin(), out_columns.begin(), [&boolean_mask, mr, stream](auto const& input_column, auto const& target_column) { return boolean_mask_scatter(input_column, target_column, boolean_mask, mr, stream); }); return std::make_unique<table>(std::move(out_columns)); } else { return empty_like(target); } } std::unique_ptr<table> boolean_mask_scatter( std::vector<std::reference_wrapper<scalar>> const& input, table_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_EXPECTS(static_cast<size_type>(input.size()) == target.num_columns(), "Mismatch in number of scalars and target columns"); CUDF_EXPECTS(boolean_mask.size() == target.num_rows(), "Boolean mask size and number of target rows mismatch"); CUDF_EXPECTS(boolean_mask.type().id() == type_id::BOOL8, "Mask must be of Boolean type"); // Count valid pair of input and columns as per type at each column/scalar index i CUDF_EXPECTS( std::all_of(thrust::counting_iterator<size_type>(0), thrust::counting_iterator<size_type>(target.num_columns()), [&input, &target](auto index) { return (input[index].get().type().id() == target.column(index).type().id()); }), "Type mismatch in input scalar and target column"); if (target.num_rows() != 
0) { std::vector<std::unique_ptr<column>> out_columns(target.num_columns()); std::transform(input.begin(), input.end(), target.begin(), out_columns.begin(), [&boolean_mask, mr, stream](auto const& scalar, auto const& target_column) { return boolean_mask_scatter( scalar.get(), target_column, boolean_mask, mr, stream); }); return std::make_unique<table>(std::move(out_columns)); } else { return empty_like(target); } } } // namespace detail std::unique_ptr<table> scatter(table_view const& source, column_view const& scatter_map, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::scatter(source, scatter_map, target, check_bounds, mr); } std::unique_ptr<table> scatter(std::vector<std::unique_ptr<scalar>> const& source, column_view const& indices, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::scatter(source, indices, target, check_bounds, mr); } std::unique_ptr<table> boolean_mask_scatter(table_view const& input, table_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::boolean_mask_scatter(input, target, boolean_mask, mr); } std::unique_ptr<table> boolean_mask_scatter( std::vector<std::reference_wrapper<scalar>> const& input, table_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::boolean_mask_scatter(input, target, boolean_mask, mr); } } // namespace cudf
39ebcd532c3703899aecd22d851675049d4fdf07.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/fill.hpp> #include <cudf/detail/gather.cuh> #include <cudf/detail/gather.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/scatter.cuh> #include <cudf/detail/scatter.hpp> #include <cudf/detail/stream_compaction.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/lists/list_view.cuh> #include <cudf/stream_compaction.hpp> #include <cudf/strings/detail/scatter.cuh> #include <cudf/strings/string_view.cuh> #include <cudf/structs/struct_view.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/utilities/traits.hpp> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> #include <numeric> namespace cudf { namespace detail { namespace { struct dispatch_map_type { template <typename MapType, std::enable_if_t<is_index_type<MapType>()>* = nullptr> std::unique_ptr<table> operator()(table_view const& source, column_view const& scatter_map, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { return detail::scatter(source, scatter_map.begin<MapType>(), scatter_map.end<MapType>(), target, check_bounds, mr, stream); } template <typename MapType, std::enable_if_t<not is_index_type<MapType>()>* = nullptr> std::unique_ptr<table> operator()(table_view const& source, column_view const& scatter_map, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { CUDF_FAIL("Scatter map column must be an integral, non-boolean type"); } }; template <bool mark_true, typename MapIterator> __global__ void marking_bitmask_kernel(mutable_column_device_view destination, MapIterator scatter_map, size_type num_scatter_rows) { size_type row = threadIdx.x + blockIdx.x * blockDim.x; while (row < num_scatter_rows) { size_type const output_row = scatter_map[row]; if (mark_true) { destination.set_valid(output_row); } else { destination.set_null(output_row); } row += blockDim.x * gridDim.x; } } template <typename MapIterator> void scatter_scalar_bitmask(std::vector<std::unique_ptr<scalar>> const& source, MapIterator scatter_map, size_type num_scatter_rows, std::vector<std::unique_ptr<column>>& target, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { constexpr size_type block_size = 256; size_type const grid_size = grid_1d(num_scatter_rows, block_size).num_blocks; for (size_t i = 0; i < target.size(); ++i) { auto const source_is_valid = source[i]->is_valid(stream); if (target[i]->nullable() or not source_is_valid) { if (not target[i]->nullable()) { // Target must have a null mask if the source is not valid auto mask = create_null_mask(target[i]->size(), mask_state::ALL_VALID, stream, mr); target[i]->set_null_mask(std::move(mask), 0); } auto target_view = mutable_column_device_view::create(target[i]->mutable_view(), stream); auto bitmask_kernel = 
source_is_valid ? marking_bitmask_kernel<true, decltype(scatter_map)> : marking_bitmask_kernel<false, decltype(scatter_map)>; bitmask_kernel<<<grid_size, block_size, 0, stream>>>( *target_view, scatter_map, num_scatter_rows); } } } template <typename Element, typename MapIterator> struct column_scalar_scatterer_impl { std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { auto result = std::make_unique<column>(target, stream, mr); auto result_view = result->mutable_view(); // Use permutation iterator with constant index to dereference scalar data auto scalar_impl = static_cast<scalar_type_t<Element>*>(source.get()); auto scalar_iter = thrust::make_permutation_iterator(scalar_impl->data(), thrust::make_constant_iterator(0)); thrust::scatter(rmm::exec_policy(stream)->on(stream), scalar_iter, scalar_iter + scatter_rows, scatter_iter, result_view.begin<Element>()); return result; } }; template <typename MapIterator> struct column_scalar_scatterer_impl<string_view, MapIterator> { std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { auto const scalar_impl = static_cast<string_scalar*>(source.get()); auto const source_view = string_view(scalar_impl->data(), scalar_impl->size()); auto const begin = thrust::make_constant_iterator(source_view); auto const end = begin + scatter_rows; return strings::detail::scatter(begin, end, scatter_iter, target, mr, stream); } }; template <typename MapIterator> struct column_scalar_scatterer_impl<dictionary32, MapIterator> { std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { CUDF_FAIL("scatter scalar to dictionary not implemented"); } }; template <typename MapIterator> struct column_scalar_scatterer_impl<list_view, MapIterator> { std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { CUDF_FAIL("scatter scalar to list_view not implemented"); } }; template <typename MapIterator> struct column_scalar_scatterer_impl<struct_view, MapIterator> { std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { CUDF_FAIL("scatter scalar to struct_view not implemented"); } }; template <typename MapIterator> struct column_scalar_scatterer { template <typename Element> std::unique_ptr<column> operator()(std::unique_ptr<scalar> const& source, MapIterator scatter_iter, size_type scatter_rows, column_view const& target, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { column_scalar_scatterer_impl<Element, MapIterator> scatterer{}; return scatterer(source, scatter_iter, scatter_rows, target, mr, stream); } }; struct scatter_scalar_impl { template < typename T, std::enable_if_t<std::is_integral<T>::value and not std::is_same<T, bool>::value>* = nullptr> std::unique_ptr<table> operator()(std::vector<std::unique_ptr<scalar>> const& source, column_view const& indices, 
table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { if (check_bounds) { auto const begin = -target.num_rows(); auto const end = target.num_rows(); auto bounds = bounds_checker<T>{begin, end}; CUDF_EXPECTS( indices.size() == thrust::count_if( rmm::exec_policy(stream)->on(stream), indices.begin<T>(), indices.end<T>(), bounds), "Scatter map index out of bounds"); } // Transform negative indices to index + target size auto scatter_rows = indices.size(); auto scatter_iter = thrust::make_transform_iterator(indices.begin<T>(), index_converter<T>{target.num_rows()}); // Second dispatch over data type per column auto result = std::vector<std::unique_ptr<column>>(target.num_columns()); auto scatter_functor = column_scalar_scatterer<decltype(scatter_iter)>{}; std::transform(source.begin(), source.end(), target.begin(), result.begin(), [=](auto const& source_scalar, auto const& target_col) { return type_dispatcher(source_scalar->type(), scatter_functor, source_scalar, scatter_iter, scatter_rows, target_col, mr, stream); }); scatter_scalar_bitmask(source, scatter_iter, scatter_rows, result, mr, stream); return std::make_unique<table>(std::move(result)); } template < typename T, std::enable_if_t<not std::is_integral<T>::value or std::is_same<T, bool>::value>* = nullptr> std::unique_ptr<table> operator()(std::vector<std::unique_ptr<scalar>> const& source, column_view const& indices, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, cudaStream_t stream) const { CUDF_FAIL("Scatter index column must be an integral, non-boolean type"); } }; } // namespace std::unique_ptr<table> scatter(table_view const& source, column_view const& scatter_map, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_EXPECTS(source.num_columns() == target.num_columns(), "Number of columns in source and target not equal"); CUDF_EXPECTS(scatter_map.size() <= source.num_rows(), "Size of scatter map must be equal to or less than source rows"); CUDF_EXPECTS(std::equal(source.begin(), source.end(), target.begin(), [](auto const& col1, auto const& col2) { return col1.type().id() == col2.type().id(); }), "Column types do not match between source and target"); CUDF_EXPECTS(scatter_map.has_nulls() == false, "Scatter map contains nulls"); if (scatter_map.size() == 0) { return std::make_unique<table>(target, stream, mr); } // First dispatch for scatter map index type return type_dispatcher( scatter_map.type(), dispatch_map_type{}, source, scatter_map, target, check_bounds, mr, stream); } std::unique_ptr<table> scatter(std::vector<std::unique_ptr<scalar>> const& source, column_view const& indices, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_EXPECTS(source.size() == static_cast<size_t>(target.num_columns()), "Number of columns in source and target not equal"); CUDF_EXPECTS(std::equal(source.begin(), source.end(), target.begin(), [](auto const& scalar, auto const& col) { return scalar->type().id() == col.type().id(); }), "Column types do not match between source and target"); CUDF_EXPECTS(indices.has_nulls() == false, "indices contains nulls"); if (indices.size() == 0) { return std::make_unique<table>(target, stream, mr); } // First dispatch for scatter index type return type_dispatcher( indices.type(), scatter_scalar_impl{}, source, indices, target, check_bounds, mr, stream); } std::unique_ptr<column> boolean_mask_scatter(column_view 
const& input, column_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { auto indices = cudf::make_numeric_column( data_type{type_id::INT32}, target.size(), mask_state::UNALLOCATED, stream); auto mutable_indices = indices->mutable_view(); thrust::sequence(rmm::exec_policy(stream)->on(stream), mutable_indices.begin<size_type>(), mutable_indices.end<size_type>(), 0); // The scatter map is actually a table with only one column, which is scatter map. auto scatter_map = detail::apply_boolean_mask( table_view{{indices->view()}}, boolean_mask, rmm::mr::get_current_device_resource(), stream); auto output_table = detail::scatter(table_view{{input}}, scatter_map->get_column(0).view(), table_view{{target}}, false, mr, stream); // There is only one column in output_table return std::make_unique<column>(std::move(output_table->get_column(0))); } std::unique_ptr<column> boolean_mask_scatter(scalar const& input, column_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { return detail::copy_if_else(input, target, boolean_mask, mr, stream); } std::unique_ptr<table> boolean_mask_scatter(table_view const& input, table_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_EXPECTS(input.num_columns() == target.num_columns(), "Mismatch in number of input columns and target columns"); CUDF_EXPECTS(boolean_mask.size() == target.num_rows(), "Boolean mask size and number of target rows mismatch"); CUDF_EXPECTS(boolean_mask.type().id() == type_id::BOOL8, "Mask must be of Boolean type"); // Count valid pair of input and columns as per type at each column index i CUDF_EXPECTS( std::all_of(thrust::counting_iterator<size_type>(0), thrust::counting_iterator<size_type>(target.num_columns()), [&input, &target](auto index) { return ((input.column(index).type().id()) == (target.column(index).type().id())); }), "Type mismatch in input column and target column"); if (target.num_rows() != 0) { std::vector<std::unique_ptr<column>> out_columns(target.num_columns()); std::transform( input.begin(), input.end(), target.begin(), out_columns.begin(), [&boolean_mask, mr, stream](auto const& input_column, auto const& target_column) { return boolean_mask_scatter(input_column, target_column, boolean_mask, mr, stream); }); return std::make_unique<table>(std::move(out_columns)); } else { return empty_like(target); } } std::unique_ptr<table> boolean_mask_scatter( std::vector<std::reference_wrapper<scalar>> const& input, table_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_EXPECTS(static_cast<size_type>(input.size()) == target.num_columns(), "Mismatch in number of scalars and target columns"); CUDF_EXPECTS(boolean_mask.size() == target.num_rows(), "Boolean mask size and number of target rows mismatch"); CUDF_EXPECTS(boolean_mask.type().id() == type_id::BOOL8, "Mask must be of Boolean type"); // Count valid pair of input and columns as per type at each column/scalar index i CUDF_EXPECTS( std::all_of(thrust::counting_iterator<size_type>(0), thrust::counting_iterator<size_type>(target.num_columns()), [&input, &target](auto index) { return (input[index].get().type().id() == target.column(index).type().id()); }), "Type mismatch in input scalar and target column"); if (target.num_rows() != 0) { std::vector<std::unique_ptr<column>> out_columns(target.num_columns()); std::transform(input.begin(), 
input.end(), target.begin(), out_columns.begin(), [&boolean_mask, mr, stream](auto const& scalar, auto const& target_column) { return boolean_mask_scatter( scalar.get(), target_column, boolean_mask, mr, stream); }); return std::make_unique<table>(std::move(out_columns)); } else { return empty_like(target); } } } // namespace detail std::unique_ptr<table> scatter(table_view const& source, column_view const& scatter_map, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::scatter(source, scatter_map, target, check_bounds, mr); } std::unique_ptr<table> scatter(std::vector<std::unique_ptr<scalar>> const& source, column_view const& indices, table_view const& target, bool check_bounds, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::scatter(source, indices, target, check_bounds, mr); } std::unique_ptr<table> boolean_mask_scatter(table_view const& input, table_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::boolean_mask_scatter(input, target, boolean_mask, mr); } std::unique_ptr<table> boolean_mask_scatter( std::vector<std::reference_wrapper<scalar>> const& input, table_view const& target, column_view const& boolean_mask, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::boolean_mask_scatter(input, target, boolean_mask, mr); } } // namespace cudf
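The cudf scatter pair above is almost entirely mechanical cudaStream_t → hipStream_t renaming; the one structural change is again the launch macro, here with an explicit stream and with the kernel expression wrapped in parentheses so that commas inside a template argument list are not split into separate macro arguments. A small stream-qualified sketch with hypothetical names:

#include <hip/hip_runtime.h>
#include <stdio.h>

template <bool MarkTrue, typename T>
__global__ void markRows(T *flags, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) flags[i] = MarkTrue ? T(1) : T(0);
}

int main()
{
    const int n = 4096;
    int *d_flags = NULL;
    hipMalloc((void **)&d_flags, n * sizeof(int));

    hipStream_t stream;                 // was cudaStream_t in the CUDA source
    hipStreamCreate(&stream);

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // Equivalent of markRows<true, int><<<grid, block, 0, stream>>>(d_flags, n);
    // the extra parentheses keep the template's comma out of the macro argument list.
    hipLaunchKernelGGL((markRows<true, int>), grid, block, 0, stream, d_flags, n);

    hipStreamSynchronize(stream);
    printf("launch status: %s\n", hipGetErrorString(hipGetLastError()));

    hipStreamDestroy(stream);
    hipFree(d_flags);
    return 0;
}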
df043cf3d49b58c4f61266773bfadf6bad41ce46.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

const int BLOCK = 256;

__global__ void AddListK(float *I, float *O, int l)
{
    int b = blockIdx.x;
    int t = threadIdx.x;
    __shared__ float pSum[BLOCK*2];
    unsigned int start = 2*blockDim.x*b;

    (start+t < l) ? pSum[t] = I[start+t] : pSum[t] = 0.0;                                              //First half
    (start+blockDim.x+t < l) ? pSum[t+blockDim.x] = I[start+blockDim.x+t] : pSum[t+blockDim.x] = 0.0;  //Second half
    __syncthreads();

    for(unsigned int s = blockDim.x; s > 0; s/=2){
        __syncthreads();
        (t < s) ? pSum[t] += pSum[t+s] : pSum[t] += 0;
    }

    //printf("Sum =%f ", pSum[0]);
    // Only thread 0 holds the completed block sum; letting every thread write
    // O[b] would race against the final addition.
    if (t == 0) {
        O[b] = pSum[0];
    }
}

__host__ double addList(float *h_I, int h_l){
    float *d_I, *d_O;
    int olen;
    olen = h_l / (BLOCK<<1); // one partial sum per block; each block reduces 2*BLOCK elements
    if (h_l % (BLOCK<<1)) {  // round up when the input is not a multiple of 2*BLOCK
        olen++;
    }
    float h_O[olen];

    hipMalloc((void **) &d_I, sizeof(float)*h_l);
    hipMalloc((void **) &d_O, sizeof(float)*olen);
    hipMemcpy(d_I, h_I, sizeof(float)*h_l, hipMemcpyHostToDevice);

    dim3 dimGrid(olen, 1, 1);
    dim3 dimBlock(BLOCK, 1, 1);
    hipLaunchKernelGGL(( AddListK), dim3(dimGrid), dim3(dimBlock), 0, 0, d_I, d_O, h_l);

    hipMemcpy(h_O, d_O, sizeof(float)*olen, hipMemcpyDeviceToHost);
    hipFree(d_I); hipFree(d_O);

    double total = 0.0;
    for(int i = 0; i < olen; i ++){
        total += h_O[i];
    }
    return total;
}

void populateArray(float a[], int l){
    srand48(time(NULL));
    float prev = drand48()*100;
    a[0] = prev;                 // the first element was previously left uninitialized
    float nxt;
    for(int i = 1; i < l; i++){
        do{
            nxt = drand48()*100;
        }while(nxt==prev);
        a[i] = nxt;
        prev = nxt;
    }
}

int main(){
    srand(time(NULL));
    //int ilen = (rand() % 6553) * BLOCK;
    int ilen = 2000000;
    // heap-allocate: 2,000,000 floats (~8 MB) would overflow a typical stack
    float *I = (float *)malloc(sizeof(float)*ilen);
    populateArray(I, ilen);
    printf("Input length %d\n", ilen);

    time_t gstart = time(NULL);
    double gtotal = 0.0;
    for(int i = 0; i < 1000; i ++){
        gtotal = addList(I, ilen);
    }
    time_t gstop = time(NULL);

    time_t start = time(NULL);
    double total = 0.0;
    for(int i = 0; i < 1000; i ++){
        total = 0.0;
        for(int j = 0; j < ilen; j ++){
            total += I[j];
        }
    }
    time_t stop = time(NULL);

    printf("Total time over 1000 runs (seconds)\n GPU: %f CPU: %f", difftime(gstop, gstart), difftime(stop, start));
    printf("TOTAL: %f == %f \n DIF: %f", total, gtotal, total-gtotal);
    free(I);
    return 0;
}
df043cf3d49b58c4f61266773bfadf6bad41ce46.cu
#include <iostream>
#include <cuda.h>
#include <cstdlib>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

const int BLOCK = 256;

__global__ void AddListK(float *I, float *O, int l)
{
    int b = blockIdx.x;
    int t = threadIdx.x;
    __shared__ float pSum[BLOCK*2];
    unsigned int start = 2*blockDim.x*b;

    (start+t < l) ? pSum[t] = I[start+t] : pSum[t] = 0.0;                                              //First half
    (start+blockDim.x+t < l) ? pSum[t+blockDim.x] = I[start+blockDim.x+t] : pSum[t+blockDim.x] = 0.0;  //Second half
    __syncthreads();

    for(unsigned int s = blockDim.x; s > 0; s/=2){
        __syncthreads();
        (t < s) ? pSum[t] += pSum[t+s] : pSum[t] += 0;
    }

    //printf("Sum =%f ", pSum[0]);
    // Only thread 0 holds the completed block sum; letting every thread write
    // O[b] would race against the final addition.
    if (t == 0) {
        O[b] = pSum[0];
    }
}

__host__ double addList(float *h_I, int h_l){
    float *d_I, *d_O;
    int olen;
    olen = h_l / (BLOCK<<1); // one partial sum per block; each block reduces 2*BLOCK elements
    if (h_l % (BLOCK<<1)) {  // round up when the input is not a multiple of 2*BLOCK
        olen++;
    }
    float h_O[olen];

    cudaMalloc((void **) &d_I, sizeof(float)*h_l);
    cudaMalloc((void **) &d_O, sizeof(float)*olen);
    cudaMemcpy(d_I, h_I, sizeof(float)*h_l, cudaMemcpyHostToDevice);

    dim3 dimGrid(olen, 1, 1);
    dim3 dimBlock(BLOCK, 1, 1);
    AddListK<<<dimGrid, dimBlock>>>(d_I, d_O, h_l);

    cudaMemcpy(h_O, d_O, sizeof(float)*olen, cudaMemcpyDeviceToHost);
    cudaFree(d_I); cudaFree(d_O);

    double total = 0.0;
    for(int i = 0; i < olen; i ++){
        total += h_O[i];
    }
    return total;
}

void populateArray(float a[], int l){
    srand48(time(NULL));
    float prev = drand48()*100;
    a[0] = prev;                 // the first element was previously left uninitialized
    float nxt;
    for(int i = 1; i < l; i++){
        do{
            nxt = drand48()*100;
        }while(nxt==prev);
        a[i] = nxt;
        prev = nxt;
    }
}

int main(){
    srand(time(NULL));
    //int ilen = (rand() % 6553) * BLOCK;
    int ilen = 2000000;
    // heap-allocate: 2,000,000 floats (~8 MB) would overflow a typical stack
    float *I = (float *)malloc(sizeof(float)*ilen);
    populateArray(I, ilen);
    printf("Input length %d\n", ilen);

    time_t gstart = time(NULL);
    double gtotal = 0.0;
    for(int i = 0; i < 1000; i ++){
        gtotal = addList(I, ilen);
    }
    time_t gstop = time(NULL);

    time_t start = time(NULL);
    double total = 0.0;
    for(int i = 0; i < 1000; i ++){
        total = 0.0;
        for(int j = 0; j < ilen; j ++){
            total += I[j];
        }
    }
    time_t stop = time(NULL);

    printf("Total time over 1000 runs (seconds)\n GPU: %f CPU: %f", difftime(gstop, gstart), difftime(stop, start));
    printf("TOTAL: %f == %f \n DIF: %f", total, gtotal, total-gtotal);
    free(I);
    return 0;
}
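One thing the reduction pair above does not change is its timing: time(NULL) only has one-second resolution, so the printed GPU/CPU figures are very coarse. A common finer-grained alternative on the HIP side is event-based timing; the sketch below is illustrative and not part of the example.

#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void touch(float *data, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] += 1.0f;
}

int main()
{
    const int n = 1 << 20;
    float *d_data = NULL;
    hipMalloc((void **)&d_data, n * sizeof(float));
    hipMemset(d_data, 0, n * sizeof(float));

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start, 0);
    touch<<<(n + 255) / 256, 256>>>(d_data, n);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);           // wait until the kernel has finished

    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    printf("kernel time: %.3f ms\n", ms);

    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_data);
    return 0;
}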
1aac8c11255d9e2ec85c6190784b21a427635ef6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file mul_veff_with_phase_factors.cu * * \brief CUDA kernel to multiply effective potential by the phase factors. */ #include "../SDDK/GPU/cuda_common.hpp" #include "../SDDK/GPU/acc.hpp" __global__ void mul_veff_with_phase_factors_gpu_kernel(int num_gvec_loc__, hipDoubleComplex const* veff__, int const* gvec__, int num_atoms__, double const* atom_pos__, hipDoubleComplex* veff_a__) { int ia = blockIdx.y; double ax = atom_pos__[array2D_offset(ia, 0, num_atoms__)]; double ay = atom_pos__[array2D_offset(ia, 1, num_atoms__)]; double az = atom_pos__[array2D_offset(ia, 2, num_atoms__)]; int igloc = blockDim.x * blockIdx.x + threadIdx.x; if (igloc < num_gvec_loc__) { int gvx = gvec__[array2D_offset(igloc, 0, num_gvec_loc__)]; int gvy = gvec__[array2D_offset(igloc, 1, num_gvec_loc__)]; int gvz = gvec__[array2D_offset(igloc, 2, num_gvec_loc__)]; double p = twopi * (ax * gvx + ay * gvy + az * gvz); //veff_a__[array2D_offset(igloc, ia, num_gvec_loc__)] = cuConj(cuCmul(veff__[igloc], make_cuDoubleComplex(cos(p), sin(p)))); veff_a__[array2D_offset(igloc, ia, num_gvec_loc__)] = cuCmul(veff__[igloc], make_cuDoubleComplex(cos(p), sin(p))); } } extern "C" void mul_veff_with_phase_factors_gpu(int num_atoms__, int num_gvec_loc__, hipDoubleComplex const* veff__, int const* gvec__, double const* atom_pos__, double* veff_a__, int stream_id__) { dim3 grid_t(64); dim3 grid_b(num_blocks(num_gvec_loc__, grid_t.x), num_atoms__); hipStream_t stream = acc::stream(stream_id(stream_id__)); hipLaunchKernelGGL(( mul_veff_with_phase_factors_gpu_kernel) , dim3(grid_b), dim3(grid_t), 0, stream, num_gvec_loc__, veff__, gvec__, num_atoms__, atom_pos__, (hipDoubleComplex*)veff_a__ ); }
1aac8c11255d9e2ec85c6190784b21a427635ef6.cu
// Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file mul_veff_with_phase_factors.cu * * \brief CUDA kernel to multiply effective potential by the phase factors. */ #include "../SDDK/GPU/cuda_common.hpp" #include "../SDDK/GPU/acc.hpp" __global__ void mul_veff_with_phase_factors_gpu_kernel(int num_gvec_loc__, cuDoubleComplex const* veff__, int const* gvec__, int num_atoms__, double const* atom_pos__, cuDoubleComplex* veff_a__) { int ia = blockIdx.y; double ax = atom_pos__[array2D_offset(ia, 0, num_atoms__)]; double ay = atom_pos__[array2D_offset(ia, 1, num_atoms__)]; double az = atom_pos__[array2D_offset(ia, 2, num_atoms__)]; int igloc = blockDim.x * blockIdx.x + threadIdx.x; if (igloc < num_gvec_loc__) { int gvx = gvec__[array2D_offset(igloc, 0, num_gvec_loc__)]; int gvy = gvec__[array2D_offset(igloc, 1, num_gvec_loc__)]; int gvz = gvec__[array2D_offset(igloc, 2, num_gvec_loc__)]; double p = twopi * (ax * gvx + ay * gvy + az * gvz); //veff_a__[array2D_offset(igloc, ia, num_gvec_loc__)] = cuConj(cuCmul(veff__[igloc], make_cuDoubleComplex(cos(p), sin(p)))); veff_a__[array2D_offset(igloc, ia, num_gvec_loc__)] = cuCmul(veff__[igloc], make_cuDoubleComplex(cos(p), sin(p))); } } extern "C" void mul_veff_with_phase_factors_gpu(int num_atoms__, int num_gvec_loc__, cuDoubleComplex const* veff__, int const* gvec__, double const* atom_pos__, double* veff_a__, int stream_id__) { dim3 grid_t(64); dim3 grid_b(num_blocks(num_gvec_loc__, grid_t.x), num_atoms__); cudaStream_t stream = acc::stream(stream_id(stream_id__)); mul_veff_with_phase_factors_gpu_kernel <<<grid_b, grid_t, 0, stream>>> ( num_gvec_loc__, veff__, gvec__, num_atoms__, atom_pos__, (cuDoubleComplex*)veff_a__ ); }
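The pair above maps cuDoubleComplex and cudaStream_t to hipDoubleComplex and hipStream_t while leaving cuCmul and make_cuDoubleComplex in place, presumably because the project's own headers supply them. HIP's native double-complex helpers live in hip/hip_complex.h; a minimal sketch using them (the kernel and all names are illustrative) is:

#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <stdio.h>

// Multiply every element by a unit phase factor exp(i*p), the same per-element
// operation the mul_veff kernel performs for each G-vector.
__global__ void applyPhase(hipDoubleComplex *v, double p, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        hipDoubleComplex phase = make_hipDoubleComplex(cos(p), sin(p));
        v[i] = hipCmul(v[i], phase);
    }
}

int main()
{
    const int n = 256;
    hipDoubleComplex *d_v = NULL;
    hipMalloc((void **)&d_v, n * sizeof(hipDoubleComplex));
    hipMemset(d_v, 0, n * sizeof(hipDoubleComplex));

    applyPhase<<<(n + 63) / 64, 64>>>(d_v, 0.25, n);
    hipDeviceSynchronize();
    printf("launch status: %s\n", hipGetErrorString(hipGetLastError()));

    hipFree(d_v);
    return 0;
}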
631005dea3ebf82e27614c4298b7fe110acfc0be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************** GpuShareSat -- Copyright (c) 2020, Nicolas Prevot Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************************************/ #include "gpuShareLib/Helper.cuh" #include "../testUtils/TestHelper.cuh" #include "../gpu/CompositionRoot.h" #include "gpuShareLib/Clauses.cuh" #include <boost/test/unit_test.hpp> #include "gpuShareLib/GpuClauseSharer.h" #include "../simp/SimpSolver.h" using namespace Minisat; namespace GpuShare { void setDefaultOptions(GpuClauseSharerOptions &options) { options.gpuBlockCountGuideline = 3; options.gpuThreadsPerBlockGuideline = 32; options.minGpuLatencyMicros = 50; } GpuFixture::GpuFixture(GpuClauseSharerOptions &options, int varCount, int _solverCount) : gpuClauseSharer(options), logger {2, directPrint} { gpuClauseSharer.setVarCount(varCount); gpuClauseSharer.setCpuSolverCount(_solverCount); for (int s = 0; s < _solverCount; s++) { SimpSolver *solv = new SimpSolver(s, gpuClauseSharer, finisher, false, logger); solvers.push_back(solv); for (int i = 0; i < varCount; i++) { solv->newVar(); } solv->prepareForSearch(); } } GpuClauseSharerForTests::GpuClauseSharerForTests(GpuClauseSharerOptions opts): GpuClauseSharerImpl(opts, [&](const std::string &str) {std::cout << str;}) { } void execute(GpuClauseSharer &gpuClauseSharer) { // if we run execute just once, it will start the gpu run but won't // get the results back for (int i = 0; i < 2; i++) gpuClauseSharer.gpuRun(); } void GpuFixture::execute() { for (int i = 0; i < solvers.size(); i++) { solvers[i]->tryCopyTrailForGpu(); } GpuShare::execute(gpuClauseSharer); } CRef GpuFixture::executeAndImportClauses() { assert(solvers.size() == 1); std::vector<CRef> res; executeAndImportClauses(res); return res[0]; } void GpuFixture::executeAndImportClauses(std::vector<CRef> &res) { execute(); bool foundEmptyClause = false; res.clear(); for (int i = 0; i < solvers.size(); i++) { res.push_back(solvers[i]->gpuImportClauses(foundEmptyClause)); } } void GpuFixture::checkReportedImported(int count, int instance, bool unit) { BOOST_CHECK_EQUAL(gpuClauseSharer.getOneSolverStat(instance, reportedClauses), count); BOOST_CHECK_EQUAL(solvers[instance]->usedWhenImported, count); if (unit) { BOOST_CHECK_EQUAL(gpuClauseSharer.getOneSolverStat(instance, reportedClausesUnit), count); } } GpuFixture::~GpuFixture() { for (int i = 0; i < solvers.size(); 
i++) { delete solvers[i]; } } __global__ void globalUpdateClauses(DClauseUpdates clUpdates, DClauses dClauses) { updateClauses(clUpdates, dClauses); } // often, this method is called just to make the clause counts on the host clauses right void copyToDeviceAsync(HostClauses &hCls, hipStream_t &stream, GpuDims gpuDims) { Logger logger {2, directPrint}; ContigCopier cc(logger); copyToDeviceAsync(hCls, stream, cc, gpuDims); } void GpuFixture::addClause(const std::vector<Lit> &cl) { gpuClauseSharer.addClause(-1, (int*) &cl[0], cl.size()); } void copyToDeviceAsync(HostClauses &hCls, hipStream_t &stream, ContigCopier &cc, GpuDims gpuDims) { cc.clear(false); ClUpdateSet updates = hCls.getUpdatesForDevice(stream, cc); RunInfo runInfo = hCls.makeRunInfo(stream, cc); exitIfFalse(cc.tryCopyAsync(hipMemcpyHostToDevice, stream), POSITION); // TODO: take GpuDims here DClauses dClauses = runInfo.getDClauses(); hipLaunchKernelGGL(( globalUpdateClauses), dim3(gpuDims.blockCount), dim3(gpuDims.threadsPerBlock), 0, stream, updates.getDClauseUpdates(), dClauses); exitIfError(hipStreamSynchronize(stream), POSITION); } void addClause(HostClauses &hostClauses, const std::vector<Lit> &cl) { hostClauses.addClause(MinHArr<Lit>((size_t) cl.size(), (Lit*) &cl[0]), cl.size()); } }
631005dea3ebf82e27614c4298b7fe110acfc0be.cu
/*************************************************************************************** GpuShareSat -- Copyright (c) 2020, Nicolas Prevot Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************************************/ #include "gpuShareLib/Helper.cuh" #include "../testUtils/TestHelper.cuh" #include "../gpu/CompositionRoot.h" #include "gpuShareLib/Clauses.cuh" #include <boost/test/unit_test.hpp> #include "gpuShareLib/GpuClauseSharer.h" #include "../simp/SimpSolver.h" using namespace Minisat; namespace GpuShare { void setDefaultOptions(GpuClauseSharerOptions &options) { options.gpuBlockCountGuideline = 3; options.gpuThreadsPerBlockGuideline = 32; options.minGpuLatencyMicros = 50; } GpuFixture::GpuFixture(GpuClauseSharerOptions &options, int varCount, int _solverCount) : gpuClauseSharer(options), logger {2, directPrint} { gpuClauseSharer.setVarCount(varCount); gpuClauseSharer.setCpuSolverCount(_solverCount); for (int s = 0; s < _solverCount; s++) { SimpSolver *solv = new SimpSolver(s, gpuClauseSharer, finisher, false, logger); solvers.push_back(solv); for (int i = 0; i < varCount; i++) { solv->newVar(); } solv->prepareForSearch(); } } GpuClauseSharerForTests::GpuClauseSharerForTests(GpuClauseSharerOptions opts): GpuClauseSharerImpl(opts, [&](const std::string &str) {std::cout << str;}) { } void execute(GpuClauseSharer &gpuClauseSharer) { // if we run execute just once, it will start the gpu run but won't // get the results back for (int i = 0; i < 2; i++) gpuClauseSharer.gpuRun(); } void GpuFixture::execute() { for (int i = 0; i < solvers.size(); i++) { solvers[i]->tryCopyTrailForGpu(); } GpuShare::execute(gpuClauseSharer); } CRef GpuFixture::executeAndImportClauses() { assert(solvers.size() == 1); std::vector<CRef> res; executeAndImportClauses(res); return res[0]; } void GpuFixture::executeAndImportClauses(std::vector<CRef> &res) { execute(); bool foundEmptyClause = false; res.clear(); for (int i = 0; i < solvers.size(); i++) { res.push_back(solvers[i]->gpuImportClauses(foundEmptyClause)); } } void GpuFixture::checkReportedImported(int count, int instance, bool unit) { BOOST_CHECK_EQUAL(gpuClauseSharer.getOneSolverStat(instance, reportedClauses), count); BOOST_CHECK_EQUAL(solvers[instance]->usedWhenImported, count); if (unit) { BOOST_CHECK_EQUAL(gpuClauseSharer.getOneSolverStat(instance, reportedClausesUnit), count); } } GpuFixture::~GpuFixture() { for (int i = 0; i < solvers.size(); i++) { delete solvers[i]; } } __global__ void globalUpdateClauses(DClauseUpdates 
clUpdates, DClauses dClauses) { updateClauses(clUpdates, dClauses); } // often, this method is called just to make the clause counts on the host clauses right void copyToDeviceAsync(HostClauses &hCls, cudaStream_t &stream, GpuDims gpuDims) { Logger logger {2, directPrint}; ContigCopier cc(logger); copyToDeviceAsync(hCls, stream, cc, gpuDims); } void GpuFixture::addClause(const std::vector<Lit> &cl) { gpuClauseSharer.addClause(-1, (int*) &cl[0], cl.size()); } void copyToDeviceAsync(HostClauses &hCls, cudaStream_t &stream, ContigCopier &cc, GpuDims gpuDims) { cc.clear(false); ClUpdateSet updates = hCls.getUpdatesForDevice(stream, cc); RunInfo runInfo = hCls.makeRunInfo(stream, cc); exitIfFalse(cc.tryCopyAsync(cudaMemcpyHostToDevice, stream), POSITION); // TODO: take GpuDims here DClauses dClauses = runInfo.getDClauses(); globalUpdateClauses<<<gpuDims.blockCount, gpuDims.threadsPerBlock, 0, stream>>>(updates.getDClauseUpdates(), dClauses); exitIfError(cudaStreamSynchronize(stream), POSITION); } void addClause(HostClauses &hostClauses, const std::vector<Lit> &cl) { hostClauses.addClause(MinHArr<Lit>((size_t) cl.size(), (Lit*) &cl[0]), cl.size()); } }
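Annotation: copyToDeviceAsync in this test helper follows a common CUDA staging pattern — pack host data into one contiguous buffer, issue a single cudaMemcpyAsync on a stream, launch the consuming kernel on the same stream, and synchronize once at the end. A generic, self-contained sketch of that pattern, independent of the project-specific ContigCopier/HostClauses types, is given below; the toy kernel and sizes are illustrative only.

#include <cuda_runtime.h>

__global__ void consume(const int* data, int n, int* out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = data[i] * 2;   // stand-in for updateClauses()
}

int main()
{
    const int n = 1 << 16;
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    int *h_buf, *d_in, *d_out;
    cudaMallocHost((void**)&h_buf, n * sizeof(int));  // pinned, so the copy is truly async
    cudaMalloc((void**)&d_in,  n * sizeof(int));
    cudaMalloc((void**)&d_out, n * sizeof(int));
    for (int i = 0; i < n; i++) h_buf[i] = i;

    // Same ordering as copyToDeviceAsync: async copy, kernel launch, one sync.
    cudaMemcpyAsync(d_in, h_buf, n * sizeof(int), cudaMemcpyHostToDevice, stream);
    consume<<<(n + 255) / 256, 256, 0, stream>>>(d_in, n, d_out);
    cudaStreamSynchronize(stream);

    cudaFree(d_in); cudaFree(d_out); cudaFreeHost(h_buf); cudaStreamDestroy(stream);
    return 0;
}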
285fce768eb491cb4e4793e33c51c480340e3a85.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include "assert.h" #include "matx.h" #include "matx_pybind.h" #include "test_types.h" #include "utilities.h" #include "gtest/gtest.h" using namespace matx; constexpr int dim_size = 100; template <typename T> class EigenSolverTest : public ::testing::Test { protected: void SetUp() override { pb = std::make_unique<MatXPybind>(); pb->InitAndRunTVGenerator<T>("00_solver", "eig", "run", {dim_size}); pb->NumpyToTensorView(Bv, "B"); } void TearDown() { pb.reset(); } std::unique_ptr<MatXPybind> pb; tensor_t<T, 2> Bv{{dim_size, dim_size}}; tensor_t<T, 2> Btv{{dim_size, dim_size}}; tensor_t<T, 2> Evv{{dim_size, dim_size}}; tensor_t<T, 2> Wv{{dim_size, 1}}; tensor_t<T, 1> Wov{{dim_size}}; tensor_t<T, 2> Gtv{{dim_size, 1}}; tensor_t<T, 2> Lvv{{dim_size, 1}}; }; template <typename TensorType> class EigenSolverTestNonComplexFloatTypes : public EigenSolverTest<TensorType> { }; TYPED_TEST_SUITE(EigenSolverTestNonComplexFloatTypes, MatXFloatNonComplexNonHalfTypes); TYPED_TEST(EigenSolverTestNonComplexFloatTypes, EigenBasic) { MATX_ENTER_HANDLER(); eig(this->Evv, this->Wov, this->Bv); // Now we need to go through all the eigenvectors and eigenvalues and make // sure the results match the equation A*v = lambda*v, where v are the // eigenvectors corresponding to the eigenvalue lambda. for (index_t i = 0; i < dim_size; i++) { auto v = this->Evv.template Slice<2>({0, i}, {matxEnd, i + 1}); copy(this->Wv, v, 0); // Compute lambda*v (this->Lvv = v * this->Wov(i)).run(); // Compute A*v matmul(this->Gtv, this->Bv, this->Wv); hipStreamSynchronize(0); // Compare for (index_t j = 0; j < dim_size; j++) { ASSERT_NEAR(this->Gtv(j, 0), this->Lvv(j, 0), 0.001); } } MATX_EXIT_HANDLER(); }
285fce768eb491cb4e4793e33c51c480340e3a85.cu
//////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include "assert.h" #include "matx.h" #include "matx_pybind.h" #include "test_types.h" #include "utilities.h" #include "gtest/gtest.h" using namespace matx; constexpr int dim_size = 100; template <typename T> class EigenSolverTest : public ::testing::Test { protected: void SetUp() override { pb = std::make_unique<MatXPybind>(); pb->InitAndRunTVGenerator<T>("00_solver", "eig", "run", {dim_size}); pb->NumpyToTensorView(Bv, "B"); } void TearDown() { pb.reset(); } std::unique_ptr<MatXPybind> pb; tensor_t<T, 2> Bv{{dim_size, dim_size}}; tensor_t<T, 2> Btv{{dim_size, dim_size}}; tensor_t<T, 2> Evv{{dim_size, dim_size}}; tensor_t<T, 2> Wv{{dim_size, 1}}; tensor_t<T, 1> Wov{{dim_size}}; tensor_t<T, 2> Gtv{{dim_size, 1}}; tensor_t<T, 2> Lvv{{dim_size, 1}}; }; template <typename TensorType> class EigenSolverTestNonComplexFloatTypes : public EigenSolverTest<TensorType> { }; TYPED_TEST_SUITE(EigenSolverTestNonComplexFloatTypes, MatXFloatNonComplexNonHalfTypes); TYPED_TEST(EigenSolverTestNonComplexFloatTypes, EigenBasic) { MATX_ENTER_HANDLER(); eig(this->Evv, this->Wov, this->Bv); // Now we need to go through all the eigenvectors and eigenvalues and make // sure the results match the equation A*v = lambda*v, where v are the // eigenvectors corresponding to the eigenvalue lambda. for (index_t i = 0; i < dim_size; i++) { auto v = this->Evv.template Slice<2>({0, i}, {matxEnd, i + 1}); copy(this->Wv, v, 0); // Compute lambda*v (this->Lvv = v * this->Wov(i)).run(); // Compute A*v matmul(this->Gtv, this->Bv, this->Wv); cudaStreamSynchronize(0); // Compare for (index_t j = 0; j < dim_size; j++) { ASSERT_NEAR(this->Gtv(j, 0), this->Lvv(j, 0), 0.001); } } MATX_EXIT_HANDLER(); }
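Annotation: the MatX test above validates eig() by checking A*v = lambda*v one eigenpair at a time, comparing A*v (via matmul) against lambda*v elementwise with a 1e-3 tolerance. Stripped of the tensor machinery, the check reduces to the host-side loop below; the row-major storage and helper name are illustrative assumptions, not MatX API.

#include <cmath>

// Verify one eigenpair of a dense n x n matrix A (row-major):
// true if |(A*v)_j - lambda*v_j| <= tol for every component j.
static bool check_eigenpair(const double* A, const double* v, double lambda,
                            int n, double tol)
{
    for (int j = 0; j < n; j++) {
        double Av_j = 0.0;
        for (int k = 0; k < n; k++) Av_j += A[j * n + k] * v[k];
        if (std::fabs(Av_j - lambda * v[j]) > tol) return false;
    }
    return true;
}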
7132c8f40b45dc3ae3ecba01b3e70a22199a6710.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) // output: idx (b,m,nsample), pts_cnt (b,m) __global__ void query_ball_point_gpu(int b, int n, int m, int nsample, const float *radius, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { int batch_index = blockIdx.x; xyz1 += n*3*batch_index; xyz2 += m*3*batch_index; idx += m*nsample*batch_index; pts_cnt += m*batch_index; // counting how many unique points selected in local region int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball float x2=xyz2[j*3+0]; float y2=xyz2[j*3+1]; float z2=xyz2[j*3+2]; float x1=xyz1[k*3+0]; float y1=xyz1[k*3+1]; float z1=xyz1[k*3+2]; float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius[batch_index]) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = k; } idx[j*nsample+cnt] = k; cnt+=1; } } pts_cnt[j] = cnt; } } // input: points (b,n,c), idx (b,m,nsample) // output: out (b,m,nsample,c) __global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) { int batch_index = blockIdx.x; points += n*c*batch_index; idx += m*nsample*batch_index; out += m*nsample*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { out[j*nsample*c+k*c+l] = points[ii*c+l]; } } } } // input: grad_out (b,m,nsample,c), idx (b,m,nsample), // output: grad_points (b,n,c) __global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) { int batch_index = blockIdx.x; idx += m*nsample*batch_index; grad_out += m*nsample*c*batch_index; grad_points += n*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]); } } } } // input: k (1), distance matrix dist (b,m,n) // output: idx (b,m,n), dist_out (b,m,n) // only the top k results within n are useful __global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) { int batch_index = blockIdx.x; dist+=m*n*batch_index; outi+=m*n*batch_index; out+=m*n*batch_index; int index = threadIdx.x; int stride = blockDim.x; // copy from dist to dist_out for (int j=index;j<m;j+=stride) { for (int s=0;s<n;++s) { out[j*n+s] = dist[j*n+s]; outi[j*n+s] = s; } } float *p_dist; for (int j=index;j<m;j+=stride) { p_dist = out+j*n; // selection sort for the first k elements for (int s=0;s<k;++s) { int min=s; // find the min for (int t=s+1;t<n;++t) { if (p_dist[t]<p_dist[min]) { min = t; } } // swap min-th and i-th element if (min!=s) { float tmp = p_dist[min]; p_dist[min] = p_dist[s]; p_dist[s] = tmp; int tmpi = outi[j*n+min]; outi[j*n+min] = outi[j*n+s]; outi[j*n+s] = tmpi; } } } } void queryBallPointLauncher(int b, int n, int m, int nsample, const float *radius, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { hipLaunchKernelGGL(( query_ball_point_gpu), dim3(b),dim3(256), 0, 0, b,n,m,nsample,radius,xyz1,xyz2,idx,pts_cnt); 
//hipDeviceSynchronize(); } void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) { hipLaunchKernelGGL(( selection_sort_gpu), dim3(b),dim3(256), 0, 0, b,n,m,k,dist,outi,out); //hipDeviceSynchronize(); } void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){ hipLaunchKernelGGL(( group_point_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,points,idx,out); //hipDeviceSynchronize(); } void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){ hipLaunchKernelGGL(( group_point_grad_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,grad_out,idx,grad_points); //group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points); //hipDeviceSynchronize(); }
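Annotation: query_ball_point_gpu selects, for each query point, the first nsample candidates within the radius and pre-fills every output slot with the first hit, so sparse neighborhoods still yield valid (repeated) indices. A single-query CPU reference of that exact rule, handy for unit-testing the kernel, is sketched below; it uses a scalar radius (the kernel reads a per-batch radius) and an illustrative helper name.

#include <cmath>

// CPU reference for one query point q (radius-ball grouping, "first nsample" rule).
// xyz1: n x 3 candidate points (row-major), q: 3-vector, idx_out: nsample slots.
static int query_ball_reference(const float* xyz1, int n, const float* q,
                                float radius, int nsample, int* idx_out)
{
    int cnt = 0;
    for (int k = 0; k < n && cnt < nsample; k++) {
        float dx = q[0] - xyz1[k * 3 + 0];
        float dy = q[1] - xyz1[k * 3 + 1];
        float dz = q[2] - xyz1[k * 3 + 2];
        float d = std::sqrt(dx * dx + dy * dy + dz * dz);
        if (d < radius) {
            if (cnt == 0)   // pre-fill so unused slots repeat the first index
                for (int l = 0; l < nsample; l++) idx_out[l] = k;
            idx_out[cnt++] = k;
        }
    }
    return cnt;   // corresponds to pts_cnt in the kernel
}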
7132c8f40b45dc3ae3ecba01b3e70a22199a6710.cu
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) // output: idx (b,m,nsample), pts_cnt (b,m) __global__ void query_ball_point_gpu(int b, int n, int m, int nsample, const float *radius, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { int batch_index = blockIdx.x; xyz1 += n*3*batch_index; xyz2 += m*3*batch_index; idx += m*nsample*batch_index; pts_cnt += m*batch_index; // counting how many unique points selected in local region int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball float x2=xyz2[j*3+0]; float y2=xyz2[j*3+1]; float z2=xyz2[j*3+2]; float x1=xyz1[k*3+0]; float y1=xyz1[k*3+1]; float z1=xyz1[k*3+2]; float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius[batch_index]) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = k; } idx[j*nsample+cnt] = k; cnt+=1; } } pts_cnt[j] = cnt; } } // input: points (b,n,c), idx (b,m,nsample) // output: out (b,m,nsample,c) __global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) { int batch_index = blockIdx.x; points += n*c*batch_index; idx += m*nsample*batch_index; out += m*nsample*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { out[j*nsample*c+k*c+l] = points[ii*c+l]; } } } } // input: grad_out (b,m,nsample,c), idx (b,m,nsample), // output: grad_points (b,n,c) __global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) { int batch_index = blockIdx.x; idx += m*nsample*batch_index; grad_out += m*nsample*c*batch_index; grad_points += n*c*batch_index; int index = threadIdx.x; int stride = blockDim.x; for (int j=index;j<m;j+=stride) { for (int k=0;k<nsample;++k) { int ii = idx[j*nsample+k]; for (int l=0;l<c;++l) { atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]); } } } } // input: k (1), distance matrix dist (b,m,n) // output: idx (b,m,n), dist_out (b,m,n) // only the top k results within n are useful __global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) { int batch_index = blockIdx.x; dist+=m*n*batch_index; outi+=m*n*batch_index; out+=m*n*batch_index; int index = threadIdx.x; int stride = blockDim.x; // copy from dist to dist_out for (int j=index;j<m;j+=stride) { for (int s=0;s<n;++s) { out[j*n+s] = dist[j*n+s]; outi[j*n+s] = s; } } float *p_dist; for (int j=index;j<m;j+=stride) { p_dist = out+j*n; // selection sort for the first k elements for (int s=0;s<k;++s) { int min=s; // find the min for (int t=s+1;t<n;++t) { if (p_dist[t]<p_dist[min]) { min = t; } } // swap min-th and i-th element if (min!=s) { float tmp = p_dist[min]; p_dist[min] = p_dist[s]; p_dist[s] = tmp; int tmpi = outi[j*n+min]; outi[j*n+min] = outi[j*n+s]; outi[j*n+s] = tmpi; } } } } void queryBallPointLauncher(int b, int n, int m, int nsample, const float *radius, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { query_ball_point_gpu<<<b,256>>>(b,n,m,nsample,radius,xyz1,xyz2,idx,pts_cnt); //cudaDeviceSynchronize(); } void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) { 
selection_sort_gpu<<<b,256>>>(b,n,m,k,dist,outi,out); //cudaDeviceSynchronize(); } void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){ group_point_gpu<<<b,256>>>(b,n,c,m,nsample,points,idx,out); //cudaDeviceSynchronize(); } void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){ group_point_grad_gpu<<<b,256>>>(b,n,c,m,nsample,grad_out,idx,grad_points); //group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points); //cudaDeviceSynchronize(); }
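Annotation: group_point_grad_gpu accumulates into grad_points with atomicAdd because the same source index ii can be selected by many (query, sample) pairs, so concurrent threads target the same address and plain stores would lose updates. The essence of that scatter-add, reduced to a minimal standalone kernel with illustrative names, is shown below.

#include <cuda_runtime.h>

// Scatter-add: many output slots may map to the same input row, so the
// accumulation must be atomic to avoid lost updates under concurrency.
__global__ void scatter_add(const float* grad_out, const int* idx,
                            float* grad_in, int num_out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_out)
        atomicAdd(&grad_in[idx[i]], grad_out[i]);
}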
ae570156f37706a7ec3e4133ffb59c087e8e90b5.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <cusparse_v2.h> #include "rocblas.h" #include <hiprand/hiprand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ //KC_FP_TYPE can be assumed to mean "double", but originally //this definition could also work with "float" for faster speed. //float compatability is no longer supported in this function. #include "kcArrayFunctions.h" #define MAX_P 1e25 #define MIN_P 1e-25 __device__ KC_FP_TYPE positiveBound(KC_FP_TYPE a) { //return a; if(isinf(a)) return MAX_P; else return fmin(fmax(a,MIN_P),MAX_P); } __device__ KC_FP_TYPE h(KC_FP_TYPE z, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE bias, KC_FP_TYPE log_power) { KC_FP_TYPE logex = ((gamma*z)>100)?(gamma*z):KC_MIN(log1p(exp(z*gamma)),KC_MAXN); return KC_MIN((KC_POW(logex*1.0000000,log_power)+bias)*KC_EXP(sh)*dt,MAX_P); } //one thread per particle <<< nTrials,nParticles >>> __global__ void kcMoveParticles(KC_FP_TYPE * y, KC_FP_TYPE * spe, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * b, int * betaIdxVector, KC_FP_TYPE l_0, KC_FP_TYPE * g, KC_FP_TYPE w, KC_FP_TYPE dt, KC_FP_TYPE * randN, KC_FP_TYPE sigMult, KC_FP_TYPE * log_li, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * ncdf, KC_FP_TYPE * posc, int * trIdx, int NT, int TT, int numParticles, int t, int numNeur, KC_FP_TYPE * bias, KC_FP_TYPE log_power) { int threadNum = blockIdx.x*blockDim.x + threadIdx.x; int tr_num = (int)threadNum / (int)numParticles; int p_num = threadNum % numParticles; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int row = trIdx[tr_num] + t; int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; KC_FP_TYPE cb = b[betaIdxVector[row]]; KC_FP_TYPE sw = sqrt(w); KC_FP_TYPE mup = (t==0)?(l_0):(pos[idx-1]+cb); KC_FP_TYPE mu = mup; KC_FP_TYPE sig2 = sigMult*w; KC_FP_TYPE sig = sqrt(sig2); KC_FP_TYPE maxI = fmin(1.0-1e-20, fmax( normcdf((1.0-mu)/sig),1e-20 )); pos[idx] = fmin(1.0-1e-20, normcdfinv(maxI*randN[pidx])*sig + mu); posc[pidx] = pos[idx]; KC_FP_TYPE dpos = pos[idx]-mu; KC_FP_TYPE log_pi_k = -log(maxI)-0.5*log(2.0*M_PI*sig2) - 0.5/sig2*(dpos*dpos); //to be stored for each particle: ncdf, lw, lw2 ncdf[idx] = normcdf((1-mup)/sw); KC_FP_TYPE dposp = pos[idx]-mup; KC_FP_TYPE log_p = -0*log(maxI) -0.5*log(2*M_PI*w)- 0.5/w*(dposp*dposp); log_li[pidx] = 0; for(int nn = 0; nn < numNeur; nn++) { log_li[pidx] += -h(pos[idx],g[nn],dt,spe[row+TT*nn],bias[nn],log_power)+y[row+TT*nn]*(log(fmax(h(pos[idx],g[nn],1.0,spe[row+TT*nn],bias[nn],log_power),1e-30))+log(dt))-lgamma(y[row+TT*nn]+1); } KC_FP_TYPE pw = (t==0)?(log(1/(KC_FP_TYPE)numParticles) ):( log(fmax(wt[idx-1], 1e-30)) ); lw[pidx] = exp(pw+log_p+log_li[pidx]-log_pi_k); lw2[pidx] = exp(pw+log_p -log_pi_k); //safety checks for numerical errors if(isnan(lw[pidx]) || isinf(lw[pidx]) || isnan(pos[idx]) || isinf(pos[idx]) || isnan(lw2[pidx]) || isinf(lw2[pidx])) { lw[pidx] = 0; lw2[pidx] = 0; pos[idx] = mup; posc[pidx] = mup; } } } } //one thread per trial <<< nTrials,1 >>> __global__ void kcNormalizeWeights(KC_FP_TYPE * y, KC_FP_TYPE * wt, KC_FP_TYPE * wt_p, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * nEff, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < 
trLength) { int row = trIdx[tr_num] + t; //sum up and normalize weights KC_FP_TYPE weightSum = 0; KC_FP_TYPE weightSum2 = 0; for(int p_num = 0; p_num < numParticles; p_num++) { int pidx = tr_num*numParticles+p_num; weightSum += lw[pidx]; weightSum2 += lw2[pidx]; } KC_FP_TYPE n_eff_den = 0; weightSum = fmax(weightSum,1e-20); weightSum2 = fmax(weightSum2,1e-20); for(int p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt[idx] = lw[pidx] /weightSum; wt_p[pidx] = lw2[pidx]/weightSum2; n_eff_den += wt[idx]*wt[idx]; cumsum[pidx] = (p_num>0)?(cumsum[pidx-1]+wt[idx]):(wt[idx]);//for resampling } nEff[tr_num] = 1/n_eff_den; } } } //initial calculation - probability of each set of spike counts coming from a rate at the bound __global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * spe,KC_FP_TYPE * lg,KC_FP_TYPE * g, KC_FP_TYPE dt,int TT, int numNeur, KC_FP_TYPE * bias, KC_FP_TYPE log_power) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx < TT) { KC_FP_TYPE log_sum = 0; for(int nn = 0; nn < numNeur; nn++) { log_sum += -h(1,g[nn], dt,spe[idx+TT*nn],bias[nn],log_power) + y[idx+TT*nn]*log(fmax(h(1,g[nn],dt,spe[idx+TT*nn],bias[nn],log_power),1e-30)) - lgamma(y[idx+TT*nn]+1); } lg[idx] = exp(log_sum); } } //one thread per particle <<< nTrials,nParticles >>> // if particles look bad, resamples them from the distribution before the next step __global__ void kcResampleParticles(KC_FP_TYPE * y, KC_FP_TYPE * pos, KC_FP_TYPE * posc, KC_FP_TYPE * wt, KC_FP_TYPE * log_li, KC_FP_TYPE * wt_p, int minEffParticles, KC_FP_TYPE * cumsum, KC_FP_TYPE * nEff, KC_FP_TYPE * randU, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * ncdf, int * trIdx, int NT, int TT, int numParticles, int t) { int threadNum = blockIdx.x*blockDim.x + threadIdx.x; int tr_num = (int)threadNum / (int)numParticles; int p_num = threadNum % numParticles; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int pidx = tr_num*numParticles+p_num; int row = trIdx[tr_num] + t; int idx = TT*p_num + row; int pidx_new = pidx; if(nEff[tr_num] < minEffParticles) { int p_num_new; for(p_num_new = 0; p_num_new < numParticles-1 && randU[pidx] > cumsum[numParticles*tr_num+p_num_new]; p_num_new++) { //everything taken care of in loop statement } pidx_new = tr_num*numParticles+p_num_new; wt[idx] = 1.0/(KC_FP_TYPE)numParticles; //weights are now uniform again pos[idx] = posc[pidx_new]; } KC_FP_TYPE wt_old = (t==0)?(1.0/(KC_FP_TYPE)numParticles):(wt[idx-1]); p_cet_0[pidx] = (1.0-ncdf[idx])*wt_old; p_cgt_0a[pidx] = exp(log_li[pidx])*wt_p[pidx]; //or pidx_new? 
p_cgt_0b[pidx] = ncdf[idx]*wt_old; } } } //one thread per trial <<< nTrials,1 >>> //move bound crossing probabilities forward in time __global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int row = trIdx[tr_num] + t; KC_FP_TYPE p_cet_s = 0; KC_FP_TYPE p_cgt_sa = 0; KC_FP_TYPE p_cgt_sb = 0; for(int p_num = 0; p_num < numParticles; p_num++) { int pidx = tr_num*numParticles+p_num; //int idx = TT*p_num + row; p_cet_s += p_cet_0[pidx]; p_cgt_sa += p_cgt_0a[pidx]; p_cgt_sb += p_cgt_0b[pidx]; //finished a bit of the resampler that must run post-sampling for parallelization not to screw up, this will only be used again if this is last timestep in trial if(nEff[tr_num] < minEffParticles && t-1==trLength) { cumsum[pidx] = 1/(KC_FP_TYPE)numParticles*(1+p_num); } } KC_FP_TYPE p_clte_old = ((t==0)?(0):(p_clte[row-1])); KC_FP_TYPE p_cgt_old = ((t==0)?(1):(p_cgt[row-1])); KC_FP_TYPE p_clt_1 = lg[row]*p_clte_old; KC_FP_TYPE p_cet_1 = lg[row]*(1.0-p_clte_old)*p_cet_s; KC_FP_TYPE p_cgt_1 = (1.0-p_clte_old)*p_cgt_sa*p_cgt_sb; p_cet[row] = p_cet_1/(p_clt_1+p_cet_1+p_cgt_1); p_clte[row] = (p_cet_1+p_clt_1)/(p_clt_1+p_cet_1+p_cgt_1); //this is a little redudant, but I think it is convenient later? p_clt[row] = p_clt_1/(p_clt_1+p_cet_1+p_cgt_1); p_cgt[row] = p_cgt_1/(p_clt_1+p_cet_1+p_cgt_1); p_cpr[row] = p_cgt_old*p_cet_s; //compare this index in MATLAB code } } } //Finally do that backwards sampling, <<< NT, 1 >>> __global__ void kcBackwardsSample(KC_FP_TYPE * sample, int * crossingTimes, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * ncdf, KC_FP_TYPE * b, int * betaIdx, KC_FP_TYPE l_0, KC_FP_TYPE w, KC_FP_TYPE * g, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_clte, KC_FP_TYPE * randUp, KC_FP_TYPE * randUb, KC_FP_TYPE * wt_p, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; int row = trIdx[tr_num] + t; if(t == trLength-1) { //if t=end of trial, start off the backwards sampling crossingTimes[tr_num] = trLength; //decide whether end trial has hit boundary if(randUb[tr_num] < p_clte[row]) { sample[row] = 1; crossingTimes[tr_num] = t; } //else select a particle to be end of trial (cumsum holds the CDF of the distribution over particles) else { int p_num; for(p_num = 0; p_num < numParticles-1 && randUp[tr_num] > cumsum[numParticles*tr_num+p_num]; p_num++) { } int idx = TT*p_num + row; sample[row] = pos[idx]; } } else if(t < trLength-1 && t >= 0) { //else, propgate backwards //if previous sample had hit threshold if(sample[row+1] >= 1) { //if boundary already reached if(randUb[tr_num] < p_clte[row]/(p_cpr[row+1] + p_clte[row])) { crossingTimes[tr_num] = t; sample[row] = 1; } //gets pre-crossing particle else { KC_FP_TYPE wtSum = 0; int p_num; for(p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt_p[pidx] = wt[idx]*fmax(1.0-ncdf[idx+1],1e-25); wtSum += wt_p[pidx]; } wtSum = fmax(wtSum,1e-30); KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum; for(p_num = 0; p_num < numParticles-1 && csum < 
randUp[tr_num]; p_num++) { int pidx = tr_num*numParticles+p_num+1; csum += wt_p[pidx]/wtSum; } int idx = TT*p_num + row; sample[row] = pos[idx]; } } //else, samples a particle else { KC_FP_TYPE wtSum = 0; int p_num; for(p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt_p[pidx] = wt[idx]*exp(-0.5/w*pow( sample[row+1] - (pos[idx] + b[betaIdx[row]]),2 )); wtSum += wt_p[pidx]; } wtSum = fmax(wtSum,1e-30); KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum; for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) { int pidx = tr_num*numParticles+p_num+1; csum += wt_p[pidx]/wtSum; } int idx = TT*p_num + row; sample[row] = pos[idx]; } } } } /* Performs a forward sweep of the path after backwards sampling Draws from prior for steps post-threshold crossing (for conjugate sampling of parameters) Calculates som statistics for later sampling trial number given by CUDA thread */ __global__ void kcForwardFinalPass( KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * randUni, const KC_FP_TYPE* b, const int * betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx,const int NT, KC_FP_TYPE * beta_sum) { int tr_num = blockIdx.x*blockDim.x+threadIdx.x; if(tr_num < NT) { int t_0 = trIdx[tr_num]; beta_sum[tr_num] = 0; int trLength = trIdx[tr_num+1] - trIdx[tr_num]; KC_FP_TYPE cb = b[betaIndVec[t_0]]; for(int t = 0; t < trLength; t++) { if(t == crossingTimes[tr_num]) { //samples the first value of lambda to cross the bound (truncated normal, > 1) KC_FP_TYPE mu = (t > 0)?(lambda[t_0 + t-1]+cb):l_0; KC_FP_TYPE minS = normcdf((1-mu)/sqrt(w)); if(minS >= 1.0-1e-5) { lambda[t_0 + t] = 1; } else { lambda[t_0 + t] = mu+sqrt(w)*normcdfinv( minS + (1-minS)*randUni[t_0+t]); } } else if(t > crossingTimes[tr_num]) { lambda[t_0 + t] = lambda[t_0 + t - 1] + cb + KC_SQRT(w)*normcdfinv( randUni[t_0+t]); } beta_sum[tr_num] += (t>0 && t <= crossingTimes[tr_num])?(lambda[t_0 + t] - lambda[t_0 + t-1]):0; //only include lambdas up until first threshold crossing to look at drift rates } } } //single thread kernel to assemble stats of the ramps across trials for sampling beta,l_0 __global__ void kcAssembleSamplingStatistics(KC_FP_TYPE * sigMat, KC_FP_TYPE * muVec, const KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * beta_sum,const int*betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx, const int NT, const int numBetas) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx == 0) { for(int trNum = 0; trNum < NT; trNum++) { int t_0 = trIdx[trNum]; int cb = betaIndVec[t_0]; int trLength = trIdx[trNum+1] - trIdx[trNum]; sigMat[(cb)*(numBetas+1) + cb] += fmin(1.0*crossingTimes[trNum],trLength-1.0)/w; sigMat[(numBetas)*(numBetas+1) + numBetas] += 1.0/w; muVec[cb] += beta_sum[trNum]/w; muVec[numBetas] += lambda[t_0]/w; } } } //Samples a single set of latent paths from the ramping model for a set of trials given fixed parameters //args // 0 = new lambda (output, should be pre-allocated on GPU, same size as y) // 1 = new auxiliary variable for threshold crossing (output, should be pre-allocated on GPU, vector of length number of trials) // 2 = y (observations) // 3 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y) // 4 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. 
values begin at 0 instead of 1 to be consistent with C, unlike MATLAB) // 5 = betas (the beta values) // 6 = w (variance of diffusion process) // 7 = l_0 (starting lambda value) // 8 = g (absorbing boundary effective height) -> numNeur x 1 // 9 = dt (bin/timestep size) // 10 = numParticles // 11 = minEffParticles (how many effective particles per trial to keep around) // 12 = sigMult (used for particle proposals, proposal variance is sigMult*w) // 13 = maxTrialLength // 14 = beta/l_0 sampling vec param c (uses this as output for sampling betas, l_0) // 15 = beta/l_0 sampling vec param p uses this as output for sampling betas, l_0) // 16 = spike history effect // 17 = numNeurons // 18 = biases // 19 = power void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { hipError_t ce; hiprandStatus_t cre; /*ce = hipSetDevice(KC_GPU_DEVICE); if(ce != hipSuccess) { mexPrintf("Error initializing device (kcParticleFilterProp.cu) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); }*/ //init data unsigned int TY = kcGetArrayNumEl(prhs[2]); // TY is total length of y unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * lambdaTarget = kcGetArrayData(prhs[0]); int * auxiliaryTarget = kcGetArrayDataInt(prhs[1]); KC_FP_TYPE * y = kcGetArrayData(prhs[2],TY); int numNeur = mxGetScalar(prhs[17]); int * trIdx = kcGetArrayDataInt(prhs[3]); unsigned int NT = kcGetArrayNumEl(prhs[3])-1; int * betaIdxVector = kcGetArrayDataInt(prhs[4]); KC_FP_TYPE * b = mxGetPr(prhs[5]); int numBetas = mxGetNumberOfElements(prhs[5]); KC_FP_TYPE * b_gpu; ce = hipMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas); if(ce != hipSuccess) { mexPrintf("Error allocating space for betas on GPU - first allocation in function (particle filter) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } ce = hipMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,hipMemcpyHostToDevice); if(ce != hipSuccess) { mexPrintf("Error moving betas to GPU (particle filter) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } KC_FP_TYPE w = mxGetScalar(prhs[6]); KC_FP_TYPE l_0 = mxGetScalar(prhs[7]); KC_FP_TYPE * g; checkCudaErrors(hipMalloc((void**)&g,sizeof(KC_FP_TYPE)*numNeur)); // number of gammas is numNeurons checkCudaErrors(hipMemcpy(g,(KC_FP_TYPE*)mxGetPr(prhs[8]),sizeof(KC_FP_TYPE)*numNeur,hipMemcpyHostToDevice)); KC_FP_TYPE dt = mxGetScalar(prhs[9]); int numParticles = mxGetScalar(prhs[10]); int minEffParticles = mxGetScalar(prhs[11]); int sigMult = mxGetScalar(prhs[12]); int maxTrialLength = mxGetScalar(prhs[13]); KC_FP_TYPE log_power = mxGetScalar(prhs[19]); //load spike history effect KC_FP_TYPE * spe = kcGetArrayData(prhs[16],TY); KC_FP_TYPE * bias; checkCudaErrors(hipMalloc((void**)&bias,sizeof(KC_FP_TYPE)*numNeur)); // number of biases is numNeurons checkCudaErrors(hipMemcpy(bias,(KC_FP_TYPE*)mxGetPr(prhs[18]),sizeof(KC_FP_TYPE)*numNeur,hipMemcpyHostToDevice)); //particle weights/probabilities of hitting the bound KC_FP_TYPE * p_clte; KC_FP_TYPE * p_cet; KC_FP_TYPE * p_cgt; KC_FP_TYPE * p_clt; KC_FP_TYPE * p_cpr; checkCudaErrors(hipMalloc((void**)&p_clte, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cet, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cgt, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_clt, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cpr, TT*sizeof(KC_FP_TYPE))); KC_FP_TYPE * wt; KC_FP_TYPE * wt_p; KC_FP_TYPE * 
pos;//particle positions checkCudaErrors(hipMalloc((void**)&wt, (TT)*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&wt_p, (NT)*numParticles*sizeof(KC_FP_TYPE))); ce = hipMalloc((void**)&pos, (TT)*numParticles*sizeof(KC_FP_TYPE)); if(ce != hipSuccess) { mexPrintf("Error allocating pos "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } KC_FP_TYPE * log_li; KC_FP_TYPE * posc; //for resampling KC_FP_TYPE * lw; //unnormalized weights KC_FP_TYPE * lw2; KC_FP_TYPE * ncdf; KC_FP_TYPE * p_cet_0; KC_FP_TYPE * p_cgt_0a; KC_FP_TYPE * p_cgt_0b; KC_FP_TYPE * lg; //log p(y|at boundary) KC_FP_TYPE * cumsum; KC_FP_TYPE * beta_sum; checkCudaErrors(hipMalloc((void**)&log_li, NT*numParticles*sizeof(KC_FP_TYPE))); //checkCudaErrors(hipMalloc((void**)&log_lic, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&posc, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&lw, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&lw2, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&ncdf, TT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cet_0, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cgt_0a, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cgt_0b, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&cumsum, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&beta_sum, NT*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&lg, TT*sizeof(KC_FP_TYPE))); KC_FP_TYPE * nEff; checkCudaErrors(hipMalloc((void**)&nEff, NT*sizeof(KC_FP_TYPE))); int randSize = (NT*numParticles) + ((NT*numParticles)%2==0?0:1); int randSizeS = (NT) + (NT%2==0?0:1); int randSizeT = (TT) + (TT%2==0?0:1); KC_FP_TYPE * randN; KC_FP_TYPE * randNs; KC_FP_TYPE * randTs; ce = hipMalloc((void**)&randN, randSize *sizeof(KC_FP_TYPE)); if(ce != hipSuccess) { mexPrintf("Error allocating randN "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } ce = hipMalloc((void**)&randNs, randSizeS*sizeof(KC_FP_TYPE)); if(ce != hipSuccess) { mexPrintf("Error allocating randNs "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } ce = hipMalloc((void**)&randTs, randSizeT*sizeof(KC_FP_TYPE)); if(ce != hipSuccess) { mexPrintf("Error allocating randTs "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } //setup the random number generator hiprandGenerator_t curandGen = 0; hiprandStatus_t hiprandStatus_t; hiprandStatus_t = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error initializing random number generator (%d).\n",(int)hiprandStatus_t); mexErrMsgTxt(buffer); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); hiprandStatus_t = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed); //hiprandStatus_t = hiprandSetPseudoRandomGeneratorSeed(curandGen, (unsigned int)time(NULL)); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error random number seed (%d).\n",(int)hiprandStatus_t); mexErrMsgTxt(buffer); } hiprandStatus_t = hiprandGenerateSeeds(curandGen); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error random number generating seed 
(%d).\n",(int)hiprandStatus_t); mexErrMsgTxt(buffer); } //hipThreadSetLimit(hipLimitStackSize, 1024); //setup initial particle positions int blockSize , nBlocks; int blockSizeT, nBlocksT; int blockSizeN, nBlocksN; blockSizeT = 4; nBlocksT = TT/blockSizeT + ((TT%blockSizeT==0)?0:1); blockSizeN = 1; nBlocksN = NT/blockSizeN + ((NT%blockSizeN==0)?0:1); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error before kcSetupLG "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } hipLaunchKernelGGL(( kcSetupLG) , dim3(nBlocksT), dim3(blockSizeT) , 0, 0, y,spe,lg,g,dt,TT,numNeur,bias,log_power); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error after kcSetupLG<<<%d,%d>>> ",nBlocksT,blockSizeT); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } blockSize = 8; int totalThreads = numParticles*NT; nBlocks = totalThreads/blockSize + ((totalThreads%blockSize==0)?0:1); //mexPrintf("Max trial length = %d, blockSizes = %d,%d, nBlocks = %d,%d\n", maxTrialLength,blockSize,blockSizeN,nBlocks,nBlocksN); //forward pass loop for (int ii = 0; ii < maxTrialLength;ii++) { //move all particles foward cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN,randSize); //random sample steps for all particles ce = hipDeviceSynchronize(); if(ce != hipSuccess) { int currDev; hipGetDevice(&currDev); mexPrintf("Error synchronizing post-rand draw 1 Size=%d ii=%d, current device=%d ",randSize,ii,currDev); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in particle propogation. Size=%d ii=%d ",randSize,ii); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } hipLaunchKernelGGL(( kcMoveParticles) , dim3(nBlocks), dim3(blockSize) , 0, 0, y,spe,pos,wt, b_gpu,betaIdxVector,l_0,g,w,dt,randN, sigMult,log_li,lw,lw2,ncdf, posc, trIdx, NT, TT, numParticles, ii, numNeur, bias, log_power); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { int currDev; hipGetDevice(&currDev); mexPrintf("Error after kcMoveParticles<<<%d,%d>>> ii=%d/%d, dev=%d ",nBlocks,blockSize,ii,maxTrialLength,currDev); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //normalize weights hipLaunchKernelGGL(( kcNormalizeWeights) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, y,wt,wt_p, lw, lw2, nEff, cumsum, trIdx, NT, TT, numParticles, ii); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error after kcNormalizeWeights<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //check effective num particles, resample when necessary cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSize); if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in resampler. 
ii=%d/%d ",ii,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } hipLaunchKernelGGL(( kcResampleParticles) , dim3(nBlocks), dim3(blockSize) , 0, 0, y,pos,posc,wt,log_li,wt_p, minEffParticles,cumsum,nEff,randN,p_cet_0,p_cgt_0a,p_cgt_0b,ncdf,trIdx, NT, TT, numParticles, ii); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error after kcResampleParticles<<<%d,%d>>> ii=%d/%d ",nBlocks,blockSize,ii,maxTrialLength); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //move passage density foward //__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, int * trIdx, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int t, int NT, int TT, int numParticles) { hipLaunchKernelGGL(( kcPropogateBoundaryDensity) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, y,p_clt,p_cet,p_cgt,p_clte,p_cpr,p_cet_0,p_cgt_0a, p_cgt_0b, lg, nEff, minEffParticles, cumsum,trIdx, NT, TT, numParticles, ii); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error after kcPropogateBoundaryDensity<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } } //backwards sample the particles for (int jj = maxTrialLength-1; jj >= 0; jj--) { cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSizeS); if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in backwards sampler (1). jj=%d/%d ",jj,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randNs,randSizeS); //ce = hipDeviceSynchronize(); if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in backwards sampler (2). jj=%d/%d ",jj,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error synchronizing before kcBackwardsSample (post random generation) jj=%d/%d ",jj,maxTrialLength); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } hipLaunchKernelGGL(( kcBackwardsSample) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, lambdaTarget, auxiliaryTarget, pos, wt, ncdf, b_gpu, betaIdxVector, l_0, w, g, p_cpr, p_clte, randN, randNs, wt_p, cumsum, trIdx, NT, TT, numParticles, jj); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error after kcBackwardsSample<<<%d,%d>>> jj=%d/%d ",nBlocksN,blockSizeN,jj,maxTrialLength); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } } cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randTs, randSizeT); //ce = hipDeviceSynchronize(); if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in final sampler (2). 
"); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error synchronizing before kcForwardFinalPass (post random generation) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //samples all latent variables beyond bound hit time hipLaunchKernelGGL(( kcForwardFinalPass) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, lambdaTarget, auxiliaryTarget, randTs, b_gpu, betaIdxVector, l_0, w, trIdx, NT, beta_sum); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error after kcForwardFinalPass "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //gets some statistics about the latent variables put together to be able to sample the drift rates KC_FP_TYPE * sampling_c; KC_FP_TYPE * sampling_p; checkCudaErrors(hipMalloc((void**)&sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1))); checkCudaErrors(hipMalloc((void**)&sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1))); checkCudaErrors(hipMemcpy(sampling_c,(KC_FP_TYPE*)mxGetPr(prhs[14]), sizeof(KC_FP_TYPE)*(numBetas+1),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(sampling_p,(KC_FP_TYPE*)mxGetPr(prhs[15]), sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kcAssembleSamplingStatistics), dim3(1),dim3(1), 0, 0, sampling_p, sampling_c, lambdaTarget, auxiliaryTarget, beta_sum,betaIdxVector,l_0, w, trIdx, NT, numBetas); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(prhs[14]),sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1),hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(prhs[15]),sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),hipMemcpyDeviceToHost)); //free up memory cre = hiprandDestroyGenerator(curandGen); if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error destroying rand generator (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error synchronizing post-rand generator destruction (particleFilter) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } checkCudaErrors(hipFree(b_gpu)); checkCudaErrors(hipFree(p_clte)); checkCudaErrors(hipFree(p_cet)); checkCudaErrors(hipFree(p_cgt)); checkCudaErrors(hipFree(p_clt)); checkCudaErrors(hipFree(p_cpr)); checkCudaErrors(hipFree(pos)); checkCudaErrors(hipFree(wt)); ce = hipFree(wt_p); if(ce != hipSuccess) { mexPrintf("Error freeing memory in particle filter (wt_p) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } checkCudaErrors(hipFree(log_li)); checkCudaErrors(hipFree(posc)); checkCudaErrors(hipFree(lw)); checkCudaErrors(hipFree(lw2)); checkCudaErrors(hipFree(ncdf)); checkCudaErrors(hipFree(p_cet_0)); checkCudaErrors(hipFree(p_cgt_0a)); checkCudaErrors(hipFree(p_cgt_0b)); checkCudaErrors(hipFree(lg)); checkCudaErrors(hipFree(g)); checkCudaErrors(hipFree(bias)); checkCudaErrors(hipFree(cumsum)); checkCudaErrors(hipFree(beta_sum)); checkCudaErrors(hipFree(sampling_c)); checkCudaErrors(hipFree(sampling_p)); checkCudaErrors(hipFree(nEff)); checkCudaErrors(hipFree(randN)); checkCudaErrors(hipFree(randNs)); checkCudaErrors(hipFree(randTs)); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error at the end ofthe particle filter "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } }
ae570156f37706a7ec3e4133ffb59c087e8e90b5.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cuda_runtime.h> #include <cusparse_v2.h> #include "cublas_v2.h" #include <curand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ //KC_FP_TYPE can be assumed to mean "double", but originally //this definition could also work with "float" for faster speed. //float compatability is no longer supported in this function. #include "kcArrayFunctions.h" #define MAX_P 1e25 #define MIN_P 1e-25 __device__ KC_FP_TYPE positiveBound(KC_FP_TYPE a) { //return a; if(isinf(a)) return MAX_P; else return fmin(fmax(a,MIN_P),MAX_P); } __device__ KC_FP_TYPE h(KC_FP_TYPE z, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE bias, KC_FP_TYPE log_power) { KC_FP_TYPE logex = ((gamma*z)>100)?(gamma*z):KC_MIN(log1p(exp(z*gamma)),KC_MAXN); return KC_MIN((KC_POW(logex*1.0000000,log_power)+bias)*KC_EXP(sh)*dt,MAX_P); } //one thread per particle <<< nTrials,nParticles >>> __global__ void kcMoveParticles(KC_FP_TYPE * y, KC_FP_TYPE * spe, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * b, int * betaIdxVector, KC_FP_TYPE l_0, KC_FP_TYPE * g, KC_FP_TYPE w, KC_FP_TYPE dt, KC_FP_TYPE * randN, KC_FP_TYPE sigMult, KC_FP_TYPE * log_li, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * ncdf, KC_FP_TYPE * posc, int * trIdx, int NT, int TT, int numParticles, int t, int numNeur, KC_FP_TYPE * bias, KC_FP_TYPE log_power) { int threadNum = blockIdx.x*blockDim.x + threadIdx.x; int tr_num = (int)threadNum / (int)numParticles; int p_num = threadNum % numParticles; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int row = trIdx[tr_num] + t; int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; KC_FP_TYPE cb = b[betaIdxVector[row]]; KC_FP_TYPE sw = sqrt(w); KC_FP_TYPE mup = (t==0)?(l_0):(pos[idx-1]+cb); KC_FP_TYPE mu = mup; KC_FP_TYPE sig2 = sigMult*w; KC_FP_TYPE sig = sqrt(sig2); KC_FP_TYPE maxI = fmin(1.0-1e-20, fmax( normcdf((1.0-mu)/sig),1e-20 )); pos[idx] = fmin(1.0-1e-20, normcdfinv(maxI*randN[pidx])*sig + mu); posc[pidx] = pos[idx]; KC_FP_TYPE dpos = pos[idx]-mu; KC_FP_TYPE log_pi_k = -log(maxI)-0.5*log(2.0*M_PI*sig2) - 0.5/sig2*(dpos*dpos); //to be stored for each particle: ncdf, lw, lw2 ncdf[idx] = normcdf((1-mup)/sw); KC_FP_TYPE dposp = pos[idx]-mup; KC_FP_TYPE log_p = -0*log(maxI) -0.5*log(2*M_PI*w)- 0.5/w*(dposp*dposp); log_li[pidx] = 0; for(int nn = 0; nn < numNeur; nn++) { log_li[pidx] += -h(pos[idx],g[nn],dt,spe[row+TT*nn],bias[nn],log_power)+y[row+TT*nn]*(log(fmax(h(pos[idx],g[nn],1.0,spe[row+TT*nn],bias[nn],log_power),1e-30))+log(dt))-lgamma(y[row+TT*nn]+1); } KC_FP_TYPE pw = (t==0)?(log(1/(KC_FP_TYPE)numParticles) ):( log(fmax(wt[idx-1], 1e-30)) ); lw[pidx] = exp(pw+log_p+log_li[pidx]-log_pi_k); lw2[pidx] = exp(pw+log_p -log_pi_k); //safety checks for numerical errors if(isnan(lw[pidx]) || isinf(lw[pidx]) || isnan(pos[idx]) || isinf(pos[idx]) || isnan(lw2[pidx]) || isinf(lw2[pidx])) { lw[pidx] = 0; lw2[pidx] = 0; pos[idx] = mup; posc[pidx] = mup; } } } } //one thread per trial <<< nTrials,1 >>> __global__ void kcNormalizeWeights(KC_FP_TYPE * y, KC_FP_TYPE * wt, KC_FP_TYPE * wt_p, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * nEff, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int row = trIdx[tr_num] + t; //sum up and normalize 
weights KC_FP_TYPE weightSum = 0; KC_FP_TYPE weightSum2 = 0; for(int p_num = 0; p_num < numParticles; p_num++) { int pidx = tr_num*numParticles+p_num; weightSum += lw[pidx]; weightSum2 += lw2[pidx]; } KC_FP_TYPE n_eff_den = 0; weightSum = fmax(weightSum,1e-20); weightSum2 = fmax(weightSum2,1e-20); for(int p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt[idx] = lw[pidx] /weightSum; wt_p[pidx] = lw2[pidx]/weightSum2; n_eff_den += wt[idx]*wt[idx]; cumsum[pidx] = (p_num>0)?(cumsum[pidx-1]+wt[idx]):(wt[idx]);//for resampling } nEff[tr_num] = 1/n_eff_den; } } } //initial calculation - probability of each set of spike counts coming from a rate at the bound __global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * spe,KC_FP_TYPE * lg,KC_FP_TYPE * g, KC_FP_TYPE dt,int TT, int numNeur, KC_FP_TYPE * bias, KC_FP_TYPE log_power) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx < TT) { KC_FP_TYPE log_sum = 0; for(int nn = 0; nn < numNeur; nn++) { log_sum += -h(1,g[nn], dt,spe[idx+TT*nn],bias[nn],log_power) + y[idx+TT*nn]*log(fmax(h(1,g[nn],dt,spe[idx+TT*nn],bias[nn],log_power),1e-30)) - lgamma(y[idx+TT*nn]+1); } lg[idx] = exp(log_sum); } } //one thread per particle <<< nTrials,nParticles >>> // if particles look bad, resamples them from the distribution before the next step __global__ void kcResampleParticles(KC_FP_TYPE * y, KC_FP_TYPE * pos, KC_FP_TYPE * posc, KC_FP_TYPE * wt, KC_FP_TYPE * log_li, KC_FP_TYPE * wt_p, int minEffParticles, KC_FP_TYPE * cumsum, KC_FP_TYPE * nEff, KC_FP_TYPE * randU, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * ncdf, int * trIdx, int NT, int TT, int numParticles, int t) { int threadNum = blockIdx.x*blockDim.x + threadIdx.x; int tr_num = (int)threadNum / (int)numParticles; int p_num = threadNum % numParticles; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int pidx = tr_num*numParticles+p_num; int row = trIdx[tr_num] + t; int idx = TT*p_num + row; int pidx_new = pidx; if(nEff[tr_num] < minEffParticles) { int p_num_new; for(p_num_new = 0; p_num_new < numParticles-1 && randU[pidx] > cumsum[numParticles*tr_num+p_num_new]; p_num_new++) { //everything taken care of in loop statement } pidx_new = tr_num*numParticles+p_num_new; wt[idx] = 1.0/(KC_FP_TYPE)numParticles; //weights are now uniform again pos[idx] = posc[pidx_new]; } KC_FP_TYPE wt_old = (t==0)?(1.0/(KC_FP_TYPE)numParticles):(wt[idx-1]); p_cet_0[pidx] = (1.0-ncdf[idx])*wt_old; p_cgt_0a[pidx] = exp(log_li[pidx])*wt_p[pidx]; //or pidx_new? 
p_cgt_0b[pidx] = ncdf[idx]*wt_old; } } } //one thread per trial <<< nTrials,1 >>> //move bound crossing probabilities forward in time __global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int row = trIdx[tr_num] + t; KC_FP_TYPE p_cet_s = 0; KC_FP_TYPE p_cgt_sa = 0; KC_FP_TYPE p_cgt_sb = 0; for(int p_num = 0; p_num < numParticles; p_num++) { int pidx = tr_num*numParticles+p_num; //int idx = TT*p_num + row; p_cet_s += p_cet_0[pidx]; p_cgt_sa += p_cgt_0a[pidx]; p_cgt_sb += p_cgt_0b[pidx]; //finished a bit of the resampler that must run post-sampling for parallelization not to screw up, this will only be used again if this is last timestep in trial if(nEff[tr_num] < minEffParticles && t-1==trLength) { cumsum[pidx] = 1/(KC_FP_TYPE)numParticles*(1+p_num); } } KC_FP_TYPE p_clte_old = ((t==0)?(0):(p_clte[row-1])); KC_FP_TYPE p_cgt_old = ((t==0)?(1):(p_cgt[row-1])); KC_FP_TYPE p_clt_1 = lg[row]*p_clte_old; KC_FP_TYPE p_cet_1 = lg[row]*(1.0-p_clte_old)*p_cet_s; KC_FP_TYPE p_cgt_1 = (1.0-p_clte_old)*p_cgt_sa*p_cgt_sb; p_cet[row] = p_cet_1/(p_clt_1+p_cet_1+p_cgt_1); p_clte[row] = (p_cet_1+p_clt_1)/(p_clt_1+p_cet_1+p_cgt_1); //this is a little redudant, but I think it is convenient later? p_clt[row] = p_clt_1/(p_clt_1+p_cet_1+p_cgt_1); p_cgt[row] = p_cgt_1/(p_clt_1+p_cet_1+p_cgt_1); p_cpr[row] = p_cgt_old*p_cet_s; //compare this index in MATLAB code } } } //Finally do that backwards sampling, <<< NT, 1 >>> __global__ void kcBackwardsSample(KC_FP_TYPE * sample, int * crossingTimes, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * ncdf, KC_FP_TYPE * b, int * betaIdx, KC_FP_TYPE l_0, KC_FP_TYPE w, KC_FP_TYPE * g, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_clte, KC_FP_TYPE * randUp, KC_FP_TYPE * randUb, KC_FP_TYPE * wt_p, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; int row = trIdx[tr_num] + t; if(t == trLength-1) { //if t=end of trial, start off the backwards sampling crossingTimes[tr_num] = trLength; //decide whether end trial has hit boundary if(randUb[tr_num] < p_clte[row]) { sample[row] = 1; crossingTimes[tr_num] = t; } //else select a particle to be end of trial (cumsum holds the CDF of the distribution over particles) else { int p_num; for(p_num = 0; p_num < numParticles-1 && randUp[tr_num] > cumsum[numParticles*tr_num+p_num]; p_num++) { } int idx = TT*p_num + row; sample[row] = pos[idx]; } } else if(t < trLength-1 && t >= 0) { //else, propgate backwards //if previous sample had hit threshold if(sample[row+1] >= 1) { //if boundary already reached if(randUb[tr_num] < p_clte[row]/(p_cpr[row+1] + p_clte[row])) { crossingTimes[tr_num] = t; sample[row] = 1; } //gets pre-crossing particle else { KC_FP_TYPE wtSum = 0; int p_num; for(p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt_p[pidx] = wt[idx]*fmax(1.0-ncdf[idx+1],1e-25); wtSum += wt_p[pidx]; } wtSum = fmax(wtSum,1e-30); KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum; for(p_num = 0; p_num < numParticles-1 && csum < 
randUp[tr_num]; p_num++) { int pidx = tr_num*numParticles+p_num+1; csum += wt_p[pidx]/wtSum; } int idx = TT*p_num + row; sample[row] = pos[idx]; } } //else, samples a particle else { KC_FP_TYPE wtSum = 0; int p_num; for(p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt_p[pidx] = wt[idx]*exp(-0.5/w*pow( sample[row+1] - (pos[idx] + b[betaIdx[row]]),2 )); wtSum += wt_p[pidx]; } wtSum = fmax(wtSum,1e-30); KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum; for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) { int pidx = tr_num*numParticles+p_num+1; csum += wt_p[pidx]/wtSum; } int idx = TT*p_num + row; sample[row] = pos[idx]; } } } } /* Performs a forward sweep of the path after backwards sampling Draws from prior for steps post-threshold crossing (for conjugate sampling of parameters) Calculates som statistics for later sampling trial number given by CUDA thread */ __global__ void kcForwardFinalPass( KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * randUni, const KC_FP_TYPE* b, const int * betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx,const int NT, KC_FP_TYPE * beta_sum) { int tr_num = blockIdx.x*blockDim.x+threadIdx.x; if(tr_num < NT) { int t_0 = trIdx[tr_num]; beta_sum[tr_num] = 0; int trLength = trIdx[tr_num+1] - trIdx[tr_num]; KC_FP_TYPE cb = b[betaIndVec[t_0]]; for(int t = 0; t < trLength; t++) { if(t == crossingTimes[tr_num]) { //samples the first value of lambda to cross the bound (truncated normal, > 1) KC_FP_TYPE mu = (t > 0)?(lambda[t_0 + t-1]+cb):l_0; KC_FP_TYPE minS = normcdf((1-mu)/sqrt(w)); if(minS >= 1.0-1e-5) { lambda[t_0 + t] = 1; } else { lambda[t_0 + t] = mu+sqrt(w)*normcdfinv( minS + (1-minS)*randUni[t_0+t]); } } else if(t > crossingTimes[tr_num]) { lambda[t_0 + t] = lambda[t_0 + t - 1] + cb + KC_SQRT(w)*normcdfinv( randUni[t_0+t]); } beta_sum[tr_num] += (t>0 && t <= crossingTimes[tr_num])?(lambda[t_0 + t] - lambda[t_0 + t-1]):0; //only include lambdas up until first threshold crossing to look at drift rates } } } //single thread kernel to assemble stats of the ramps across trials for sampling beta,l_0 __global__ void kcAssembleSamplingStatistics(KC_FP_TYPE * sigMat, KC_FP_TYPE * muVec, const KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * beta_sum,const int*betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx, const int NT, const int numBetas) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx == 0) { for(int trNum = 0; trNum < NT; trNum++) { int t_0 = trIdx[trNum]; int cb = betaIndVec[t_0]; int trLength = trIdx[trNum+1] - trIdx[trNum]; sigMat[(cb)*(numBetas+1) + cb] += fmin(1.0*crossingTimes[trNum],trLength-1.0)/w; sigMat[(numBetas)*(numBetas+1) + numBetas] += 1.0/w; muVec[cb] += beta_sum[trNum]/w; muVec[numBetas] += lambda[t_0]/w; } } } //Samples a single set of latent paths from the ramping model for a set of trials given fixed parameters //args // 0 = new lambda (output, should be pre-allocated on GPU, same size as y) // 1 = new auxiliary variable for threshold crossing (output, should be pre-allocated on GPU, vector of length number of trials) // 2 = y (observations) // 3 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y) // 4 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. 
values begin at 0 instead of 1 to be consistent with C, unlike MATLAB) // 5 = betas (the beta values) // 6 = w (variance of diffusion process) // 7 = l_0 (starting lambda value) // 8 = g (absorbing boundary effective height) -> numNeur x 1 // 9 = dt (bin/timestep size) // 10 = numParticles // 11 = minEffParticles (how many effective particles per trial to keep around) // 12 = sigMult (used for particle proposals, proposal variance is sigMult*w) // 13 = maxTrialLength // 14 = beta/l_0 sampling vec param c (uses this as output for sampling betas, l_0) // 15 = beta/l_0 sampling vec param p uses this as output for sampling betas, l_0) // 16 = spike history effect // 17 = numNeurons // 18 = biases // 19 = power void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { cudaError_t ce; curandStatus_t cre; /*ce = cudaSetDevice(KC_GPU_DEVICE); if(ce != cudaSuccess) { mexPrintf("Error initializing device (kcParticleFilterProp.cu) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); }*/ //init data unsigned int TY = kcGetArrayNumEl(prhs[2]); // TY is total length of y unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * lambdaTarget = kcGetArrayData(prhs[0]); int * auxiliaryTarget = kcGetArrayDataInt(prhs[1]); KC_FP_TYPE * y = kcGetArrayData(prhs[2],TY); int numNeur = mxGetScalar(prhs[17]); int * trIdx = kcGetArrayDataInt(prhs[3]); unsigned int NT = kcGetArrayNumEl(prhs[3])-1; int * betaIdxVector = kcGetArrayDataInt(prhs[4]); KC_FP_TYPE * b = mxGetPr(prhs[5]); int numBetas = mxGetNumberOfElements(prhs[5]); KC_FP_TYPE * b_gpu; ce = cudaMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas); if(ce != cudaSuccess) { mexPrintf("Error allocating space for betas on GPU - first allocation in function (particle filter) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } ce = cudaMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,cudaMemcpyHostToDevice); if(ce != cudaSuccess) { mexPrintf("Error moving betas to GPU (particle filter) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } KC_FP_TYPE w = mxGetScalar(prhs[6]); KC_FP_TYPE l_0 = mxGetScalar(prhs[7]); KC_FP_TYPE * g; checkCudaErrors(cudaMalloc((void**)&g,sizeof(KC_FP_TYPE)*numNeur)); // number of gammas is numNeurons checkCudaErrors(cudaMemcpy(g,(KC_FP_TYPE*)mxGetPr(prhs[8]),sizeof(KC_FP_TYPE)*numNeur,cudaMemcpyHostToDevice)); KC_FP_TYPE dt = mxGetScalar(prhs[9]); int numParticles = mxGetScalar(prhs[10]); int minEffParticles = mxGetScalar(prhs[11]); int sigMult = mxGetScalar(prhs[12]); int maxTrialLength = mxGetScalar(prhs[13]); KC_FP_TYPE log_power = mxGetScalar(prhs[19]); //load spike history effect KC_FP_TYPE * spe = kcGetArrayData(prhs[16],TY); KC_FP_TYPE * bias; checkCudaErrors(cudaMalloc((void**)&bias,sizeof(KC_FP_TYPE)*numNeur)); // number of biases is numNeurons checkCudaErrors(cudaMemcpy(bias,(KC_FP_TYPE*)mxGetPr(prhs[18]),sizeof(KC_FP_TYPE)*numNeur,cudaMemcpyHostToDevice)); //particle weights/probabilities of hitting the bound KC_FP_TYPE * p_clte; KC_FP_TYPE * p_cet; KC_FP_TYPE * p_cgt; KC_FP_TYPE * p_clt; KC_FP_TYPE * p_cpr; checkCudaErrors(cudaMalloc((void**)&p_clte, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cet, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cgt, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_clt, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cpr, TT*sizeof(KC_FP_TYPE))); KC_FP_TYPE * wt; KC_FP_TYPE * 
wt_p; KC_FP_TYPE * pos;//particle positions checkCudaErrors(cudaMalloc((void**)&wt, (TT)*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&wt_p, (NT)*numParticles*sizeof(KC_FP_TYPE))); ce = cudaMalloc((void**)&pos, (TT)*numParticles*sizeof(KC_FP_TYPE)); if(ce != cudaSuccess) { mexPrintf("Error allocating pos "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } KC_FP_TYPE * log_li; KC_FP_TYPE * posc; //for resampling KC_FP_TYPE * lw; //unnormalized weights KC_FP_TYPE * lw2; KC_FP_TYPE * ncdf; KC_FP_TYPE * p_cet_0; KC_FP_TYPE * p_cgt_0a; KC_FP_TYPE * p_cgt_0b; KC_FP_TYPE * lg; //log p(y|at boundary) KC_FP_TYPE * cumsum; KC_FP_TYPE * beta_sum; checkCudaErrors(cudaMalloc((void**)&log_li, NT*numParticles*sizeof(KC_FP_TYPE))); //checkCudaErrors(cudaMalloc((void**)&log_lic, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&posc, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&lw, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&lw2, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&ncdf, TT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cet_0, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cgt_0a, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cgt_0b, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&cumsum, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&beta_sum, NT*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&lg, TT*sizeof(KC_FP_TYPE))); KC_FP_TYPE * nEff; checkCudaErrors(cudaMalloc((void**)&nEff, NT*sizeof(KC_FP_TYPE))); int randSize = (NT*numParticles) + ((NT*numParticles)%2==0?0:1); int randSizeS = (NT) + (NT%2==0?0:1); int randSizeT = (TT) + (TT%2==0?0:1); KC_FP_TYPE * randN; KC_FP_TYPE * randNs; KC_FP_TYPE * randTs; ce = cudaMalloc((void**)&randN, randSize *sizeof(KC_FP_TYPE)); if(ce != cudaSuccess) { mexPrintf("Error allocating randN "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } ce = cudaMalloc((void**)&randNs, randSizeS*sizeof(KC_FP_TYPE)); if(ce != cudaSuccess) { mexPrintf("Error allocating randNs "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } ce = cudaMalloc((void**)&randTs, randSizeT*sizeof(KC_FP_TYPE)); if(ce != cudaSuccess) { mexPrintf("Error allocating randTs "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } //setup the random number generator curandGenerator_t curandGen = 0; curandStatus_t curandStatus; curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT); if(curandStatus != CURAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error initializing random number generator (%d).\n",(int)curandStatus); mexErrMsgTxt(buffer); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed); //curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, (unsigned int)time(NULL)); if(curandStatus != CURAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error random number seed (%d).\n",(int)curandStatus); mexErrMsgTxt(buffer); } curandStatus = curandGenerateSeeds(curandGen); if(curandStatus != CURAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error random number generating seed 
(%d).\n",(int)curandStatus); mexErrMsgTxt(buffer); } //cudaThreadSetLimit(cudaLimitStackSize, 1024); //setup initial particle positions int blockSize , nBlocks; int blockSizeT, nBlocksT; int blockSizeN, nBlocksN; blockSizeT = 4; nBlocksT = TT/blockSizeT + ((TT%blockSizeT==0)?0:1); blockSizeN = 1; nBlocksN = NT/blockSizeN + ((NT%blockSizeN==0)?0:1); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error before kcSetupLG "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } kcSetupLG <<< nBlocksT, blockSizeT >>> (y,spe,lg,g,dt,TT,numNeur,bias,log_power); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error after kcSetupLG<<<%d,%d>>> ",nBlocksT,blockSizeT); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } blockSize = 8; int totalThreads = numParticles*NT; nBlocks = totalThreads/blockSize + ((totalThreads%blockSize==0)?0:1); //mexPrintf("Max trial length = %d, blockSizes = %d,%d, nBlocks = %d,%d\n", maxTrialLength,blockSize,blockSizeN,nBlocks,nBlocksN); //forward pass loop for (int ii = 0; ii < maxTrialLength;ii++) { //move all particles foward cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN,randSize); //random sample steps for all particles ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { int currDev; cudaGetDevice(&currDev); mexPrintf("Error synchronizing post-rand draw 1 Size=%d ii=%d, current device=%d ",randSize,ii,currDev); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in particle propogation. Size=%d ii=%d ",randSize,ii); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } kcMoveParticles <<< nBlocks, blockSize >>> (y,spe,pos,wt, b_gpu,betaIdxVector,l_0,g,w,dt,randN, sigMult,log_li,lw,lw2,ncdf, posc, trIdx, NT, TT, numParticles, ii, numNeur, bias, log_power); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { int currDev; cudaGetDevice(&currDev); mexPrintf("Error after kcMoveParticles<<<%d,%d>>> ii=%d/%d, dev=%d ",nBlocks,blockSize,ii,maxTrialLength,currDev); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //normalize weights kcNormalizeWeights <<< nBlocksN,blockSizeN >>> (y,wt,wt_p, lw, lw2, nEff, cumsum, trIdx, NT, TT, numParticles, ii); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error after kcNormalizeWeights<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //check effective num particles, resample when necessary cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSize); if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in resampler. 
ii=%d/%d ",ii,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } kcResampleParticles <<< nBlocks, blockSize >>> (y,pos,posc,wt,log_li,wt_p, minEffParticles,cumsum,nEff,randN,p_cet_0,p_cgt_0a,p_cgt_0b,ncdf,trIdx, NT, TT, numParticles, ii); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error after kcResampleParticles<<<%d,%d>>> ii=%d/%d ",nBlocks,blockSize,ii,maxTrialLength); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //move passage density foward //__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, int * trIdx, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int t, int NT, int TT, int numParticles) { kcPropogateBoundaryDensity <<< nBlocksN,blockSizeN >>> (y,p_clt,p_cet,p_cgt,p_clte,p_cpr,p_cet_0,p_cgt_0a, p_cgt_0b, lg, nEff, minEffParticles, cumsum,trIdx, NT, TT, numParticles, ii); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error after kcPropogateBoundaryDensity<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } } //backwards sample the particles for (int jj = maxTrialLength-1; jj >= 0; jj--) { cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSizeS); if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in backwards sampler (1). jj=%d/%d ",jj,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randNs,randSizeS); //ce = cudaDeviceSynchronize(); if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in backwards sampler (2). jj=%d/%d ",jj,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error synchronizing before kcBackwardsSample (post random generation) jj=%d/%d ",jj,maxTrialLength); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } kcBackwardsSample <<< nBlocksN,blockSizeN >>> (lambdaTarget, auxiliaryTarget, pos, wt, ncdf, b_gpu, betaIdxVector, l_0, w, g, p_cpr, p_clte, randN, randNs, wt_p, cumsum, trIdx, NT, TT, numParticles, jj); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error after kcBackwardsSample<<<%d,%d>>> jj=%d/%d ",nBlocksN,blockSizeN,jj,maxTrialLength); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } } cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randTs, randSizeT); //ce = cudaDeviceSynchronize(); if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in final sampler (2). 
"); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error synchronizing before kcForwardFinalPass (post random generation) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //samples all latent variables beyond bound hit time kcForwardFinalPass <<< nBlocksN,blockSizeN >>> (lambdaTarget, auxiliaryTarget, randTs, b_gpu, betaIdxVector, l_0, w, trIdx, NT, beta_sum); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error after kcForwardFinalPass "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //gets some statistics about the latent variables put together to be able to sample the drift rates KC_FP_TYPE * sampling_c; KC_FP_TYPE * sampling_p; checkCudaErrors(cudaMalloc((void**)&sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1))); checkCudaErrors(cudaMalloc((void**)&sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1))); checkCudaErrors(cudaMemcpy(sampling_c,(KC_FP_TYPE*)mxGetPr(prhs[14]), sizeof(KC_FP_TYPE)*(numBetas+1),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(sampling_p,(KC_FP_TYPE*)mxGetPr(prhs[15]), sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),cudaMemcpyHostToDevice)); kcAssembleSamplingStatistics<<<1,1>>>(sampling_p, sampling_c, lambdaTarget, auxiliaryTarget, beta_sum,betaIdxVector,l_0, w, trIdx, NT, numBetas); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(prhs[14]),sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(prhs[15]),sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),cudaMemcpyDeviceToHost)); //free up memory cre = curandDestroyGenerator(curandGen); if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error destroying rand generator (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error synchronizing post-rand generator destruction (particleFilter) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } checkCudaErrors(cudaFree(b_gpu)); checkCudaErrors(cudaFree(p_clte)); checkCudaErrors(cudaFree(p_cet)); checkCudaErrors(cudaFree(p_cgt)); checkCudaErrors(cudaFree(p_clt)); checkCudaErrors(cudaFree(p_cpr)); checkCudaErrors(cudaFree(pos)); checkCudaErrors(cudaFree(wt)); ce = cudaFree(wt_p); if(ce != cudaSuccess) { mexPrintf("Error freeing memory in particle filter (wt_p) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } checkCudaErrors(cudaFree(log_li)); checkCudaErrors(cudaFree(posc)); checkCudaErrors(cudaFree(lw)); checkCudaErrors(cudaFree(lw2)); checkCudaErrors(cudaFree(ncdf)); checkCudaErrors(cudaFree(p_cet_0)); checkCudaErrors(cudaFree(p_cgt_0a)); checkCudaErrors(cudaFree(p_cgt_0b)); checkCudaErrors(cudaFree(lg)); checkCudaErrors(cudaFree(g)); checkCudaErrors(cudaFree(bias)); checkCudaErrors(cudaFree(cumsum)); checkCudaErrors(cudaFree(beta_sum)); checkCudaErrors(cudaFree(sampling_c)); checkCudaErrors(cudaFree(sampling_p)); checkCudaErrors(cudaFree(nEff)); checkCudaErrors(cudaFree(randN)); checkCudaErrors(cudaFree(randNs)); checkCudaErrors(cudaFree(randTs)); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error at the end ofthe particle filter "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } }
cebda599b1f7d4974c48f0768beecbd8eb4fd96e.hip
// !!! This is a file automatically generated by hipify!!!
#include "knn_gpgpu.h"
#include <stdio.h>
#include <helper_cuda.h>

void writePoints(char *file_path, int n, struct Point *points)
{
    printf("writing points...\n");
    FILE *file = fopen(file_path, "w");
    if (file == NULL) {
        fputs("File error\n", stderr);
        exit(1);
    }
    for (int i = 0; i < n; ++i) {
        fwrite(&points[i].p, sizeof(float), 3, file);
    }
    fclose(file);
}

void readPoints(const char *file_path, int n, struct Point *points)
{
    printf("Reading points...\n");
    FILE *file = fopen(file_path, "rb");
    if (file == NULL) {
        fputs("File error\n", stderr);
        exit(1);
    }
    for (int i = 0; i < n; ++i) {
        fread(&points[i].p, sizeof(float), 3, file);
    }
    fclose(file);
}

void populatePoints(struct Point *points, int n)
{
    int i;
    srand((int)time(NULL));
    for (i = 0; i < n; ++i) {
        struct Point t;
        t.p[0] = (float) rand(), t.p[1] = (float) rand(), t.p[2] = (float) rand();
        points[i] = t;
    }
}

int main(int argc, char const *argv[])
{
    int n, nu, ni = 8388608, step = 250000;
    bool from_file = 0;
    n = nu = ni;

    if (argc == 2) {
        nu = ni = atoi(argv[1]);
        printf("Running kd-tree-build with n = %d\n", nu);
    } else if (argc == 3) {
        nu = ni = atoi(argv[1]);
        from_file = 1;
        printf("Running kd-tree-build from file '%s' with n = %d\n", argv[2], nu);
    } else if (argc == 4) {
        nu = atoi(argv[1]);
        ni = atoi(argv[2]);
        step = atoi(argv[3]);
        printf("Running kd-tree-build from n = %d to n = %d with step = %d\n", nu, ni, step);
    } else {
        printf("Running kd-tree-build with n = %d\n", nu);
    }

    for (n = nu; n <= ni; n += step) {
        struct Node *points_out = (struct Node *) malloc(n * sizeof(Node));
        struct Point *points = (struct Point *) malloc(n * sizeof(Point));

        if (from_file) {
            readPoints(argv[2], n, points);
        } else {
            populatePoints(points, n);
        }

        hipEvent_t start, stop;
        float elapsed_time = 0;

        checkCudaErrors(hipEventCreate(&start));
        checkCudaErrors(hipEventCreate(&stop));
        checkCudaErrors(hipEventRecord(start, 0));

        buildKdTree(points, n, points_out);

        checkCudaErrors(hipEventRecord(stop, 0));
        hipEventSynchronize(start);
        hipEventSynchronize(stop);
        hipEventElapsedTime(&elapsed_time, start, stop);

        printf("buildKdTree_naive, Time = %.5f ms, Size = %u Elements, NumDevsUsed = %d\n", elapsed_time, n, 1);

        free(points);
        free(points_out);
        hipDeviceReset();
    }

    return 0;
}
cebda599b1f7d4974c48f0768beecbd8eb4fd96e.cu
#include "knn_gpgpu.h"
#include <stdio.h>
#include <helper_cuda.h>

void writePoints(char *file_path, int n, struct Point *points)
{
    printf("writing points...\n");
    FILE *file = fopen(file_path, "w");
    if (file == NULL) {
        fputs("File error\n", stderr);
        exit(1);
    }
    for (int i = 0; i < n; ++i) {
        fwrite(&points[i].p, sizeof(float), 3, file);
    }
    fclose(file);
}

void readPoints(const char *file_path, int n, struct Point *points)
{
    printf("Reading points...\n");
    FILE *file = fopen(file_path, "rb");
    if (file == NULL) {
        fputs("File error\n", stderr);
        exit(1);
    }
    for (int i = 0; i < n; ++i) {
        fread(&points[i].p, sizeof(float), 3, file);
    }
    fclose(file);
}

void populatePoints(struct Point *points, int n)
{
    int i;
    srand((int)time(NULL));
    for (i = 0; i < n; ++i) {
        struct Point t;
        t.p[0] = (float) rand(), t.p[1] = (float) rand(), t.p[2] = (float) rand();
        points[i] = t;
    }
}

int main(int argc, char const *argv[])
{
    int n, nu, ni = 8388608, step = 250000;
    bool from_file = 0;
    n = nu = ni;

    if (argc == 2) {
        nu = ni = atoi(argv[1]);
        printf("Running kd-tree-build with n = %d\n", nu);
    } else if (argc == 3) {
        nu = ni = atoi(argv[1]);
        from_file = 1;
        printf("Running kd-tree-build from file '%s' with n = %d\n", argv[2], nu);
    } else if (argc == 4) {
        nu = atoi(argv[1]);
        ni = atoi(argv[2]);
        step = atoi(argv[3]);
        printf("Running kd-tree-build from n = %d to n = %d with step = %d\n", nu, ni, step);
    } else {
        printf("Running kd-tree-build with n = %d\n", nu);
    }

    for (n = nu; n <= ni; n += step) {
        struct Node *points_out = (struct Node *) malloc(n * sizeof(Node));
        struct Point *points = (struct Point *) malloc(n * sizeof(Point));

        if (from_file) {
            readPoints(argv[2], n, points);
        } else {
            populatePoints(points, n);
        }

        cudaEvent_t start, stop;
        float elapsed_time = 0;

        checkCudaErrors(cudaEventCreate(&start));
        checkCudaErrors(cudaEventCreate(&stop));
        checkCudaErrors(cudaEventRecord(start, 0));

        buildKdTree(points, n, points_out);

        checkCudaErrors(cudaEventRecord(stop, 0));
        cudaEventSynchronize(start);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed_time, start, stop);

        printf("buildKdTree_naive, Time = %.5f ms, Size = %u Elements, NumDevsUsed = %d\n", elapsed_time, n, 1);

        free(points);
        free(points_out);
        cudaDeviceReset();
    }

    return 0;
}
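The .hip/.cu pair above differs only in the runtime API names that hipify rewrites (hipEventCreate vs. cudaEventCreate, hipDeviceReset vs. cudaDeviceReset, hipLaunchKernelGGL vs. the <<<...>>> launch syntax). The minimal, self-contained CUDA sketch below shows the same event-timing pattern the benchmark uses, with the hipified equivalent of each call noted in a comment; the dummyKernel and the launch sizes are illustrative assumptions, not part of the original files.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(int *x) { if (threadIdx.x == 0) *x = 42; }

int main()
{
    int *x;
    cudaMalloc(&x, sizeof(int));                        // hipMalloc(&x, sizeof(int));

    cudaEvent_t start, stop;                            // hipEvent_t start, stop;
    float elapsed_time = 0;
    cudaEventCreate(&start);                            // hipEventCreate(&start);
    cudaEventCreate(&stop);                             // hipEventCreate(&stop);

    cudaEventRecord(start, 0);                          // hipEventRecord(start, 0);
    dummyKernel<<<1, 32>>>(x);                          // hipLaunchKernelGGL(dummyKernel, dim3(1), dim3(32), 0, 0, x);
    cudaEventRecord(stop, 0);                           // hipEventRecord(stop, 0);

    cudaEventSynchronize(stop);                         // hipEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);   // hipEventElapsedTime(&elapsed_time, start, stop);
    printf("Time = %.5f ms\n", elapsed_time);

    cudaFree(x);                                        // hipFree(x);
    cudaDeviceReset();                                  // hipDeviceReset();
    return 0;
}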
b7d2f63b5197924ff1cc25c36d8dc6014defb9f9.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Grid_Stride_Loop
 * Used to process arrays that are larger than the kernel grid.
 * The solution is that each thread runs several times, advancing its index
 * by gridDim * blockDim.
 */
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "cudaUtility.h"

__global__ void grid_stride(float* A, int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;

    // each thread processes elements idx, idx + stride, idx + 2*stride, ...
    for (int i = idx; i < N; i += stride) {
        A[i] *= 2;
    }
}

bool check(float *A, int N)
{
    for (int i = 0; i < N; i++) {
        if (A[i] != i * 2) return false;
    }
    return true;
}

int main(void)
{
    // number of elements
    int N = 1000;
    float *a;

    // allocation of both CPU and GPU memory handled by CUDA
    size_t size = N * sizeof(float);
    CUDA(hipMallocManaged(&a, size));

    // initialize a with known values so that check() can verify the doubling
    for (int i = 0; i < N; i++) {
        a[i] = (float) i;
    }

    // configure the kernel: grid size (blocks) first, then block size (threads)
    size_t threads_per_blocks = 256;
    size_t blocks_per_grid = 32;

    hipLaunchKernelGGL(grid_stride, dim3(blocks_per_grid), dim3(threads_per_blocks), 0, 0, a, N);
    CUDA(hipGetLastError());
    CUDA(hipDeviceSynchronize());

    bool test = check(a, N);
    printf("Were all numbers doubled? %s\n", test ? "TRUE" : "FALSE");

    CUDA(hipFree(a));
    return 0;
}
b7d2f63b5197924ff1cc25c36d8dc6014defb9f9.cu
/**
 * Grid_Stride_Loop
 * Used to process arrays that are larger than the kernel grid.
 * The solution is that each thread runs several times, advancing its index
 * by gridDim * blockDim.
 */
#include <stdio.h>
#include <cuda_runtime.h>
#include "cudaUtility.h"

__global__ void grid_stride(float* A, int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;

    // each thread processes elements idx, idx + stride, idx + 2*stride, ...
    for (int i = idx; i < N; i += stride) {
        A[i] *= 2;
    }
}

bool check(float *A, int N)
{
    for (int i = 0; i < N; i++) {
        if (A[i] != i * 2) return false;
    }
    return true;
}

int main(void)
{
    // number of elements
    int N = 1000;
    float *a;

    // allocation of both CPU and GPU memory handled by CUDA
    size_t size = N * sizeof(float);
    CUDA(cudaMallocManaged(&a, size));

    // initialize a with known values so that check() can verify the doubling
    for (int i = 0; i < N; i++) {
        a[i] = (float) i;
    }

    // configure the kernel: grid size (blocks) first, then block size (threads)
    size_t threads_per_blocks = 256;
    size_t blocks_per_grid = 32;

    grid_stride<<<blocks_per_grid, threads_per_blocks>>>(a, N);
    CUDA(cudaGetLastError());
    CUDA(cudaDeviceSynchronize());

    bool test = check(a, N);
    printf("Were all numbers doubled? %s\n", test ? "TRUE" : "FALSE");

    CUDA(cudaFree(a));
    return 0;
}
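To make the stride arithmetic above concrete, the small host-side sketch below (illustrative only, not part of the original file) enumerates which array elements a given thread of the grid_stride kernel would touch for a chosen grid shape.

#include <cstdio>

// Host-side mirror of the device loop: thread `idx` of a launch with
// `blocks * threads` total threads touches idx, idx+stride, idx+2*stride, ...
void elementsForThread(int idx, int blocks, int threads, int N)
{
    int stride = blocks * threads;
    printf("thread %d:", idx);
    for (int i = idx; i < N; i += stride)
        printf(" %d", i);
    printf("\n");
}

int main()
{
    // With 32 blocks of 256 threads there are 8192 threads in flight, so for
    // N = 1000 each thread handles at most one element; shrink the grid to
    // see each thread reused for several elements.
    elementsForThread(0, 2, 4, 20);   // prints: 0 8 16
    elementsForThread(5, 2, 4, 20);   // prints: 5 13
    return 0;
}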
bdf9e789d5d6392c2fa7fcd13dd9531b7f5baae0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////// // INCLUDES //////////////////////////////////////////// #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <time.h> #include "RRT.cuh" //////////////////////////////////////////// // CUDA KERNELS //////////////////////////////////////////// /* * Initializes CUDA RNG */ __global__ void RNG_setup_kernel(hiprandState_t *state) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // thread id hiprand_init(1234, idx, 0, &state[idx]); // using seed 1234 (change to time at a later stage) } /* * Initializes adjacent matrix */ __global__ void init_adj_matrix_kernel(int * adjacency_matrix){ int idx = blockIdx.x * blockDim.x + threadIdx.x; for(int i=0; i < NUM_THREADS*NUM_BLOCKS; i++){ int index = idx * NUM_THREADS*NUM_BLOCKS + i; if(index % (NUM_THREADS*NUM_BLOCKS + 1) == 0){ adjacency_matrix[index] = 0; }else{ adjacency_matrix[index] = 9999; //adjacency_matrix[index] = 0; } } } /* * Main kernel; Contains RRT algorithm */ __global__ void RRT_kernel(hiprandState_t *my_curandstate, int *adjacency_matrix, double * path_solutions, double * control_solutions, double* tmp) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // thread id // computing initial state double start_state[] = { ANG_POS_MIN, ANG_VEL_MIN}; // initial state; angle position measured from x-axis start_state[0] += ((idx % GRID_X) * 2 * DELTA_X) + (2 * DELTA_X); start_state[1] += (((idx / GRID_X) % (GRID_Y*NUM_BLOCKS)) * 2 * DELTA_Y) + (2 * DELTA_Y); tmp[2*idx] = start_state[0]; tmp[2*idx+1] = start_state[1]; // automate goal placement around initial state double end_state[NUM_OF_GOAL_STATES][DIMENSIONS] = {{0}}; int goal_idx; for(goal_idx = 0; goal_idx < pow((float)3,(float)DIMENSIONS); goal_idx++) { if(goal_idx < NUM_OF_GOAL_STATES/2){ end_state[goal_idx][0] = start_state[0] + ((goal_idx%3) - 1)*2*DELTA_X; end_state[goal_idx][1] = start_state[1] + (((goal_idx/3)%3) - 1)*2*DELTA_Y; }else if(goal_idx > NUM_OF_GOAL_STATES/2){ end_state[goal_idx-1][0] = start_state[0] + ((goal_idx%3) - 1)*2*DELTA_X; end_state[goal_idx-1][1] = start_state[1] + (((goal_idx/3)%3) - 1)*2*DELTA_Y; } } double state_limits[2][2] = { { start_state[0] - 3 * DELTA_X, start_state[0] + 3 * DELTA_X }, { start_state[1] - 3 * DELTA_Y, start_state[1] + 3 * DELTA_Y } }; // state limits; angular position between -pi & pi rad; angular velocity between -10 & 10 rad/s // control torques to be used: linspace(-5,5,20) //* double discrete_control_torques[] = { -5.0000, -4.4737, -3.9474, -3.4211, -2.8947, -2.3684, -1.8421, -1.3158, -0.7895, -0.2632, 5.0000, 4.4737, 3.9474, 3.4211, 2.8947, 2.3684, 1.8421, 1.3158, 0.7895, 0.2632 }; //*/ /* double discrete_control_torques[] = { -1.0000, -0.8947, -0.7895, -0.6842, -0.5789, -0.4737, -0.3684, -0.2632, -0.1579, -0.0526, 1.0000, 0.8947, 0.7895, 0.6842, 0.5789, 0.4737, 0.3684, 0.2632, 0.1579, 0.0526}; //*/ int number_of_discrete_torques = (int) (sizeof(discrete_control_torques) / sizeof(discrete_control_torques[0])); double time_step = 0.02; // time interval between application of subsequent control torques // static memory allocation double random_state[DIMENSIONS]; // stores a state double next_state[DIMENSIONS]; double RRT_tree[NUM_OF_ITERATIONS][DIMENSIONS]; // stores tree int x, y; for (x = 0; x < NUM_OF_ITERATIONS; x++) { // initialize tree to initial state RRT_tree[x][0] = start_state[0]; RRT_tree[x][1] = start_state[1]; } //int adjMatrix[NUM_THREADS][NUM_THREADS]; 
//memset(adjMatrix, 0, sizeof(int)*NUM_THREADS*NUM_THREADS); int parent_state_index[NUM_OF_ITERATIONS]; // stores index of parent state for each state in graph RRT_tree int control_action_index[NUM_OF_ITERATIONS]; // stores index of control actions in discrete_control_torques (each state will use a control action value in discrete_control_torques) double u_path[NUM_OF_GOAL_STATES][LENGTH_OF_SOLN_PATH]; // stores sequence of control actions (solution to problem) double x_path[NUM_OF_GOAL_STATES][LENGTH_OF_SOLN_PATH][DIMENSIONS]; for (y = 0; y < NUM_OF_GOAL_STATES; y++) { for (x = 0; x < LENGTH_OF_SOLN_PATH; x++) { // initialize tree to initial state x_path[y][x][0] = 0; x_path[y][x][1] = 0; u_path[y][x] = 0; } } int state_index = 0; // stores sequence of states joining initial to goal state double temp_achievable_states[20][DIMENSIONS]; // stores temporary achievable states from a particular vertex; 20 is length of discrete_control_torques double distance_square_values[NUM_OF_ITERATIONS]; // stores distance square values int goal_index; int not_found[NUM_OF_GOAL_STATES] = {0}; for(int i=0; i < NUM_OF_GOAL_STATES;i++) not_found[i] = 1; int weight = 0; // keep growing RRT until goal found or run out of iterations int iteration; for (iteration = 1; iteration < NUM_OF_ITERATIONS; iteration++) { // get random state random_state[0] = hiprand_uniform(my_curandstate + idx) * (state_limits[0][1] - state_limits[0][0]) + state_limits[0][0]; random_state[1] = hiprand_uniform(my_curandstate + idx) * (state_limits[1][1] - state_limits[1][0]) + state_limits[1][0]; // find distances between that state point and every vertex in RRT euclidianDistSquare(random_state, RRT_tree, iteration, distance_square_values); // select RRT vertex closest to the state point int nearest_state_index = findMin(distance_square_values, iteration); // from the closest RRT vertex, compute all the states that can be reached, // given the pendulum dynamics and available torques int ui; for (ui = 0; ui < number_of_discrete_torques; ui++) { pendulumDynamics(RRT_tree[nearest_state_index], discrete_control_torques[ui], next_state); temp_achievable_states[ui][0] = RRT_tree[nearest_state_index][0] + time_step * next_state[0]; temp_achievable_states[ui][1] = RRT_tree[nearest_state_index][1] + time_step * next_state[1]; } // select the closest reachable state point euclidianDistSquare(random_state, temp_achievable_states, number_of_discrete_torques, distance_square_values); ui = findMin(distance_square_values, number_of_discrete_torques); random_state[0] = temp_achievable_states[ui][0]; random_state[1] = temp_achievable_states[ui][1]; // if angular position is greater than pi rads, wrap around if (random_state[0] > M_PI || random_state[0] < -M_PI) random_state[0] = fmod((random_state[0] + M_PI), (2 * M_PI)) - M_PI; // link reachable state point to the nearest vertex in the tree RRT_tree[iteration][0] = random_state[0]; RRT_tree[iteration][1] = random_state[1]; parent_state_index[iteration] = nearest_state_index; control_action_index[iteration] = ui; // if tree has grown near enough to one of the surrounding goal states // set that particular goal state to 'found' // save path from initial state to that goal state for (goal_index = 0; goal_index < NUM_OF_GOAL_STATES; goal_index++) { if (not_found[goal_index] == 1 && (random_state[0] <= end_state[goal_index][0] + 0.05) && (random_state[0] >= end_state[goal_index][0] - 0.05)) { if ((random_state[1] <= end_state[goal_index][1] + 0.25) && (random_state[1] >= end_state[goal_index][1] - 0.25)) { 
not_found[goal_index] = 0; state_index = iteration; int length_of_soln = 0; while (state_index != 0) { u_path[goal_index][length_of_soln] = discrete_control_torques[control_action_index[state_index]]; x_path[goal_index][length_of_soln][0] = RRT_tree[state_index][0]; x_path[goal_index][length_of_soln][1] = RRT_tree[state_index][1]; length_of_soln++; state_index = parent_state_index[state_index]; } } } } } // Update adjacency matrix: // for each goal state surrounding an initial state, // if the goal state has been reached, // if tree is growing near border of phase space, check if tree is growing within state space limits // set respective flag in adjacency matrix to 1 (or to a weight) //* int offset[8] = {-43,-42,-41,-1,1,41,42,43}; int offset_idx = 0; weight = 1; int k; for (k = 0; k < NUM_OF_GOAL_STATES; k++) { if (not_found[k] == 0) { offset_idx = offset[k]; if((idx * NUM_THREADS * NUM_BLOCKS + idx + offset_idx >= 0) && (idx * NUM_THREADS * NUM_BLOCKS + idx + offset_idx < NUM_RESULTS_PER_THREAD * NUM_THREADS * NUM_BLOCKS)){ if((end_state[k][0] > ANG_POS_MIN+DELTA_X) && (end_state[k][0] < ANG_POS_MAX-DELTA_X) && (end_state[k][1] > ANG_VEL_MIN+DELTA_Y) && (end_state[k][1] < ANG_VEL_MAX-DELTA_Y) ){ adjacency_matrix[idx * NUM_THREADS * NUM_BLOCKS + idx + offset_idx] = weight; } } } } //*/ //* copy path results of algorithm to device results array int i, j; int num_of_goals = NUM_OF_GOAL_STATES; for (j = 0; j < num_of_goals; j++) { for (i = 0; i < LENGTH_OF_SOLN_PATH; i++) { path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i] = x_path[j][i][0]; path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i + 1] = x_path[j][i][1]; control_solutions[idx * num_of_goals * LENGTH_OF_SOLN_PATH + j * LENGTH_OF_SOLN_PATH + i] = u_path[j][i]; if (not_found[j] == 0) { if (i == LENGTH_OF_SOLN_PATH - 2) { path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i] = start_state[0]; path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i + 1] = start_state[1]; } else if (i == LENGTH_OF_SOLN_PATH - 1) { path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i] = end_state[j][0]; path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i + 1] = end_state[j][1]; } } } } //*/ /* int i; for (i = 0; i < NUM_RESULTS_PER_THREAD; i++) result[idx * NUM_RESULTS_PER_THREAD + i] = start_state[i]; //*/ /* result[idx * NUM_RESULTS_PER_THREAD + 0] = start_state[0]; result[idx * NUM_RESULTS_PER_THREAD + 1] = start_state[1]; //*/ } //////////////////////////////////////////// // HELPER FUNCTIONS //////////////////////////////////////////// /* * computes the Euclidian distances squared from point A to every point in array B */ __device__ void euclidianDistSquare(double* A, double B[][2], int lengthOfB, double* listOfDistSq) { int i; for (i = 0; i < lengthOfB; i++) listOfDistSq[i] = pow((B[i][0] - A[0]), 2) + pow((B[i][1] - A[1]), 2); } /* * finds the index of the minimum in an array */ __device__ int findMin(double array[], int lengthOfArray) { int minIndex = 0; int i; for (i = 0; i < lengthOfArray; i++) { if (array[i] < array[minIndex]) minIndex = i; } return minIndex; } /* * Computes x_dot of the pendulum, given x and a 
control input u */ __device__ void pendulumDynamics(double* x, double u, double* next_state) { // pendulum parameters int m = 1; // mass int l = 1; // length of pendulum link int I = m * l * l; // moment of inertia double g = 9.8; // acceleration due to gravity double b = 0.1; // damping factor next_state[0] = x[1]; next_state[1] = (u - m * g * l * sin((M_PI / 2) - x[0]) - b * x[1]) / I; }
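The kernel above expands the tree by integrating the pendulum dynamics one explicit Euler step (x_next = x + dt * x_dot) for every candidate torque and keeping the candidate closest to the random sample. The host-side sketch below mirrors that single expansion step with the same pendulum parameters; the reduced torque set, the nearest vertex, and the target state are illustrative assumptions, not values from the original file.

#include <cstdio>
#include <cmath>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

// Same dynamics as the device function above: x_dot[0] is angular velocity,
// x_dot[1] is angular acceleration under torque u, gravity, and damping.
static void pendulumDynamics(const double *x, double u, double *x_dot)
{
    const double m = 1, l = 1, I = m * l * l, g = 9.8, b = 0.1;
    x_dot[0] = x[1];
    x_dot[1] = (u - m * g * l * sin((M_PI / 2) - x[0]) - b * x[1]) / I;
}

int main()
{
    const double torques[] = { -5.0, -2.5, 0.0, 2.5, 5.0 }; // coarser than the kernel's 20 values
    const double dt = 0.02;
    double nearest[2] = { 0.1, 0.0 };   // hypothetical nearest tree vertex
    double target[2]  = { 0.3, 1.0 };   // hypothetical random sample

    int best = 0;
    double best_d2 = 1e30, best_state[2] = { 0, 0 };
    for (int ui = 0; ui < 5; ui++) {
        double x_dot[2], cand[2];
        pendulumDynamics(nearest, torques[ui], x_dot);
        cand[0] = nearest[0] + dt * x_dot[0];   // explicit Euler step
        cand[1] = nearest[1] + dt * x_dot[1];
        double d2 = pow(cand[0] - target[0], 2) + pow(cand[1] - target[1], 2);
        if (d2 < best_d2) { best_d2 = d2; best = ui; best_state[0] = cand[0]; best_state[1] = cand[1]; }
    }
    printf("best torque %.1f -> state (%.4f, %.4f)\n", torques[best], best_state[0], best_state[1]);
    return 0;
}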
bdf9e789d5d6392c2fa7fcd13dd9531b7f5baae0.cu
//////////////////////////////////////////// // INCLUDES //////////////////////////////////////////// #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <time.h> #include "RRT.cuh" //////////////////////////////////////////// // CUDA KERNELS //////////////////////////////////////////// /* * Initializes CUDA RNG */ __global__ void RNG_setup_kernel(curandState *state) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // thread id curand_init(1234, idx, 0, &state[idx]); // using seed 1234 (change to time at a later stage) } /* * Initializes adjacent matrix */ __global__ void init_adj_matrix_kernel(int * adjacency_matrix){ int idx = blockIdx.x * blockDim.x + threadIdx.x; for(int i=0; i < NUM_THREADS*NUM_BLOCKS; i++){ int index = idx * NUM_THREADS*NUM_BLOCKS + i; if(index % (NUM_THREADS*NUM_BLOCKS + 1) == 0){ adjacency_matrix[index] = 0; }else{ adjacency_matrix[index] = 9999; //adjacency_matrix[index] = 0; } } } /* * Main kernel; Contains RRT algorithm */ __global__ void RRT_kernel(curandState *my_curandstate, int *adjacency_matrix, double * path_solutions, double * control_solutions, double* tmp) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // thread id // computing initial state double start_state[] = { ANG_POS_MIN, ANG_VEL_MIN}; // initial state; angle position measured from x-axis start_state[0] += ((idx % GRID_X) * 2 * DELTA_X) + (2 * DELTA_X); start_state[1] += (((idx / GRID_X) % (GRID_Y*NUM_BLOCKS)) * 2 * DELTA_Y) + (2 * DELTA_Y); tmp[2*idx] = start_state[0]; tmp[2*idx+1] = start_state[1]; // automate goal placement around initial state double end_state[NUM_OF_GOAL_STATES][DIMENSIONS] = {{0}}; int goal_idx; for(goal_idx = 0; goal_idx < pow((float)3,(float)DIMENSIONS); goal_idx++) { if(goal_idx < NUM_OF_GOAL_STATES/2){ end_state[goal_idx][0] = start_state[0] + ((goal_idx%3) - 1)*2*DELTA_X; end_state[goal_idx][1] = start_state[1] + (((goal_idx/3)%3) - 1)*2*DELTA_Y; }else if(goal_idx > NUM_OF_GOAL_STATES/2){ end_state[goal_idx-1][0] = start_state[0] + ((goal_idx%3) - 1)*2*DELTA_X; end_state[goal_idx-1][1] = start_state[1] + (((goal_idx/3)%3) - 1)*2*DELTA_Y; } } double state_limits[2][2] = { { start_state[0] - 3 * DELTA_X, start_state[0] + 3 * DELTA_X }, { start_state[1] - 3 * DELTA_Y, start_state[1] + 3 * DELTA_Y } }; // state limits; angular position between -pi & pi rad; angular velocity between -10 & 10 rad/s // control torques to be used: linspace(-5,5,20) //* double discrete_control_torques[] = { -5.0000, -4.4737, -3.9474, -3.4211, -2.8947, -2.3684, -1.8421, -1.3158, -0.7895, -0.2632, 5.0000, 4.4737, 3.9474, 3.4211, 2.8947, 2.3684, 1.8421, 1.3158, 0.7895, 0.2632 }; //*/ /* double discrete_control_torques[] = { -1.0000, -0.8947, -0.7895, -0.6842, -0.5789, -0.4737, -0.3684, -0.2632, -0.1579, -0.0526, 1.0000, 0.8947, 0.7895, 0.6842, 0.5789, 0.4737, 0.3684, 0.2632, 0.1579, 0.0526}; //*/ int number_of_discrete_torques = (int) (sizeof(discrete_control_torques) / sizeof(discrete_control_torques[0])); double time_step = 0.02; // time interval between application of subsequent control torques // static memory allocation double random_state[DIMENSIONS]; // stores a state double next_state[DIMENSIONS]; double RRT_tree[NUM_OF_ITERATIONS][DIMENSIONS]; // stores tree int x, y; for (x = 0; x < NUM_OF_ITERATIONS; x++) { // initialize tree to initial state RRT_tree[x][0] = start_state[0]; RRT_tree[x][1] = start_state[1]; } //int adjMatrix[NUM_THREADS][NUM_THREADS]; //memset(adjMatrix, 0, sizeof(int)*NUM_THREADS*NUM_THREADS); int 
parent_state_index[NUM_OF_ITERATIONS]; // stores index of parent state for each state in graph RRT_tree int control_action_index[NUM_OF_ITERATIONS]; // stores index of control actions in discrete_control_torques (each state will use a control action value in discrete_control_torques) double u_path[NUM_OF_GOAL_STATES][LENGTH_OF_SOLN_PATH]; // stores sequence of control actions (solution to problem) double x_path[NUM_OF_GOAL_STATES][LENGTH_OF_SOLN_PATH][DIMENSIONS]; for (y = 0; y < NUM_OF_GOAL_STATES; y++) { for (x = 0; x < LENGTH_OF_SOLN_PATH; x++) { // initialize tree to initial state x_path[y][x][0] = 0; x_path[y][x][1] = 0; u_path[y][x] = 0; } } int state_index = 0; // stores sequence of states joining initial to goal state double temp_achievable_states[20][DIMENSIONS]; // stores temporary achievable states from a particular vertex; 20 is length of discrete_control_torques double distance_square_values[NUM_OF_ITERATIONS]; // stores distance square values int goal_index; int not_found[NUM_OF_GOAL_STATES] = {0}; for(int i=0; i < NUM_OF_GOAL_STATES;i++) not_found[i] = 1; int weight = 0; // keep growing RRT until goal found or run out of iterations int iteration; for (iteration = 1; iteration < NUM_OF_ITERATIONS; iteration++) { // get random state random_state[0] = curand_uniform(my_curandstate + idx) * (state_limits[0][1] - state_limits[0][0]) + state_limits[0][0]; random_state[1] = curand_uniform(my_curandstate + idx) * (state_limits[1][1] - state_limits[1][0]) + state_limits[1][0]; // find distances between that state point and every vertex in RRT euclidianDistSquare(random_state, RRT_tree, iteration, distance_square_values); // select RRT vertex closest to the state point int nearest_state_index = findMin(distance_square_values, iteration); // from the closest RRT vertex, compute all the states that can be reached, // given the pendulum dynamics and available torques int ui; for (ui = 0; ui < number_of_discrete_torques; ui++) { pendulumDynamics(RRT_tree[nearest_state_index], discrete_control_torques[ui], next_state); temp_achievable_states[ui][0] = RRT_tree[nearest_state_index][0] + time_step * next_state[0]; temp_achievable_states[ui][1] = RRT_tree[nearest_state_index][1] + time_step * next_state[1]; } // select the closest reachable state point euclidianDistSquare(random_state, temp_achievable_states, number_of_discrete_torques, distance_square_values); ui = findMin(distance_square_values, number_of_discrete_torques); random_state[0] = temp_achievable_states[ui][0]; random_state[1] = temp_achievable_states[ui][1]; // if angular position is greater than pi rads, wrap around if (random_state[0] > M_PI || random_state[0] < -M_PI) random_state[0] = fmod((random_state[0] + M_PI), (2 * M_PI)) - M_PI; // link reachable state point to the nearest vertex in the tree RRT_tree[iteration][0] = random_state[0]; RRT_tree[iteration][1] = random_state[1]; parent_state_index[iteration] = nearest_state_index; control_action_index[iteration] = ui; // if tree has grown near enough to one of the surrounding goal states // set that particular goal state to 'found' // save path from initial state to that goal state for (goal_index = 0; goal_index < NUM_OF_GOAL_STATES; goal_index++) { if (not_found[goal_index] == 1 && (random_state[0] <= end_state[goal_index][0] + 0.05) && (random_state[0] >= end_state[goal_index][0] - 0.05)) { if ((random_state[1] <= end_state[goal_index][1] + 0.25) && (random_state[1] >= end_state[goal_index][1] - 0.25)) { not_found[goal_index] = 0; state_index = iteration; int 
length_of_soln = 0; while (state_index != 0) { u_path[goal_index][length_of_soln] = discrete_control_torques[control_action_index[state_index]]; x_path[goal_index][length_of_soln][0] = RRT_tree[state_index][0]; x_path[goal_index][length_of_soln][1] = RRT_tree[state_index][1]; length_of_soln++; state_index = parent_state_index[state_index]; } } } } } // Update adjacency matrix: // for each goal state surrounding an initial state, // if the goal state has been reached, // if tree is growing near border of phase space, check if tree is growing within state space limits // set respective flag in adjacency matrix to 1 (or to a weight) //* int offset[8] = {-43,-42,-41,-1,1,41,42,43}; int offset_idx = 0; weight = 1; int k; for (k = 0; k < NUM_OF_GOAL_STATES; k++) { if (not_found[k] == 0) { offset_idx = offset[k]; if((idx * NUM_THREADS * NUM_BLOCKS + idx + offset_idx >= 0) && (idx * NUM_THREADS * NUM_BLOCKS + idx + offset_idx < NUM_RESULTS_PER_THREAD * NUM_THREADS * NUM_BLOCKS)){ if((end_state[k][0] > ANG_POS_MIN+DELTA_X) && (end_state[k][0] < ANG_POS_MAX-DELTA_X) && (end_state[k][1] > ANG_VEL_MIN+DELTA_Y) && (end_state[k][1] < ANG_VEL_MAX-DELTA_Y) ){ adjacency_matrix[idx * NUM_THREADS * NUM_BLOCKS + idx + offset_idx] = weight; } } } } //*/ //* copy path results of algorithm to device results array int i, j; int num_of_goals = NUM_OF_GOAL_STATES; for (j = 0; j < num_of_goals; j++) { for (i = 0; i < LENGTH_OF_SOLN_PATH; i++) { path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i] = x_path[j][i][0]; path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i + 1] = x_path[j][i][1]; control_solutions[idx * num_of_goals * LENGTH_OF_SOLN_PATH + j * LENGTH_OF_SOLN_PATH + i] = u_path[j][i]; if (not_found[j] == 0) { if (i == LENGTH_OF_SOLN_PATH - 2) { path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i] = start_state[0]; path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i + 1] = start_state[1]; } else if (i == LENGTH_OF_SOLN_PATH - 1) { path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i] = end_state[j][0]; path_solutions[idx * DIMENSIONS * num_of_goals * LENGTH_OF_SOLN_PATH + j * DIMENSIONS * LENGTH_OF_SOLN_PATH + DIMENSIONS * i + 1] = end_state[j][1]; } } } } //*/ /* int i; for (i = 0; i < NUM_RESULTS_PER_THREAD; i++) result[idx * NUM_RESULTS_PER_THREAD + i] = start_state[i]; //*/ /* result[idx * NUM_RESULTS_PER_THREAD + 0] = start_state[0]; result[idx * NUM_RESULTS_PER_THREAD + 1] = start_state[1]; //*/ } //////////////////////////////////////////// // HELPER FUNCTIONS //////////////////////////////////////////// /* * computes the Euclidian distances squared from point A to every point in array B */ __device__ void euclidianDistSquare(double* A, double B[][2], int lengthOfB, double* listOfDistSq) { int i; for (i = 0; i < lengthOfB; i++) listOfDistSq[i] = pow((B[i][0] - A[0]), 2) + pow((B[i][1] - A[1]), 2); } /* * finds the index of the minimum in an array */ __device__ int findMin(double array[], int lengthOfArray) { int minIndex = 0; int i; for (i = 0; i < lengthOfArray; i++) { if (array[i] < array[minIndex]) minIndex = i; } return minIndex; } /* * Computes x_dot of the pendulum, given x and a control input u */ __device__ void pendulumDynamics(double* 
x, double u, double* next_state) { // pendulum parameters int m = 1; // mass int l = 1; // length of pendulum link int I = m * l * l; // moment of inertia double g = 9.8; // acceleration due to gravity double b = 0.1; // damping factor next_state[0] = x[1]; next_state[1] = (u - m * g * l * sin((M_PI / 2) - x[0]) - b * x[1]) / I; }
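The RNG pattern used above (one curandState per thread, seeded once by a setup kernel, then curand_uniform rescaled into the state limits) is shown below as a minimal, self-contained CUDA sketch; the kernel names, array size, and the [lo, hi) range are illustrative assumptions, not part of the original file.

#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>

// Each thread owns one RNG state, initialized once with a fixed seed.
__global__ void setup_kernel(curandState *state)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(1234, idx, 0, &state[idx]);
}

// Each thread draws a uniform in (0, 1] from its own state and rescales it.
__global__ void draw_kernel(curandState *state, float *out, float lo, float hi, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        out[idx] = curand_uniform(&state[idx]) * (hi - lo) + lo;
}

int main()
{
    const int n = 8;
    curandState *d_state;
    float *d_out, h_out[n];
    cudaMalloc(&d_state, n * sizeof(curandState));
    cudaMalloc(&d_out, n * sizeof(float));

    setup_kernel<<<1, n>>>(d_state);
    draw_kernel<<<1, n>>>(d_state, d_out, -3.14159f, 3.14159f, n);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; i++)
        printf("%f\n", h_out[i]);

    cudaFree(d_state);
    cudaFree(d_out);
    return 0;
}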
ef1e0c38964059e275750a7edb2bdc05dba1561b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2020 Zhixu Zhao * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "tl_tensor_internal_cuda.h" /* TODO: strided access should be avoid */ template <typename T> static __global__ void pick1d_kernel(T *src, T *dst, int *idx, int stride, int block_size, int total) { int di = blockIdx.x * block_size + threadIdx.x; if (di >= total) return; int si = idx[di]; for (int i = 0; i < stride; i++) dst[di * stride + i] = src[si * stride + i]; } TL_EXPORT tl_tensor *tl_tensor_pick1d_cuda(const tl_tensor *src, const tl_tensor *index, tl_tensor *dst, int stride, int len) { assert(src); assert(tl_is_device_mem(src->data)); assert(src->ndim == 1); assert(index); assert(tl_is_device_mem(index->data)); assert(index->dtype == TL_INT32); assert(index->ndim == 1); assert(index->len >= len); assert(stride >= 1); if (dst) { assert(dst); assert(tl_is_device_mem(dst->data)); assert(dst->ndim == 1); assert(dst->len == len * stride); assert(dst->dtype == src->dtype); } else { int dims[1]; dims[0] = len; dst = tl_tensor_zeros_cuda(1, dims, src->dtype); } int thread_num, block_num; thread_num = len; block_num = thread_num / BLOCK_SIZE + 1; switch (src->dtype) { case TL_DOUBLE: hipLaunchKernelGGL(( pick1d_kernel<double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double *)src->data, (double *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_FLOAT: hipLaunchKernelGGL(( pick1d_kernel<float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float *)src->data, (float *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_INT32: hipLaunchKernelGGL(( pick1d_kernel<int32_t>) , dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t *)src->data, (int32_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_INT16: hipLaunchKernelGGL(( pick1d_kernel<int16_t>) , dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t *)src->data, (int16_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_INT8: hipLaunchKernelGGL(( pick1d_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t *)src->data, (int8_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_UINT32: hipLaunchKernelGGL(( pick1d_kernel<uint32_t>) , dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t *)src->data, (uint32_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_UINT16: hipLaunchKernelGGL(( 
pick1d_kernel<uint16_t>) , dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t *)src->data, (uint16_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_UINT8: hipLaunchKernelGGL(( pick1d_kernel<uint8_t>) , dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t *)src->data, (uint8_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_BOOL: hipLaunchKernelGGL(( pick1d_kernel<int>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int *)src->data, (int *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; default: assert(0 && "unsupported tl_dtype"); break; } tl_cuda_device_sync(); return dst; }
ef1e0c38964059e275750a7edb2bdc05dba1561b.cu
/* * Copyright (c) 2018-2020 Zhixu Zhao * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "tl_tensor_internal_cuda.h" /* TODO: strided access should be avoid */ template <typename T> static __global__ void pick1d_kernel(T *src, T *dst, int *idx, int stride, int block_size, int total) { int di = blockIdx.x * block_size + threadIdx.x; if (di >= total) return; int si = idx[di]; for (int i = 0; i < stride; i++) dst[di * stride + i] = src[si * stride + i]; } TL_EXPORT tl_tensor *tl_tensor_pick1d_cuda(const tl_tensor *src, const tl_tensor *index, tl_tensor *dst, int stride, int len) { assert(src); assert(tl_is_device_mem(src->data)); assert(src->ndim == 1); assert(index); assert(tl_is_device_mem(index->data)); assert(index->dtype == TL_INT32); assert(index->ndim == 1); assert(index->len >= len); assert(stride >= 1); if (dst) { assert(dst); assert(tl_is_device_mem(dst->data)); assert(dst->ndim == 1); assert(dst->len == len * stride); assert(dst->dtype == src->dtype); } else { int dims[1]; dims[0] = len; dst = tl_tensor_zeros_cuda(1, dims, src->dtype); } int thread_num, block_num; thread_num = len; block_num = thread_num / BLOCK_SIZE + 1; switch (src->dtype) { case TL_DOUBLE: pick1d_kernel<double><<<block_num, BLOCK_SIZE>>>((double *)src->data, (double *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_FLOAT: pick1d_kernel<float><<<block_num, BLOCK_SIZE>>>((float *)src->data, (float *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_INT32: pick1d_kernel<int32_t> <<<block_num, BLOCK_SIZE>>>((int32_t *)src->data, (int32_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_INT16: pick1d_kernel<int16_t> <<<block_num, BLOCK_SIZE>>>((int16_t *)src->data, (int16_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_INT8: pick1d_kernel<int8_t><<<block_num, BLOCK_SIZE>>>((int8_t *)src->data, (int8_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_UINT32: pick1d_kernel<uint32_t> <<<block_num, BLOCK_SIZE>>>((uint32_t *)src->data, (uint32_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_UINT16: pick1d_kernel<uint16_t> <<<block_num, BLOCK_SIZE>>>((uint16_t *)src->data, (uint16_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case TL_UINT8: pick1d_kernel<uint8_t> <<<block_num, BLOCK_SIZE>>>((uint8_t *)src->data, (uint8_t *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; case 
TL_BOOL: pick1d_kernel<int><<<block_num, BLOCK_SIZE>>>( (int *)src->data, (int *)dst->data, (int *)index->data, stride, BLOCK_SIZE, thread_num); break; default: assert(0 && "unsupported tl_dtype"); break; } tl_cuda_device_sync(); return dst; }
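The .hip/.cu pair above differs mainly in the runtime prefix (hip* vs cuda*) and in the kernel-launch syntax. As a minimal sketch, not part of the corpus and with an illustrative kernel name of its own, the launch rewrite that hipify applies looks roughly like this:

#include <cuda_runtime.h>

// Toy kernel standing in for pick1d_kernel; purely illustrative.
__global__ void copy_kernel(const float *src, float *dst, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dst[i] = src[i];
}

int main() {
    const int n = 1024, block = 256, grid = (n + block - 1) / block;
    float *src, *dst;
    cudaMalloc(&src, n * sizeof(float));
    cudaMalloc(&dst, n * sizeof(float));

    // CUDA triple-chevron launch (the .cu side of a pair):
    copy_kernel<<<grid, block>>>(src, dst, n);
    // hipify rewrites this on the .hip side to roughly:
    //   hipLaunchKernelGGL((copy_kernel), dim3(grid), dim3(block), 0, 0, src, dst, n);
    // where the two extra arguments are dynamic shared-memory bytes and the stream.

    cudaDeviceSynchronize();
    cudaFree(src);
    cudaFree(dst);
    return 0;
}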
da7247ca0c1bb952211ef68694d799fea0fe870f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void ReduceMeanKernel (double *Dens, double *Energy, int nsec, double *mean_dens, double *mean_energy,
                                  double *mean_dens2, double *mean_energy2, int nrad)
{
  int j = threadIdx.x + blockDim.x*blockIdx.x;
  int i = 0;

  if (j < nsec) {
    mean_dens[j] = Dens[i*nsec + j];
    mean_energy[j] = Energy[i*nsec + j];
  }

  i = nrad-1;

  if (j < nsec) {
    mean_dens2[j] = Dens[i*nsec + j];
    mean_energy2[j] = Energy[i*nsec + j];
  }
}
da7247ca0c1bb952211ef68694d799fea0fe870f.cu
#include "includes.h" __global__ void ReduceMeanKernel (double *Dens, double *Energy, int nsec, double *mean_dens, double *mean_energy, double *mean_dens2, double *mean_energy2, int nrad) { int j = threadIdx.x + blockDim.x*blockIdx.x; int i = 0; if(j<nsec){ mean_dens[j] = Dens[i*nsec+ j]; mean_energy[j] = Energy[i*nsec +j]; } i = nrad-1; if(j<nsec){ mean_dens2[j] = Dens[i*nsec + j]; mean_energy2[j] = Energy[i*nsec + j]; } }
a53a336195f93619a12a6c7935c8fc379be33f3d.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************/ #include <chrono> #include <cstdio> #include <cstdlib> #include <string> #include <vector> #include "nccl.h" #include "test_utilities.h" #include <roctracer/roctx.h> int csv = false; template<typename T> void RunTest(T** sendbuff, T** recvbuff, const int N, const ncclDataType_t type, const ncclRedOp_t op, int root, ncclComm_t* const comms, const std::vector<int>& dList) { // initialize data T* buffer = (T*)malloc(N * sizeof(T)); T* result = (T*)malloc(N * sizeof(T)); memset(buffer, 0, N * sizeof(T)); memset(result, 0, N * sizeof(T)); int nDev = 0; ncclCommCount(comms[0], &nDev); hipStream_t* s = (hipStream_t*)malloc(sizeof(hipStream_t)*nDev); for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipStreamCreate(s+i)); CUDACHECK(hipMemset(recvbuff[i], 0, N * sizeof(T))); Randomize(sendbuff[i], N, i); if(i == 0) { CUDACHECK(hipMemcpy(result, sendbuff[i], N*sizeof(T), hipMemcpyDeviceToHost)); } else { Accumulate<T>(result, sendbuff[i], N, op); } } // warm up GPU for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); ncclReduce((const void*)sendbuff[i], (void*)recvbuff[i], ::min(N, 1024 * 1024), type, op, root, comms[i], s[i]); } for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipStreamSynchronize(s[i])); } // for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1) { int n = N; printf((csv) ? 
"%i,%i,%s,%s,%d," : "%12i %12i %6s %6s %4d", (int) (n * sizeof(T)), n, TypeName(type).c_str(), OperationName(op).c_str(), root); // do out-of-place reduction first roctxRangePushA("out of place"); auto start = std::chrono::high_resolution_clock::now(); //for (int i=0; i<100; i++) { for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); ncclReduce((const void*)sendbuff[i], (void*)recvbuff[i], n, type, op, root, comms[i], s[i]); } //} for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipStreamSynchronize(s[i])); } auto stop = std::chrono::high_resolution_clock::now(); roctxRangePop(); roctxRangePushA("out of place bookkeeping"); double elapsedSec = std::chrono::duration_cast<std::chrono::duration<double>>( stop - start).count(); // / 100.0; double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec; double busbw = algbw; CUDACHECK(hipSetDevice(dList[root])); double maxDelta = CheckDelta<T>(recvbuff[root], result, N); printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le", elapsedSec * 1.0E3, algbw, busbw, maxDelta); roctxRangePop(); } // for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1) { int n = N; // now do in-place reduction roctxRangePushA("in place"); auto start = std::chrono::high_resolution_clock::now(); //for (int i=0; i<100; i++) { for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); ncclReduce((const void*)sendbuff[i], (void*)sendbuff[i], n, type, op, root, comms[i], s[i]); } //} for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipStreamSynchronize(s[i])); } auto stop = std::chrono::high_resolution_clock::now(); roctxRangePop(); roctxRangePushA("in place bookkeeping"); double elapsedSec = std::chrono::duration_cast<std::chrono::duration<double>>( stop - start).count(); // / 100.0; double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec; double busbw = algbw; CUDACHECK(hipSetDevice(dList[root])); double maxDelta = CheckDelta<T>(sendbuff[root], result, N); printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le\n", elapsedSec * 1.0E3, algbw, busbw, maxDelta); roctxRangePop(); } for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipStreamDestroy(s[i])); } free(s); free(buffer); free(result); } template<typename T> void RunTests(const int N, const ncclDataType_t type, ncclComm_t* const comms, const std::vector<int>& dList) { int nDev = 0; ncclCommCount(comms[0], &nDev); T** sendbuff = (T**)malloc(nDev * sizeof(T*)); T** recvbuff = (T**)malloc(nDev * sizeof(T*)); for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipMalloc(sendbuff + i, N * sizeof(T))); CUDACHECK(hipMalloc(recvbuff + i, N * sizeof(T))); } for (ncclRedOp_t op : { ncclSum, ncclProd, ncclMax, ncclMin }) { // for (ncclRedOp_t op : { ncclSum }) { for(int root=0; root<nDev; ++root) { RunTest<T>(sendbuff, recvbuff, N, type, op, root, comms, dList); } } for (int i = 0; i < nDev; ++i) { CUDACHECK(hipSetDevice(dList[i])); CUDACHECK(hipFree(sendbuff[i])); CUDACHECK(hipFree(recvbuff[i])); } free(sendbuff); free(recvbuff); } void usage() { printf("Tests nccl Reduce with user supplied arguments.\n" " Usage: reduce_test <data size in bytes> [number of GPUs] " "[GPU 0] [GPU 1] ...\n\n"); } int main(int argc, char* argv[]) { int nVis = 0; CUDACHECK(hipGetDeviceCount(&nVis)); int N = 0; if (argc > 1) { int t = sscanf(argv[1], "%d", &N); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[1]); usage(); exit(EXIT_FAILURE); } } else { printf("Error: must specify at least data size in 
bytes!\n\n"); usage(); exit(EXIT_FAILURE); } int nDev = nVis; if (argc > 2) { int t = sscanf(argv[2], "%d", &nDev); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[1]); usage(); exit(EXIT_FAILURE); } } std::vector<int> dList(nDev); for (int i = 0; i < nDev; ++i) dList[i] = i % nVis; if (argc > 3) { if (argc - 3 != nDev) { printf("Error: insufficient number of GPUs in list\n\n"); usage(); exit(EXIT_FAILURE); } for (int i = 0; i < nDev; ++i) { int t = sscanf(argv[3 + i], "%d", dList.data() + i); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[2 + i]); usage(); exit(EXIT_FAILURE); } } } ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nDev); ncclCommInitAll(comms, nDev, dList.data()); if (!csv) { printf("# Using devices\n"); for (int g = 0; g < nDev; ++g) { int cudaDev; int rank; hipDeviceProp_t prop; ncclCommCuDevice(comms[g], &cudaDev); ncclCommUserRank(comms[g], &rank); CUDACHECK(hipGetDeviceProperties(&prop, cudaDev)); printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev, prop.pciBusID, prop.name); } printf("\n"); printf("# %10s %12s %6s %6s %4s out-of-place in-place\n", "", "", "", "", ""); printf("# %10s %12s %6s %6s %4s %7s %5s %5s %7s %7s %5s %5s %7s\n", "bytes", "N", "type", "op", "root", "time", "algbw", "busbw", "res", "time", "algbw", "busbw", "res"); } else { printf("B,N,type,op,root,oop_time,oop_algbw,oop_busbw,oop_res,ip_time,ip_algbw,ip_busbw,ip_res\n"); } RunTests<char>(N / sizeof(char), ncclChar, comms, dList); RunTests<int>(N / sizeof(int), ncclInt, comms, dList); #ifdef CUDA_HAS_HALF RunTests<half>(N / sizeof(half), ncclHalf, comms, dList); #endif RunTests<float>(N / sizeof(float), ncclFloat, comms, dList); RunTests<double>(N / sizeof(double), ncclDouble, comms, dList); printf("\n"); for(int i = 0; i < nDev; ++i) ncclCommDestroy(comms[i]); free(comms); exit(EXIT_SUCCESS); }
a53a336195f93619a12a6c7935c8fc379be33f3d.cu
/************************************************************************* * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************/ #include <chrono> #include <cstdio> #include <cstdlib> #include <string> #include <vector> #include "nccl.h" #include "test_utilities.h" #include <nvToolsExt.h> int csv = false; template<typename T> void RunTest(T** sendbuff, T** recvbuff, const int N, const ncclDataType_t type, const ncclRedOp_t op, int root, ncclComm_t* const comms, const std::vector<int>& dList) { // initialize data T* buffer = (T*)malloc(N * sizeof(T)); T* result = (T*)malloc(N * sizeof(T)); memset(buffer, 0, N * sizeof(T)); memset(result, 0, N * sizeof(T)); int nDev = 0; ncclCommCount(comms[0], &nDev); cudaStream_t* s = (cudaStream_t*)malloc(sizeof(cudaStream_t)*nDev); for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaStreamCreate(s+i)); CUDACHECK(cudaMemset(recvbuff[i], 0, N * sizeof(T))); Randomize(sendbuff[i], N, i); if(i == 0) { CUDACHECK(cudaMemcpy(result, sendbuff[i], N*sizeof(T), cudaMemcpyDeviceToHost)); } else { Accumulate<T>(result, sendbuff[i], N, op); } } // warm up GPU for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); ncclReduce((const void*)sendbuff[i], (void*)recvbuff[i], std::min(N, 1024 * 1024), type, op, root, comms[i], s[i]); } for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaStreamSynchronize(s[i])); } // for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1) { int n = N; printf((csv) ? 
"%i,%i,%s,%s,%d," : "%12i %12i %6s %6s %4d", (int) (n * sizeof(T)), n, TypeName(type).c_str(), OperationName(op).c_str(), root); // do out-of-place reduction first nvtxRangePushA("out of place"); auto start = std::chrono::high_resolution_clock::now(); //for (int i=0; i<100; i++) { for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); ncclReduce((const void*)sendbuff[i], (void*)recvbuff[i], n, type, op, root, comms[i], s[i]); } //} for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaStreamSynchronize(s[i])); } auto stop = std::chrono::high_resolution_clock::now(); nvtxRangePop(); nvtxRangePushA("out of place bookkeeping"); double elapsedSec = std::chrono::duration_cast<std::chrono::duration<double>>( stop - start).count(); // / 100.0; double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec; double busbw = algbw; CUDACHECK(cudaSetDevice(dList[root])); double maxDelta = CheckDelta<T>(recvbuff[root], result, N); printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le", elapsedSec * 1.0E3, algbw, busbw, maxDelta); nvtxRangePop(); } // for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1) { int n = N; // now do in-place reduction nvtxRangePushA("in place"); auto start = std::chrono::high_resolution_clock::now(); //for (int i=0; i<100; i++) { for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); ncclReduce((const void*)sendbuff[i], (void*)sendbuff[i], n, type, op, root, comms[i], s[i]); } //} for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaStreamSynchronize(s[i])); } auto stop = std::chrono::high_resolution_clock::now(); nvtxRangePop(); nvtxRangePushA("in place bookkeeping"); double elapsedSec = std::chrono::duration_cast<std::chrono::duration<double>>( stop - start).count(); // / 100.0; double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec; double busbw = algbw; CUDACHECK(cudaSetDevice(dList[root])); double maxDelta = CheckDelta<T>(sendbuff[root], result, N); printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le\n", elapsedSec * 1.0E3, algbw, busbw, maxDelta); nvtxRangePop(); } for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaStreamDestroy(s[i])); } free(s); free(buffer); free(result); } template<typename T> void RunTests(const int N, const ncclDataType_t type, ncclComm_t* const comms, const std::vector<int>& dList) { int nDev = 0; ncclCommCount(comms[0], &nDev); T** sendbuff = (T**)malloc(nDev * sizeof(T*)); T** recvbuff = (T**)malloc(nDev * sizeof(T*)); for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaMalloc(sendbuff + i, N * sizeof(T))); CUDACHECK(cudaMalloc(recvbuff + i, N * sizeof(T))); } for (ncclRedOp_t op : { ncclSum, ncclProd, ncclMax, ncclMin }) { // for (ncclRedOp_t op : { ncclSum }) { for(int root=0; root<nDev; ++root) { RunTest<T>(sendbuff, recvbuff, N, type, op, root, comms, dList); } } for (int i = 0; i < nDev; ++i) { CUDACHECK(cudaSetDevice(dList[i])); CUDACHECK(cudaFree(sendbuff[i])); CUDACHECK(cudaFree(recvbuff[i])); } free(sendbuff); free(recvbuff); } void usage() { printf("Tests nccl Reduce with user supplied arguments.\n" " Usage: reduce_test <data size in bytes> [number of GPUs] " "[GPU 0] [GPU 1] ...\n\n"); } int main(int argc, char* argv[]) { int nVis = 0; CUDACHECK(cudaGetDeviceCount(&nVis)); int N = 0; if (argc > 1) { int t = sscanf(argv[1], "%d", &N); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[1]); usage(); exit(EXIT_FAILURE); } } else { printf("Error: must specify at least data 
size in bytes!\n\n"); usage(); exit(EXIT_FAILURE); } int nDev = nVis; if (argc > 2) { int t = sscanf(argv[2], "%d", &nDev); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[1]); usage(); exit(EXIT_FAILURE); } } std::vector<int> dList(nDev); for (int i = 0; i < nDev; ++i) dList[i] = i % nVis; if (argc > 3) { if (argc - 3 != nDev) { printf("Error: insufficient number of GPUs in list\n\n"); usage(); exit(EXIT_FAILURE); } for (int i = 0; i < nDev; ++i) { int t = sscanf(argv[3 + i], "%d", dList.data() + i); if (t == 0) { printf("Error: %s is not an integer!\n\n", argv[2 + i]); usage(); exit(EXIT_FAILURE); } } } ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nDev); ncclCommInitAll(comms, nDev, dList.data()); if (!csv) { printf("# Using devices\n"); for (int g = 0; g < nDev; ++g) { int cudaDev; int rank; cudaDeviceProp prop; ncclCommCuDevice(comms[g], &cudaDev); ncclCommUserRank(comms[g], &rank); CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev)); printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev, prop.pciBusID, prop.name); } printf("\n"); printf("# %10s %12s %6s %6s %4s out-of-place in-place\n", "", "", "", "", ""); printf("# %10s %12s %6s %6s %4s %7s %5s %5s %7s %7s %5s %5s %7s\n", "bytes", "N", "type", "op", "root", "time", "algbw", "busbw", "res", "time", "algbw", "busbw", "res"); } else { printf("B,N,type,op,root,oop_time,oop_algbw,oop_busbw,oop_res,ip_time,ip_algbw,ip_busbw,ip_res\n"); } RunTests<char>(N / sizeof(char), ncclChar, comms, dList); RunTests<int>(N / sizeof(int), ncclInt, comms, dList); #ifdef CUDA_HAS_HALF RunTests<half>(N / sizeof(half), ncclHalf, comms, dList); #endif RunTests<float>(N / sizeof(float), ncclFloat, comms, dList); RunTests<double>(N / sizeof(double), ncclDouble, comms, dList); printf("\n"); for(int i = 0; i < nDev; ++i) ncclCommDestroy(comms[i]); free(comms); exit(EXIT_SUCCESS); }
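The reduce test times each collective with std::chrono after synchronizing every per-device stream, then reports an algorithmic bandwidth (with bus bandwidth set equal to it for ncclReduce). As a small sketch, not part of the test, the reported figure amounts to:

// Algorithmic bandwidth in GB/s for n elements of type T moved in elapsed_sec seconds.
template <typename T>
double algorithmic_bandwidth_gbs(int n, double elapsed_sec) {
    return (double)(n * sizeof(T)) / 1.0e9 / elapsed_sec;   // bytes / 1e9 / seconds
}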
8f4df6ff47d29db4de96a1a079013452a7185140.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

__global__ void kernel(int *array)
{
  int index_x = blockIdx.x * blockDim.x + threadIdx.x;
  int index_y = blockIdx.y * blockDim.y + threadIdx.y;

  // map the two 2D indices to a single linear, 1D index
  int grid_width = gridDim.x * blockDim.x;
  int index = index_y * grid_width + index_x;

  // map the two 2D block indices to a single linear, 1D block index
  int result = blockIdx.y * gridDim.x + blockIdx.x;

  // write out the result
  array[index] = result;
}

int main(void)
{
  int num_elements_x = 16;
  int num_elements_y = 16;
  int num_bytes = num_elements_x * num_elements_y * sizeof(int);

  int *device_array;
  int *host_array;

  // allocate memory in either space
  host_array = (int*)malloc(num_bytes);
  hipMalloc((void**)&device_array, num_bytes);

  // create two dimensional 4x4 thread blocks
  dim3 block_size;
  block_size.x = 4;
  block_size.y = 4;

  // configure a two dimensional grid as well
  dim3 grid_size;
  grid_size.x = num_elements_x / block_size.x;
  grid_size.y = num_elements_y / block_size.y;

  // grid size & block size are passed as arguments to the triple chevrons as usual
  hipLaunchKernelGGL(( kernel), dim3(grid_size), dim3(block_size), 0, 0, device_array);

  // download and inspect the result on the host
  hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost);

  // print out the result element by element
  for (int row = 0; row < num_elements_y; row++) {
    for (int col = 0; col < num_elements_x; col++) {
      printf("%2d ", host_array[row * num_elements_x + col]);
    }
    printf("\n");
  }
  printf("\n");

  // deallocate memory
  free(host_array);
  hipFree(device_array);

  return 0;
}
8f4df6ff47d29db4de96a1a079013452a7185140.cu
#include <stdio.h>
#include <stdlib.h>

__global__ void kernel(int *array)
{
  int index_x = blockIdx.x * blockDim.x + threadIdx.x;
  int index_y = blockIdx.y * blockDim.y + threadIdx.y;

  // map the two 2D indices to a single linear, 1D index
  int grid_width = gridDim.x * blockDim.x;
  int index = index_y * grid_width + index_x;

  // map the two 2D block indices to a single linear, 1D block index
  int result = blockIdx.y * gridDim.x + blockIdx.x;

  // write out the result
  array[index] = result;
}

int main(void)
{
  int num_elements_x = 16;
  int num_elements_y = 16;
  int num_bytes = num_elements_x * num_elements_y * sizeof(int);

  int *device_array;
  int *host_array;

  // allocate memory in either space
  host_array = (int*)malloc(num_bytes);
  cudaMalloc((void**)&device_array, num_bytes);

  // create two dimensional 4x4 thread blocks
  dim3 block_size;
  block_size.x = 4;
  block_size.y = 4;

  // configure a two dimensional grid as well
  dim3 grid_size;
  grid_size.x = num_elements_x / block_size.x;
  grid_size.y = num_elements_y / block_size.y;

  // grid size & block size are passed as arguments to the triple chevrons as usual
  kernel<<<grid_size, block_size>>>(device_array);

  // download and inspect the result on the host
  cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);

  // print out the result element by element
  for (int row = 0; row < num_elements_y; row++) {
    for (int col = 0; col < num_elements_x; col++) {
      printf("%2d ", host_array[row * num_elements_x + col]);
    }
    printf("\n");
  }
  printf("\n");

  // deallocate memory
  free(host_array);
  cudaFree(device_array);

  return 0;
}
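The example prints the result but does not check it. A hypothetical verification helper, not in the original, captures what each element should hold: the linear index of the block that wrote it, i.e. (row / block_h) * grid_w + (col / block_w). For the 16x16 example above it would be called as count_block_index_errors(host_array, 16, 16, 4, 4, 4).

// Returns the number of elements that do not contain their block's linear index.
int count_block_index_errors(const int *host_array, int nx, int ny,
                             int block_w, int block_h, int grid_w) {
    int errors = 0;
    for (int row = 0; row < ny; row++)
        for (int col = 0; col < nx; col++)
            if (host_array[row * nx + col] != (row / block_h) * grid_w + (col / block_w))
                errors++;
    return errors;
}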
47c7628b85907f3c589623acf82a6b2c202fafd2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> //softmax w inner_produc_loss lossloss_weight #include "caffe/filler.hpp" #include "caffe/layers/inner_product_for_lmcenter_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe{ template <typename Dtype> __global__ void compute_loss(int nthreads, const int K, const Dtype* x_norm, const Dtype* w_norm ,const Dtype* weight, const Dtype* bottom_data, const Dtype* label, Dtype* diff_temp) { CUDA_KERNEL_LOOP(index, nthreads) { int i = index / K; int j = index % K; const int label_value = static_cast<int>(label[i]); diff_temp[i*K+j] = bottom_data[i*K+j] - weight[label_value*K+j] * x_norm[i] / w_norm[i]; } } template <typename Dtype> void InnerProductForLMCenterLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // Forward_cpu(bottom,top); // return; const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); if (M_ == 1) { caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1., weight, bottom_data, (Dtype)0., top_data); if (bias_term_) caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0], this->blobs_[1]->gpu_data(), top_data); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, transpose_ ? CblasNoTrans : CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weight, (Dtype)0., top_data); if (bias_term_) caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., bias_multiplier_.gpu_data(), this->blobs_[1]->gpu_data(), (Dtype)1., top_data); } // compute ||x|| num=M_ caffe_gpu_powx(bottom[0]->count(), bottom_data, Dtype(2), sqr_bottom_.mutable_gpu_data()); for (int i = 0; i < M_; i++) { Dtype a; caffe_gpu_asum<Dtype>(K_, sqr_bottom_.gpu_data() + i * K_, &a); caffe_gpu_set<Dtype>(1, std::sqrt(a), x_norm_.mutable_gpu_data() + i); } // compute ||w|| num=M_ for (int i = 0; i < M_; i++) { Dtype a; caffe_gpu_dot<Dtype>(K_, weight + static_cast<int>(bottom[1]->cpu_data()[i]) * K_, weight + static_cast<int>(bottom[1]->cpu_data()[i]) * K_, &a); caffe_gpu_set<Dtype>(1,std::sqrt(a),w_norm_.mutable_gpu_data() + i); } ////compute loss1 for top[1] int nthreads = M_*K_; hipLaunchKernelGGL(( compute_loss<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, K_, x_norm_.gpu_data(), w_norm_.gpu_data(), weight, bottom_data, bottom[1]->gpu_data(), diff_temp_.mutable_gpu_data()); Dtype dot; caffe_gpu_dot(M_ * K_, diff_temp_.gpu_data(), diff_temp_.gpu_data(), &dot); top[1]->mutable_cpu_data()[0] = Dtype(dot/M_/Dtype(2)); } template <typename Dtype> __global__ void scale(int nthreads, Dtype* memory, const Dtype* w_norm, const Dtype* w_norm1) { CUDA_KERNEL_LOOP(index, nthreads) { memory[index] = memory[index]/(w_norm[0] * w_norm1[0]); } } template <typename Dtype> __global__ void scale1(int nthreads, int M, Dtype* memory1, const Dtype* w_norm, const Dtype* x_norm, const Dtype* top1_diff) { CUDA_KERNEL_LOOP(index, nthreads) { memory1[index] = memory1[index] * x_norm[0] * top1_diff[0]/ w_norm[0] / M; } } template <typename Dtype> void InnerProductForLMCenterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //Backward_cpu(top,propagate_down,bottom); //return; if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight if (transpose_) { 
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., bottom_data, top_diff, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } else { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } Dtype* memory = memory_.mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const Dtype* label = bottom[1]->cpu_data(); int nthreads; for (int i = 0; i < M_; i++) { caffe_gpu_gemm<Dtype>(CblasTrans,CblasNoTrans,K_,K_,1,Dtype(1), weight + static_cast<int>(label[i]) * K_, weight + static_cast<int>(label[i]) * K_, Dtype(0), memory); nthreads = K_*K_; hipLaunchKernelGGL(( scale<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, memory, w_norm_.gpu_data() + i, w_norm_.gpu_data() + i); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, K_, K_, Dtype(1), diff_temp_.gpu_data() + i * K_, memory, Dtype(0), memory1_.mutable_gpu_data() + i * K_); caffe_gpu_sub<Dtype>(K_, memory1_.gpu_data() + i * K_, diff_temp_.gpu_data() + i * K_, memory1_.mutable_gpu_data() + i * K_); nthreads = K_; hipLaunchKernelGGL(( scale1<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, M_, memory1_.mutable_gpu_data() + i * K_, w_norm_.gpu_data() + i, x_norm_.gpu_data() + i, top[1]->gpu_diff()); caffe_gpu_axpby<Dtype>(K_, Dtype(1), memory1_.mutable_gpu_data() + i * K_, Dtype(1), this->blobs_[0]->mutable_gpu_diff() + static_cast<int>(label[i]) * K_); } } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff, bias_multiplier_.gpu_data(), (Dtype)1., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bottom data if (transpose_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff, this->blobs_[0]->gpu_data(), (Dtype)0., bottom[0]->mutable_gpu_diff()); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., top_diff, this->blobs_[0]->gpu_data(), (Dtype)0., bottom[0]->mutable_gpu_diff()); } Dtype* memory = memory_.mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const Dtype* label = bottom[1]->cpu_data(); int nthreads; for (int i = 0; i < M_; i++) { caffe_gpu_gemm<Dtype>(CblasTrans,CblasNoTrans,K_,K_,1,Dtype(1), weight + static_cast<int>(label[i]) * K_, bottom[0]->gpu_data() + i * K_, Dtype(0), memory); nthreads = K_*K_; hipLaunchKernelGGL(( scale<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, memory, w_norm_.gpu_data() + i, x_norm_.gpu_data() + i); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, K_, K_, Dtype(1), diff_temp_.gpu_data() + i * K_, memory_.gpu_data(), Dtype(0), memory1_.mutable_gpu_data() + i * K_); caffe_gpu_sub<Dtype>(K_, diff_temp_.gpu_data() + i * K_, memory1_.gpu_data() + i * K_, memory1_.mutable_gpu_data() + i * K_); caffe_gpu_axpby<Dtype>(K_, top[1]->cpu_diff()[0]/M_, memory1_.gpu_data() + i * K_, Dtype(1), bottom[0]->mutable_gpu_diff() + i * K_); } } } INSTANTIATE_LAYER_GPU_FUNCS(InnerProductForLMCenterLayer); }
47c7628b85907f3c589623acf82a6b2c202fafd2.cu
#include <vector> //类中心是softmax层的参数 w 配合inner_produc_loss层用 超参数在loss层指定loss_weight #include "caffe/filler.hpp" #include "caffe/layers/inner_product_for_lmcenter_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe{ template <typename Dtype> __global__ void compute_loss(int nthreads, const int K, const Dtype* x_norm, const Dtype* w_norm ,const Dtype* weight, const Dtype* bottom_data, const Dtype* label, Dtype* diff_temp) { CUDA_KERNEL_LOOP(index, nthreads) { int i = index / K; int j = index % K; const int label_value = static_cast<int>(label[i]); diff_temp[i*K+j] = bottom_data[i*K+j] - weight[label_value*K+j] * x_norm[i] / w_norm[i]; } } template <typename Dtype> void InnerProductForLMCenterLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // Forward_cpu(bottom,top); // return; const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); if (M_ == 1) { caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1., weight, bottom_data, (Dtype)0., top_data); if (bias_term_) caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0], this->blobs_[1]->gpu_data(), top_data); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, transpose_ ? CblasNoTrans : CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weight, (Dtype)0., top_data); if (bias_term_) caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., bias_multiplier_.gpu_data(), this->blobs_[1]->gpu_data(), (Dtype)1., top_data); } // compute ||x|| num=M_ caffe_gpu_powx(bottom[0]->count(), bottom_data, Dtype(2), sqr_bottom_.mutable_gpu_data()); for (int i = 0; i < M_; i++) { Dtype a; caffe_gpu_asum<Dtype>(K_, sqr_bottom_.gpu_data() + i * K_, &a); caffe_gpu_set<Dtype>(1, std::sqrt(a), x_norm_.mutable_gpu_data() + i); } // compute ||w|| num=M_ for (int i = 0; i < M_; i++) { Dtype a; caffe_gpu_dot<Dtype>(K_, weight + static_cast<int>(bottom[1]->cpu_data()[i]) * K_, weight + static_cast<int>(bottom[1]->cpu_data()[i]) * K_, &a); caffe_gpu_set<Dtype>(1,std::sqrt(a),w_norm_.mutable_gpu_data() + i); } ////compute loss1 for top[1] int nthreads = M_*K_; compute_loss<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, x_norm_.gpu_data(), w_norm_.gpu_data(), weight, bottom_data, bottom[1]->gpu_data(), diff_temp_.mutable_gpu_data()); Dtype dot; caffe_gpu_dot(M_ * K_, diff_temp_.gpu_data(), diff_temp_.gpu_data(), &dot); top[1]->mutable_cpu_data()[0] = Dtype(dot/M_/Dtype(2)); } template <typename Dtype> __global__ void scale(int nthreads, Dtype* memory, const Dtype* w_norm, const Dtype* w_norm1) { CUDA_KERNEL_LOOP(index, nthreads) { memory[index] = memory[index]/(w_norm[0] * w_norm1[0]); } } template <typename Dtype> __global__ void scale1(int nthreads, int M, Dtype* memory1, const Dtype* w_norm, const Dtype* x_norm, const Dtype* top1_diff) { CUDA_KERNEL_LOOP(index, nthreads) { memory1[index] = memory1[index] * x_norm[0] * top1_diff[0]/ w_norm[0] / M; } } template <typename Dtype> void InnerProductForLMCenterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //Backward_cpu(top,propagate_down,bottom); //return; if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight if (transpose_) { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., bottom_data, top_diff, (Dtype)1., 
this->blobs_[0]->mutable_gpu_diff()); } else { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } Dtype* memory = memory_.mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const Dtype* label = bottom[1]->cpu_data(); int nthreads; for (int i = 0; i < M_; i++) { caffe_gpu_gemm<Dtype>(CblasTrans,CblasNoTrans,K_,K_,1,Dtype(1), weight + static_cast<int>(label[i]) * K_, weight + static_cast<int>(label[i]) * K_, Dtype(0), memory); nthreads = K_*K_; scale<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, memory, w_norm_.gpu_data() + i, w_norm_.gpu_data() + i); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, K_, K_, Dtype(1), diff_temp_.gpu_data() + i * K_, memory, Dtype(0), memory1_.mutable_gpu_data() + i * K_); caffe_gpu_sub<Dtype>(K_, memory1_.gpu_data() + i * K_, diff_temp_.gpu_data() + i * K_, memory1_.mutable_gpu_data() + i * K_); nthreads = K_; scale1<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, M_, memory1_.mutable_gpu_data() + i * K_, w_norm_.gpu_data() + i, x_norm_.gpu_data() + i, top[1]->gpu_diff()); caffe_gpu_axpby<Dtype>(K_, Dtype(1), memory1_.mutable_gpu_data() + i * K_, Dtype(1), this->blobs_[0]->mutable_gpu_diff() + static_cast<int>(label[i]) * K_); } } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff, bias_multiplier_.gpu_data(), (Dtype)1., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bottom data if (transpose_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff, this->blobs_[0]->gpu_data(), (Dtype)0., bottom[0]->mutable_gpu_diff()); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., top_diff, this->blobs_[0]->gpu_data(), (Dtype)0., bottom[0]->mutable_gpu_diff()); } Dtype* memory = memory_.mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const Dtype* label = bottom[1]->cpu_data(); int nthreads; for (int i = 0; i < M_; i++) { caffe_gpu_gemm<Dtype>(CblasTrans,CblasNoTrans,K_,K_,1,Dtype(1), weight + static_cast<int>(label[i]) * K_, bottom[0]->gpu_data() + i * K_, Dtype(0), memory); nthreads = K_*K_; scale<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, memory, w_norm_.gpu_data() + i, x_norm_.gpu_data() + i); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, K_, K_, Dtype(1), diff_temp_.gpu_data() + i * K_, memory_.gpu_data(), Dtype(0), memory1_.mutable_gpu_data() + i * K_); caffe_gpu_sub<Dtype>(K_, diff_temp_.gpu_data() + i * K_, memory1_.gpu_data() + i * K_, memory1_.mutable_gpu_data() + i * K_); caffe_gpu_axpby<Dtype>(K_, top[1]->cpu_diff()[0]/M_, memory1_.gpu_data() + i * K_, Dtype(1), bottom[0]->mutable_gpu_diff() + i * K_); } } } INSTANTIATE_LAYER_GPU_FUNCS(InnerProductForLMCenterLayer); }
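Reading the forward pass (compute_loss followed by the caffe_gpu_dot over diff_temp_), the scalar written to top[1] appears to be a center-style loss that pulls each feature toward a rescaled copy of its class weight vector; this is my reading of the kernel, not a statement from the layer's documentation:

\[
\mathcal{L}_{\text{top[1]}} \;=\; \frac{1}{2M}\sum_{i=1}^{M}\left\lVert\, x_i \;-\; \frac{\lVert x_i\rVert_2}{\lVert w_{y_i}\rVert_2}\, w_{y_i} \right\rVert_2^2
\]

where \(M\) is the batch size (M_), \(x_i \in \mathbb{R}^{K}\) is the i-th bottom feature, \(y_i\) its label, and \(w_{y_i}\) the corresponding row of the weight blob.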
0d2dee05af5f0a90d5baad340ba46c5894fce747.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <time.h> /******************************************************************** CUDA Kernel *********************************************************************/ __global__ void matrixMul (float* C, float* A, float* B, int TA) { /* calcul des coordonnees du point de C a calculer */ int i = blockIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; /* calcul de C[i][j] */ int cc = 0; for (int k = 0; k < TA; ++ k) cc += A[i * TA + k] * B[k * TA + j]; /* stockage */ C[i * TA + j] = cc; } /******************************************************************** Programme main *********************************************************************/ int main (int argc, char** argv) { int i, j, TM, GRID_SIZE_X, GRID_SIZE_Y, BLOCK_SIZE_X; hipError_t cerror; const int THREADS_PER_BLOCK = 1024; // /* pour le calcul du temps de traitement sur GPU */ float tc; hipEvent_t depart, arret; hipEventCreate(&depart); hipEventCreate(&arret); /* valeurs par defaut */ TM = 2048; /* TM peut etre lu comme arg1 de la commande */ if (argc > 1) { TM = atoi(argv[1]); } GRID_SIZE_X = TM / THREADS_PER_BLOCK; GRID_SIZE_Y = TM; BLOCK_SIZE_X = THREADS_PER_BLOCK; /* definiton de la grille et des blocs */ dim3 grid(GRID_SIZE_X, GRID_SIZE_Y); dim3 block(BLOCK_SIZE_X); printf("taille grille : %d - %d \n", GRID_SIZE_X, GRID_SIZE_Y); printf("taille bloc : %d \n", BLOCK_SIZE_X); /* allocation des matrices sur CPU */ unsigned int msize_A = TM * TM * sizeof(float); unsigned int msize_B = TM * TM * sizeof(float); unsigned int msize_C = TM * TM * sizeof(float); float* h_A = (float*) malloc(msize_A); float* h_B = (float*) malloc(msize_B); float* h_C = (float*) malloc(msize_C); /* initialisation des matrices avec des valeurs permettant de verifier le resultat*/ for (i = 0; i < TM; i++){ for (j = 0; j < TM; j++){ h_A[i * TM + j] = 1.0; h_B[i * TM + j] = 1.0; h_C[i * TM + j] = 0.0; if (i == j) { h_A[i * TM + j] = (float) (i + 1); h_B[i * TM + j] = (float) (i + 1); } } } /* allocation des matrices sur GPU */ float *d_A; hipMalloc((void**) &d_A, msize_A); float *d_B; hipMalloc((void**) &d_B, msize_B); float *d_C; hipMalloc((void**) &d_C, msize_C); /* mesure du temps : top depart */ hipEventRecord(depart, 0); /* copie des matrives A et B depuis le CPU vers le GPU */ hipMemcpy(d_A, h_A, msize_A, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, msize_B, hipMemcpyHostToDevice); hipMemcpy(d_C, h_C, msize_C, hipMemcpyHostToDevice); /* lancement des threads */ hipLaunchKernelGGL(( matrixMul), dim3(grid), dim3(block) , 0, 0, d_C, d_A, d_B, TM); /* Recuperation valeur de retour GPU */ cerror = hipGetLastError(); printf(" retour %d \n", (int) cerror); /* copie de la matrice C depuis le GPU */ hipMemcpy(h_C, d_C, msize_C, hipMemcpyDeviceToHost); /* mesure du temps */ hipEventRecord(arret, 0); hipEventSynchronize(arret); hipEventElapsedTime(&tc, depart, arret); printf("Temps calcul : %f seconde\n", tc / 1000.0); /* verification du resultat */ for (i = 0; i < TM; i++) { for (j = 0; j < TM; j++) { if ((i == j) && (h_C[i * TM + j] != (float)((i + 1) * (i + 1) + TM - 1))) { printf("Erreur i: %d j: %d %f\n", i, j, h_C[i * TM + j] ); exit(1); } else if ((i != j) && (h_C[i * TM + j] != (float)(i + j + TM))) { printf("Erreur i: %d j: %d\n", i, j); exit(1); } } } /* liberation de la memoire */ free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); hipEventDestroy(depart); hipEventDestroy(arret); }
0d2dee05af5f0a90d5baad340ba46c5894fce747.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <time.h> /******************************************************************** CUDA Kernel *********************************************************************/ __global__ void matrixMul (float* C, float* A, float* B, int TA) { /* calcul des coordonnees du point de C a calculer */ int i = blockIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; /* calcul de C[i][j] */ int cc = 0; for (int k = 0; k < TA; ++ k) cc += A[i * TA + k] * B[k * TA + j]; /* stockage */ C[i * TA + j] = cc; } /******************************************************************** Programme main *********************************************************************/ int main (int argc, char** argv) { int i, j, TM, GRID_SIZE_X, GRID_SIZE_Y, BLOCK_SIZE_X; cudaError_t cerror; const int THREADS_PER_BLOCK = 1024; // /* pour le calcul du temps de traitement sur GPU */ float tc; cudaEvent_t depart, arret; cudaEventCreate(&depart); cudaEventCreate(&arret); /* valeurs par defaut */ TM = 2048; /* TM peut etre lu comme arg1 de la commande */ if (argc > 1) { TM = atoi(argv[1]); } GRID_SIZE_X = TM / THREADS_PER_BLOCK; GRID_SIZE_Y = TM; BLOCK_SIZE_X = THREADS_PER_BLOCK; /* definiton de la grille et des blocs */ dim3 grid(GRID_SIZE_X, GRID_SIZE_Y); dim3 block(BLOCK_SIZE_X); printf("taille grille : %d - %d \n", GRID_SIZE_X, GRID_SIZE_Y); printf("taille bloc : %d \n", BLOCK_SIZE_X); /* allocation des matrices sur CPU */ unsigned int msize_A = TM * TM * sizeof(float); unsigned int msize_B = TM * TM * sizeof(float); unsigned int msize_C = TM * TM * sizeof(float); float* h_A = (float*) malloc(msize_A); float* h_B = (float*) malloc(msize_B); float* h_C = (float*) malloc(msize_C); /* initialisation des matrices avec des valeurs permettant de verifier le resultat*/ for (i = 0; i < TM; i++){ for (j = 0; j < TM; j++){ h_A[i * TM + j] = 1.0; h_B[i * TM + j] = 1.0; h_C[i * TM + j] = 0.0; if (i == j) { h_A[i * TM + j] = (float) (i + 1); h_B[i * TM + j] = (float) (i + 1); } } } /* allocation des matrices sur GPU */ float *d_A; cudaMalloc((void**) &d_A, msize_A); float *d_B; cudaMalloc((void**) &d_B, msize_B); float *d_C; cudaMalloc((void**) &d_C, msize_C); /* mesure du temps : top depart */ cudaEventRecord(depart, 0); /* copie des matrives A et B depuis le CPU vers le GPU */ cudaMemcpy(d_A, h_A, msize_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, msize_B, cudaMemcpyHostToDevice); cudaMemcpy(d_C, h_C, msize_C, cudaMemcpyHostToDevice); /* lancement des threads */ matrixMul<<< grid, block >>>(d_C, d_A, d_B, TM); /* Recuperation valeur de retour GPU */ cerror = cudaGetLastError(); printf(" retour %d \n", (int) cerror); /* copie de la matrice C depuis le GPU */ cudaMemcpy(h_C, d_C, msize_C, cudaMemcpyDeviceToHost); /* mesure du temps */ cudaEventRecord(arret, 0); cudaEventSynchronize(arret); cudaEventElapsedTime(&tc, depart, arret); printf("Temps calcul : %f seconde\n", tc / 1000.0); /* verification du resultat */ for (i = 0; i < TM; i++) { for (j = 0; j < TM; j++) { if ((i == j) && (h_C[i * TM + j] != (float)((i + 1) * (i + 1) + TM - 1))) { printf("Erreur i: %d j: %d %f\n", i, j, h_C[i * TM + j] ); exit(1); } else if ((i != j) && (h_C[i * TM + j] != (float)(i + j + TM))) { printf("Erreur i: %d j: %d\n", i, j); exit(1); } } } /* liberation de la memoire */ free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaEventDestroy(depart); cudaEventDestroy(arret); }
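Both versions of matrixMul accumulate the dot product in an int (`int cc = 0;`), which truncates general floating-point inputs; the test only passes because the initialization uses integer-valued matrices. A hedged sketch of the same kernel with a float accumulator, otherwise identical in structure (one thread per element of C, blockIdx.y selecting the row):

__global__ void matrixMulFloatAcc(float *C, const float *A, const float *B, int TA)
{
    int i = blockIdx.y;                               // row of C
    int j = blockIdx.x * blockDim.x + threadIdx.x;    // column of C

    float cc = 0.0f;                                  // accumulate in float, not int
    for (int k = 0; k < TA; ++k)
        cc += A[i * TA + k] * B[k * TA + j];

    C[i * TA + j] = cc;
}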
2211bcd5831e8396c76ab699bd041aa1be6b9a69.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void sReduceSingle(int *idata, int *single, unsigned int ncols)
{
  int i;
  unsigned int tid = threadIdx.x;
  extern __shared__ int sdata[];

  unsigned int startPos = blockDim.x + threadIdx.x;
  int colsPerThread = ncols/blockDim.x;
  int myPart = 0;
  for(i=0;i<colsPerThread;i++)
  {
    myPart+=idata[startPos+i];
  }
  sdata[tid]=myPart;
  __syncthreads();

  unsigned int s;
  for(s=blockDim.x/2;s>0;s>>=1)
  {
    if(tid<s)
    {
      sdata[tid] += sdata[tid+s];
    }
    __syncthreads();
  }

  if(tid==0)*single=sdata[0];
}
2211bcd5831e8396c76ab699bd041aa1be6b9a69.cu
#include "includes.h" __global__ void sReduceSingle(int *idata,int *single,unsigned int ncols) { int i; unsigned int tid = threadIdx.x; extern __shared__ int sdata[]; unsigned int startPos = blockDim.x + threadIdx.x; int colsPerThread = ncols/blockDim.x; int myPart = 0; for(i=0;i<colsPerThread;i++) { myPart+=idata[startPos+i]; } sdata[tid]=myPart; __syncthreads(); unsigned int s; for(s=blockDim.x/2;s>0;s>>=1) { if(tid<s) { sdata[tid] += sdata[tid+s]; } __syncthreads(); } if(tid==0)*single=sdata[0]; }
28e937bda20123680f866382bbb3017bb05d4ca1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdint.h> #include <stdio.h> #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { hipEvent_t start; hipEvent_t stop; GpuTimer() { hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer() { hipEventDestroy(start); hipEventDestroy(stop); } void Start() { hipEventRecord(start, 0); hipEventSynchronize(start); } void Stop() { hipEventRecord(stop, 0); } float Elapsed() { float elapsed; hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); return elapsed; } }; void printArray(uint32_t *a, int n) { for (int i = 0; i < n; i++) printf("%i ", a[i]); printf("\n"); } // Sequential radix sort // Assume: nBits (k in slides) in {1, 2, 4, 8, 16} void sortByHost(const uint32_t *in, int n, uint32_t *out, int nBits) { int nBins = 1 << nBits; // 2^nBits int *hist = (int *)malloc(nBins * sizeof(int)); int *histScan = (int *)malloc(nBins * sizeof(int)); // In each counting sort, we sort data in "src" and write result to "dst" // Then, we swap these 2 pointers and go to the next counting sort // At first, we assign "src = in" and "dest = out" // However, the data pointed by "in" is read-only // --> we create a copy of this data and assign "src" to the address of this // copy uint32_t *src = (uint32_t *)malloc(n * sizeof(uint32_t)); memcpy(src, in, n * sizeof(uint32_t)); uint32_t *originalSrc = src; // Use originalSrc to free memory later uint32_t *dst = out; // Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit) // (Each digit consists of nBits bits) // In each loop, sort elements according to the current digit // (using STABLE counting sort) for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit memset(hist, 0, nBins * sizeof(int)); for (int i = 0; i < n; ++i) { int bin = (src[i] >> bit) & (nBins - 1); hist[bin]++; } // TODO: Scan "hist" (exclusively) and save the result to "histScan" histScan[0] = 0; for (int bin = 1; bin < nBins; ++bin) { histScan[bin] = histScan[bin - 1] + hist[bin - 1]; } // TODO: From "histScan", scatter elements in "src" to correct locations in // "dst" for (int i = 0; i < n; ++i) { int bin = (src[i] >> bit) & (nBins - 1); dst[histScan[bin]] = src[i]; histScan[bin]++; } // TODO: Swap "src" and "dst" uint32_t *temp = src; src = dst; dst = temp; } // TODO: Copy result to "out" if (src != out) { memcpy(out, src, n * sizeof(uint32_t)); } // Free memories free(hist); free(histScan); free(originalSrc); } __global__ void computeHistKernel(uint32_t *in, int n, int *hist, int nBins, int bit) { // TODO extern __shared__ int s_hist[]; const int i = blockIdx.x * blockDim.x + threadIdx.x; for (int s_i = threadIdx.x; s_i < nBins; s_i += blockDim.x) { s_hist[s_i] = 0; } __syncthreads(); // Each block computes its local hist using atomic on SMEM if (i < n) { int bin = (in[i] >> bit) & (nBins - 1); atomicAdd(&s_hist[bin], 1); } __syncthreads(); // Each block adds its local hist to global hist using atomic on GMEM for (int s_i = threadIdx.x; s_i < nBins; s_i += blockDim.x) { atomicAdd(&hist[s_i], s_hist[s_i]); } } __global__ void scanBlkKernel(uint32_t *in, int n, uint32_t *out, uint32_t *blkSums, int bit) { // TODO int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) { return; } extern __shared__ uint32_t 
s_in[]; s_in[threadIdx.x] = (in[i] >> bit) & 1; __syncthreads(); for (int stride = 1; stride < blockDim.x; stride *= 2) { int strideVal; if (threadIdx.x >= stride) { strideVal = s_in[threadIdx.x - stride]; } __syncthreads(); if (threadIdx.x >= stride) { s_in[threadIdx.x] += strideVal; } __syncthreads(); } if (blkSums && threadIdx.x == blockDim.x - 1) { blkSums[blockIdx.x] = s_in[threadIdx.x]; } out[i] = s_in[threadIdx.x]; } // TODO: You can define necessary functions here __global__ void addBlkSums(uint32_t *in, int n, uint32_t *blkSums) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) { return; } in[i] += blkSums[blockIdx.x]; } __global__ void scatter(const uint32_t *in, int n, const uint32_t *inScan, uint32_t *out, int bit) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) { return; } int rank; if ((in[i] >> bit) & 1) { const int nZeros = n - inScan[n - 1] - ((in[n - 1] >> bit) & 1); rank = nZeros + inScan[i]; } else { rank = i - inScan[i]; } out[rank] = in[i]; } // (Partially) Parallel radix sort: implement parallel histogram and parallel // scan in counting sort Assume: nBits (k in slides) in {1, 2, 4, 8, 16} Why // "int * blockSizes"? Because we may want different block sizes for diffrent // kernels: // blockSizes[0] for the histogram kernel // blockSizes[1] for the scan kernel void sortByDevice(const uint32_t *in, int n, uint32_t *out, int nBits, int *blockSizes) { // TODO dim3 scanBlockSize(blockSizes[1]); int scanBlockCount = (n - 1) / scanBlockSize.x + 1; dim3 scanGridSize(scanBlockCount); size_t scanSmemSize = scanBlockSize.x * sizeof(uint32_t); uint32_t *d_in; uint32_t *d_out; uint32_t *d_inScan; uint32_t *d_blkSums; CHECK(hipMalloc(&d_in, n * sizeof(uint32_t))); CHECK(hipMalloc(&d_out, n * sizeof(uint32_t))); CHECK(hipMalloc(&d_inScan, n * sizeof(uint32_t))); CHECK(hipMalloc(&d_blkSums, scanBlockCount * sizeof(uint32_t))); uint32_t *blkSums = (uint32_t *)malloc(scanBlockCount * sizeof(uint32_t)); CHECK(hipMemcpy(d_in, in, n * sizeof(uint32_t), hipMemcpyHostToDevice)); for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // scan each block CHECK(hipMemset(d_inScan, 0, sizeof(uint32_t))); hipLaunchKernelGGL(( scanBlkKernel), dim3(scanGridSize), dim3(scanBlockSize), scanSmemSize, 0, d_in, n - 1, d_inScan + 1, d_blkSums, bit); CHECK(hipPeekAtLastError()); // scan block sums CHECK(hipMemcpy(blkSums, d_blkSums, scanBlockCount * sizeof(uint32_t), hipMemcpyDeviceToHost)); for (int i = 1; i < scanBlockCount; ++i) { blkSums[i] = blkSums[i - 1] + blkSums[i]; } CHECK(hipMemcpy(d_blkSums, blkSums, (scanBlockCount - 1) * sizeof(uint32_t), hipMemcpyHostToDevice)); // add scanned block sums to 2nd+ block hipLaunchKernelGGL(( addBlkSums), dim3(scanGridSize), dim3(scanBlockSize), 0, 0, d_inScan + scanBlockSize.x + 1, n - scanBlockSize.x - 1, d_blkSums); CHECK(hipPeekAtLastError()); // scatter hipLaunchKernelGGL(( scatter), dim3(scanGridSize), dim3(scanBlockSize), 0, 0, d_in, n, d_inScan, d_out, bit); CHECK(hipPeekAtLastError()); // Swap "src" and "dst" uint32_t *temp = d_in; d_in = d_out; d_out = temp; } // Copy result to "out" CHECK(hipMemcpy(out, d_in, n * sizeof(uint32_t), hipMemcpyDeviceToHost)); CHECK(hipFree(d_in)); CHECK(hipFree(d_out)); CHECK(hipFree(d_inScan)); CHECK(hipFree(d_blkSums)); free(blkSums); } // Radix sort void sort(const uint32_t *in, int n, uint32_t *out, int nBits, bool useDevice = false, int *blockSizes = NULL) { GpuTimer timer; timer.Start(); if (useDevice == false) { printf("\nRadix sort by host\n"); sortByHost(in, n, out, nBits); 
} else // use device { printf("\nRadix sort by device\n"); sortByDevice(in, n, out, nBits, blockSizes); } timer.Stop(); printf("Time: %.3f ms\n", timer.Elapsed()); } void printDeviceInfo() { hipDeviceProp_t devProv; CHECK(hipGetDeviceProperties(&devProv, 0)); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %zu byte\n", devProv.totalGlobalMem); printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(uint32_t *out, uint32_t *correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } int main(int argc, char **argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int n = (1 << 24) + 1; /* n = 600; */ printf("\nInput size: %d\n", n); // ALLOCATE MEMORIES size_t bytes = n * sizeof(uint32_t); uint32_t *in = (uint32_t *)malloc(bytes); uint32_t *out = (uint32_t *)malloc(bytes); // Device result uint32_t *correctOut = (uint32_t *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = rand(); // SET UP NBITS int nBits = 4; // Default if (argc > 1) nBits = atoi(argv[1]); printf("\nNum bits per digit: %d\n", nBits); // DETERMINE BLOCK SIZES int blockSizes[2] = {512, 512}; // One for histogram, one for scan if (argc == 4) { blockSizes[0] = atoi(argv[2]); blockSizes[1] = atoi(argv[3]); } printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]); // SORT BY HOST sort(in, n, correctOut, nBits); nBits = 1; // SORT BY DEVICE sort(in, n, out, nBits, true, blockSizes); checkCorrectness(out, correctOut, n); // FREE MEMORIES free(in); free(out); free(correctOut); return EXIT_SUCCESS; }
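The scatter kernel in the file above implements a stable split on one bit: elements whose bit is 0 keep their relative order at the front, elements whose bit is 1 follow, and the rank of a 0 at position i is i minus the exclusive scan of the bit values, while the rank of a 1 is nZeros plus that scan. A sequential reference of that rank computation, offered only as a sketch to mirror the device logic (it assumes <stdint.h>, which the file already includes):

void split_by_bit(const uint32_t *in, uint32_t *out, int n, int bit) {
    int total_ones = 0;
    for (int i = 0; i < n; ++i) total_ones += (in[i] >> bit) & 1;
    int n_zeros = n - total_ones;

    int ones_before = 0;                   // exclusive scan of the bit, kept incrementally
    for (int i = 0; i < n; ++i) {
        int b = (in[i] >> bit) & 1;
        int rank = b ? (n_zeros + ones_before)   // ones go after all zeros
                     : (i - ones_before);        // zeros keep their relative order
        out[rank] = in[i];
        ones_before += b;
    }
}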
28e937bda20123680f866382bbb3017bb05d4ca1.cu
#include <stdint.h> #include <stdio.h> #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ cudaGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { cudaEvent_t start; cudaEvent_t stop; GpuTimer() { cudaEventCreate(&start); cudaEventCreate(&stop); } ~GpuTimer() { cudaEventDestroy(start); cudaEventDestroy(stop); } void Start() { cudaEventRecord(start, 0); cudaEventSynchronize(start); } void Stop() { cudaEventRecord(stop, 0); } float Elapsed() { float elapsed; cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); return elapsed; } }; void printArray(uint32_t *a, int n) { for (int i = 0; i < n; i++) printf("%i ", a[i]); printf("\n"); } // Sequential radix sort // Assume: nBits (k in slides) in {1, 2, 4, 8, 16} void sortByHost(const uint32_t *in, int n, uint32_t *out, int nBits) { int nBins = 1 << nBits; // 2^nBits int *hist = (int *)malloc(nBins * sizeof(int)); int *histScan = (int *)malloc(nBins * sizeof(int)); // In each counting sort, we sort data in "src" and write result to "dst" // Then, we swap these 2 pointers and go to the next counting sort // At first, we assign "src = in" and "dest = out" // However, the data pointed by "in" is read-only // --> we create a copy of this data and assign "src" to the address of this // copy uint32_t *src = (uint32_t *)malloc(n * sizeof(uint32_t)); memcpy(src, in, n * sizeof(uint32_t)); uint32_t *originalSrc = src; // Use originalSrc to free memory later uint32_t *dst = out; // Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit) // (Each digit consists of nBits bits) // In each loop, sort elements according to the current digit // (using STABLE counting sort) for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit memset(hist, 0, nBins * sizeof(int)); for (int i = 0; i < n; ++i) { int bin = (src[i] >> bit) & (nBins - 1); hist[bin]++; } // TODO: Scan "hist" (exclusively) and save the result to "histScan" histScan[0] = 0; for (int bin = 1; bin < nBins; ++bin) { histScan[bin] = histScan[bin - 1] + hist[bin - 1]; } // TODO: From "histScan", scatter elements in "src" to correct locations in // "dst" for (int i = 0; i < n; ++i) { int bin = (src[i] >> bit) & (nBins - 1); dst[histScan[bin]] = src[i]; histScan[bin]++; } // TODO: Swap "src" and "dst" uint32_t *temp = src; src = dst; dst = temp; } // TODO: Copy result to "out" if (src != out) { memcpy(out, src, n * sizeof(uint32_t)); } // Free memories free(hist); free(histScan); free(originalSrc); } __global__ void computeHistKernel(uint32_t *in, int n, int *hist, int nBins, int bit) { // TODO extern __shared__ int s_hist[]; const int i = blockIdx.x * blockDim.x + threadIdx.x; for (int s_i = threadIdx.x; s_i < nBins; s_i += blockDim.x) { s_hist[s_i] = 0; } __syncthreads(); // Each block computes its local hist using atomic on SMEM if (i < n) { int bin = (in[i] >> bit) & (nBins - 1); atomicAdd(&s_hist[bin], 1); } __syncthreads(); // Each block adds its local hist to global hist using atomic on GMEM for (int s_i = threadIdx.x; s_i < nBins; s_i += blockDim.x) { atomicAdd(&hist[s_i], s_hist[s_i]); } } __global__ void scanBlkKernel(uint32_t *in, int n, uint32_t *out, uint32_t *blkSums, int bit) { // TODO int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) { return; } extern __shared__ uint32_t s_in[]; s_in[threadIdx.x] = (in[i] >> bit) & 1; __syncthreads(); for (int 
stride = 1; stride < blockDim.x; stride *= 2) { int strideVal; if (threadIdx.x >= stride) { strideVal = s_in[threadIdx.x - stride]; } __syncthreads(); if (threadIdx.x >= stride) { s_in[threadIdx.x] += strideVal; } __syncthreads(); } if (blkSums && threadIdx.x == blockDim.x - 1) { blkSums[blockIdx.x] = s_in[threadIdx.x]; } out[i] = s_in[threadIdx.x]; } // TODO: You can define necessary functions here __global__ void addBlkSums(uint32_t *in, int n, uint32_t *blkSums) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) { return; } in[i] += blkSums[blockIdx.x]; } __global__ void scatter(const uint32_t *in, int n, const uint32_t *inScan, uint32_t *out, int bit) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) { return; } int rank; if ((in[i] >> bit) & 1) { const int nZeros = n - inScan[n - 1] - ((in[n - 1] >> bit) & 1); rank = nZeros + inScan[i]; } else { rank = i - inScan[i]; } out[rank] = in[i]; } // (Partially) Parallel radix sort: implement parallel histogram and parallel // scan in counting sort Assume: nBits (k in slides) in {1, 2, 4, 8, 16} Why // "int * blockSizes"? Because we may want different block sizes for diffrent // kernels: // blockSizes[0] for the histogram kernel // blockSizes[1] for the scan kernel void sortByDevice(const uint32_t *in, int n, uint32_t *out, int nBits, int *blockSizes) { // TODO dim3 scanBlockSize(blockSizes[1]); int scanBlockCount = (n - 1) / scanBlockSize.x + 1; dim3 scanGridSize(scanBlockCount); size_t scanSmemSize = scanBlockSize.x * sizeof(uint32_t); uint32_t *d_in; uint32_t *d_out; uint32_t *d_inScan; uint32_t *d_blkSums; CHECK(cudaMalloc(&d_in, n * sizeof(uint32_t))); CHECK(cudaMalloc(&d_out, n * sizeof(uint32_t))); CHECK(cudaMalloc(&d_inScan, n * sizeof(uint32_t))); CHECK(cudaMalloc(&d_blkSums, scanBlockCount * sizeof(uint32_t))); uint32_t *blkSums = (uint32_t *)malloc(scanBlockCount * sizeof(uint32_t)); CHECK(cudaMemcpy(d_in, in, n * sizeof(uint32_t), cudaMemcpyHostToDevice)); for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // scan each block CHECK(cudaMemset(d_inScan, 0, sizeof(uint32_t))); scanBlkKernel<<<scanGridSize, scanBlockSize, scanSmemSize>>>( d_in, n - 1, d_inScan + 1, d_blkSums, bit); CHECK(cudaPeekAtLastError()); // scan block sums CHECK(cudaMemcpy(blkSums, d_blkSums, scanBlockCount * sizeof(uint32_t), cudaMemcpyDeviceToHost)); for (int i = 1; i < scanBlockCount; ++i) { blkSums[i] = blkSums[i - 1] + blkSums[i]; } CHECK(cudaMemcpy(d_blkSums, blkSums, (scanBlockCount - 1) * sizeof(uint32_t), cudaMemcpyHostToDevice)); // add scanned block sums to 2nd+ block addBlkSums<<<scanGridSize, scanBlockSize>>>( d_inScan + scanBlockSize.x + 1, n - scanBlockSize.x - 1, d_blkSums); CHECK(cudaPeekAtLastError()); // scatter scatter<<<scanGridSize, scanBlockSize>>>(d_in, n, d_inScan, d_out, bit); CHECK(cudaPeekAtLastError()); // Swap "src" and "dst" uint32_t *temp = d_in; d_in = d_out; d_out = temp; } // Copy result to "out" CHECK(cudaMemcpy(out, d_in, n * sizeof(uint32_t), cudaMemcpyDeviceToHost)); CHECK(cudaFree(d_in)); CHECK(cudaFree(d_out)); CHECK(cudaFree(d_inScan)); CHECK(cudaFree(d_blkSums)); free(blkSums); } // Radix sort void sort(const uint32_t *in, int n, uint32_t *out, int nBits, bool useDevice = false, int *blockSizes = NULL) { GpuTimer timer; timer.Start(); if (useDevice == false) { printf("\nRadix sort by host\n"); sortByHost(in, n, out, nBits); } else // use device { printf("\nRadix sort by device\n"); sortByDevice(in, n, out, nBits, blockSizes); } timer.Stop(); printf("Time: %.3f ms\n", 
timer.Elapsed()); } void printDeviceInfo() { cudaDeviceProp devProv; CHECK(cudaGetDeviceProperties(&devProv, 0)); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %zu byte\n", devProv.totalGlobalMem); printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(uint32_t *out, uint32_t *correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } int main(int argc, char **argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int n = (1 << 24) + 1; /* n = 600; */ printf("\nInput size: %d\n", n); // ALLOCATE MEMORIES size_t bytes = n * sizeof(uint32_t); uint32_t *in = (uint32_t *)malloc(bytes); uint32_t *out = (uint32_t *)malloc(bytes); // Device result uint32_t *correctOut = (uint32_t *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = rand(); // SET UP NBITS int nBits = 4; // Default if (argc > 1) nBits = atoi(argv[1]); printf("\nNum bits per digit: %d\n", nBits); // DETERMINE BLOCK SIZES int blockSizes[2] = {512, 512}; // One for histogram, one for scan if (argc == 4) { blockSizes[0] = atoi(argv[2]); blockSizes[1] = atoi(argv[3]); } printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]); // SORT BY HOST sort(in, n, correctOut, nBits); nBits = 1; // SORT BY DEVICE sort(in, n, out, nBits, true, blockSizes); checkCorrectness(out, correctOut, n); // FREE MEMORIES free(in); free(out); free(correctOut); return EXIT_SUCCESS; }
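// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, hypothetical names): scanBlkKernel
// plus addBlkSums implement the usual two-level scan, namely an inclusive scan
// inside each block, an exclusive scan of the per-block sums, and a final
// add-back (sortByDevice then obtains an exclusive scan by feeding n-1
// elements shifted by one position). The host reference below restates that
// composition and can be used to cross-check d_inScan on small inputs.
// ---------------------------------------------------------------------------
#include <stdint.h>
#include <vector>

static void twoLevelInclusiveScanReference(const std::vector<uint32_t> &bits,
                                           int blockSize,
                                           std::vector<uint32_t> &out)
{
    const int n = (int)bits.size();
    const int numBlocks = (n + blockSize - 1) / blockSize;
    out.assign(n, 0);
    std::vector<uint32_t> blockSums(numBlocks, 0);

    // 1) inclusive scan inside each block
    for (int b = 0; b < numBlocks; ++b) {
        uint32_t running = 0;
        for (int i = b * blockSize; i < n && i < (b + 1) * blockSize; ++i) {
            running += bits[i];
            out[i] = running;
        }
        blockSums[b] = running;
    }
    // 2) exclusive scan of the per-block sums
    uint32_t carry = 0;
    for (int b = 0; b < numBlocks; ++b) {
        uint32_t s = blockSums[b];
        blockSums[b] = carry;
        carry += s;
    }
    // 3) add the scanned block sums back to every element
    for (int i = 0; i < n; ++i)
        out[i] += blockSums[i / blockSize];
}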
1b1b51e5d203bbcaf31f65e95281e1f5ce80d4ac.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Every thread adds its own input element to every output element.
__global__ void scatterSum(int N, float *input, float *output){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i >= N) return;
    for(int j=0;j<N;++j){
        atomicAdd(output+j, input[i]);
        // if(i<N/2) atomicAdd(output+j, input[i]);
        // atomicAdd(output+j, i<N/2 ? input[i] : 0.f);
    }
    return;
}
1b1b51e5d203bbcaf31f65e95281e1f5ce80d4ac.cu
#include "includes.h" __global__ void scatterSum(int N, float *input, float *output){ int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= N) return; for(int j=0;j<N;++j){ atomicAdd(output+j, input[i]); // if(i<N/2) atomicAdd(output+j, input[i]); // atomicAdd(output+j, i<N/2: input[i]: 0.); } return; }
42e6d80acf67cc7971af5f8ec179818ee315d6c5.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHTensor.hpp> #include <THH/THHStorage.hpp> #include <THH/generic/THHTensor.hip> #include <THH/THHGenerateAllTypes.h>
42e6d80acf67cc7971af5f8ec179818ee315d6c5.cu
#include <THC/THCTensor.hpp> #include <THC/THCStorage.hpp> #include <THC/generic/THCTensor.cu> #include <THC/THCGenerateAllTypes.h>
6cb6d721d911b371f050e5ccf26a4e9c9409bfdb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include "util.h" #include "ref_2dhisto.h" #include "opt_2dhisto.h" unsigned int* d_int_histo; uint8_t* d_histo; uint32_t* d_input; __global__ void histogram_kernel(uint32_t *buff, long size, unsigned int *histo){ //start index into buffer int index = threadIdx.x + blockIdx.x * blockDim.x; //stride is however many threads we have running int stride = blockDim.x * gridDim.x; //memory coalescing __shared__ unsigned int histo_private[HISTO_WIDTH]; if (threadIdx.x < HISTO_WIDTH) histo_private[threadIdx.x] = 0; __syncthreads(); //use private bins to work from shared memory, not global while(index < size){ if (histo_private[buff[index]] < UINT8_MAXIMUM){ //Don't waste time in atomic add if not necessary atomicAdd(&(histo_private[buff[index]]), 1); index += stride; } } __syncthreads(); //sum up private bins if(threadIdx.x < HISTO_WIDTH){ if (histo[threadIdx.x] < UINT8_MAXIMUM){ //Don't waste time in atomic add if not necessary atomicAdd(&(histo[threadIdx.x]), histo_private[threadIdx.x]); } } } __global__ void convert_int2uint8(unsigned int *int_histo, uint8_t *histo){ int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < HISTO_HEIGHT*HISTO_WIDTH){ if (int_histo[index]>UINT8_MAXIMUM){ histo[index] = UINT8_MAXIMUM; }else{ histo[index] = (uint8_t)int_histo[index]; } } } void opt_2dhisto(size_t height, size_t width, uint8_t bins[HISTO_HEIGHT*HISTO_WIDTH]) { long input_size = height*width; int histo_size = HISTO_HEIGHT*HISTO_WIDTH; //init bins to zero memset(bins, 0, histo_size*sizeof(bins[0])); hipMemset(d_histo, 0, histo_size*sizeof(uint8_t)); hipMemset(d_int_histo, 0, histo_size*sizeof(unsigned int)); dim3 dimGrid(ceil((float)input_size/(float)BLOCK_SIZE),1,1); dim3 dimBlock(BLOCK_SIZE,1,1); hipLaunchKernelGGL(( histogram_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,input_size,d_int_histo); hipDeviceSynchronize(); dimGrid.x = (ceil((float)histo_size/(float)BLOCK_SIZE),1,1); hipLaunchKernelGGL(( convert_int2uint8), dim3(dimGrid),dim3(dimBlock), 0, 0, d_int_histo,d_histo); hipDeviceSynchronize(); CopyBinsFromDeviceArray(bins,HISTO_HEIGHT,HISTO_WIDTH,d_histo); } void initData(uint32_t *input[], size_t height, size_t width){ //copy input long input_size = height*width; int histo_size = HISTO_HEIGHT*HISTO_WIDTH; hipMalloc((void**)&d_input, input_size*sizeof(uint32_t)); for(int i = 0; i < height; ++i){ //copy each row of input array to device CopyInputToDeviceArray((d_input+i*width), width, input[i]); } //uint8_t histogram for output hipMalloc((void**)&d_histo, histo_size*sizeof(uint8_t)); //int histogram for calculation. atomicadd works with this hipMalloc((void**)&d_int_histo, histo_size*sizeof(unsigned int)); } void destructData(){ hipFree(d_histo); hipFree(d_int_histo); hipFree(d_input); } /* Include below the implementation of any other functions you need */ //copy data (input) from the host to our device void CopyInputToDeviceArray(uint32_t* Adevice, size_t width, uint32_t* Ahost){ int size = width * sizeof(uint32_t); hipMemcpy(Adevice, Ahost, size, hipMemcpyHostToDevice); } //copy data back from device to our host void CopyBinsFromDeviceArray( uint8_t* Ahost, size_t height, size_t width,uint8_t* Adevice){ int size = width * height * sizeof(uint8_t); hipMemcpy(Ahost, Adevice, size, hipMemcpyDeviceToHost); }
6cb6d721d911b371f050e5ccf26a4e9c9409bfdb.cu
#include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include "util.h" #include "ref_2dhisto.h" #include "opt_2dhisto.h" unsigned int* d_int_histo; uint8_t* d_histo; uint32_t* d_input; __global__ void histogram_kernel(uint32_t *buff, long size, unsigned int *histo){ //start index into buffer int index = threadIdx.x + blockIdx.x * blockDim.x; //stride is however many threads we have running int stride = blockDim.x * gridDim.x; //memory coalescing __shared__ unsigned int histo_private[HISTO_WIDTH]; if (threadIdx.x < HISTO_WIDTH) histo_private[threadIdx.x] = 0; __syncthreads(); //use private bins to work from shared memory, not global while(index < size){ if (histo_private[buff[index]] < UINT8_MAXIMUM){ //Don't waste time in atomic add if not necessary atomicAdd(&(histo_private[buff[index]]), 1); index += stride; } } __syncthreads(); //sum up private bins if(threadIdx.x < HISTO_WIDTH){ if (histo[threadIdx.x] < UINT8_MAXIMUM){ //Don't waste time in atomic add if not necessary atomicAdd(&(histo[threadIdx.x]), histo_private[threadIdx.x]); } } } __global__ void convert_int2uint8(unsigned int *int_histo, uint8_t *histo){ int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < HISTO_HEIGHT*HISTO_WIDTH){ if (int_histo[index]>UINT8_MAXIMUM){ histo[index] = UINT8_MAXIMUM; }else{ histo[index] = (uint8_t)int_histo[index]; } } } void opt_2dhisto(size_t height, size_t width, uint8_t bins[HISTO_HEIGHT*HISTO_WIDTH]) { long input_size = height*width; int histo_size = HISTO_HEIGHT*HISTO_WIDTH; //init bins to zero memset(bins, 0, histo_size*sizeof(bins[0])); cudaMemset(d_histo, 0, histo_size*sizeof(uint8_t)); cudaMemset(d_int_histo, 0, histo_size*sizeof(unsigned int)); dim3 dimGrid(ceil((float)input_size/(float)BLOCK_SIZE),1,1); dim3 dimBlock(BLOCK_SIZE,1,1); histogram_kernel<<<dimGrid,dimBlock>>>(d_input,input_size,d_int_histo); cudaDeviceSynchronize(); dimGrid.x = (ceil((float)histo_size/(float)BLOCK_SIZE),1,1); convert_int2uint8<<<dimGrid,dimBlock>>>(d_int_histo,d_histo); cudaDeviceSynchronize(); CopyBinsFromDeviceArray(bins,HISTO_HEIGHT,HISTO_WIDTH,d_histo); } void initData(uint32_t *input[], size_t height, size_t width){ //copy input long input_size = height*width; int histo_size = HISTO_HEIGHT*HISTO_WIDTH; cudaMalloc((void**)&d_input, input_size*sizeof(uint32_t)); for(int i = 0; i < height; ++i){ //copy each row of input array to device CopyInputToDeviceArray((d_input+i*width), width, input[i]); } //uint8_t histogram for output cudaMalloc((void**)&d_histo, histo_size*sizeof(uint8_t)); //int histogram for calculation. atomicadd works with this cudaMalloc((void**)&d_int_histo, histo_size*sizeof(unsigned int)); } void destructData(){ cudaFree(d_histo); cudaFree(d_int_histo); cudaFree(d_input); } /* Include below the implementation of any other functions you need */ //copy data (input) from the host to our device void CopyInputToDeviceArray(uint32_t* Adevice, size_t width, uint32_t* Ahost){ int size = width * sizeof(uint32_t); cudaMemcpy(Adevice, Ahost, size, cudaMemcpyHostToDevice); } //copy data back from device to our host void CopyBinsFromDeviceArray( uint8_t* Ahost, size_t height, size_t width,uint8_t* Adevice){ int size = width * height * sizeof(uint8_t); cudaMemcpy(Ahost, Adevice, size, cudaMemcpyDeviceToHost); }
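// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition), assuming the same HISTO_WIDTH and
// UINT8_MAXIMUM macros from the headers included above: a grid-stride variant
// of the accumulation loop. In histogram_kernel the index is only advanced
// inside the `if`, so a thread can spin once its private bin reaches
// UINT8_MAXIMUM; the variant below always advances and merely stops counting a
// saturated bin. The helper also shows a scalar grid-size computation (in
// opt_2dhisto, `dimGrid.x = (ceil(...),1,1)` evaluates the comma expression
// and assigns 1).
// ---------------------------------------------------------------------------
__global__ void histogram_kernel_strided(const uint32_t *buff, long size,
                                         unsigned int *histo)
{
    __shared__ unsigned int s_bins[HISTO_WIDTH];
    for (int b = threadIdx.x; b < HISTO_WIDTH; b += blockDim.x)
        s_bins[b] = 0;
    __syncthreads();

    for (long i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
         i += (long)blockDim.x * gridDim.x) {
        unsigned int bin = buff[i];
        if (s_bins[bin] < UINT8_MAXIMUM)   // skip further atomics once saturated
            atomicAdd(&s_bins[bin], 1u);
    }
    __syncthreads();

    for (int b = threadIdx.x; b < HISTO_WIDTH; b += blockDim.x)
        atomicAdd(&histo[b], s_bins[b]);
}

// Hypothetical launch helper: one scalar grid dimension per kernel.
static inline dim3 gridFor(long work, int blockSize)
{
    return dim3((unsigned int)((work + blockSize - 1) / blockSize), 1, 1);
}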
85df4180bc23fa3aaea42dcbacb0202eb24ea943.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float ds_A[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_B[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; float cValue = 0; for (int t = 0; t < (numAColumns - 1) / TILE_WIDTH + 1; t++) { if (row < numARows && t * TILE_WIDTH + tx < numAColumns) { ds_A[ty][tx] = A[row * numAColumns + t * TILE_WIDTH + tx]; } else { ds_A[ty][tx] = 0.0; } if (t * TILE_WIDTH + ty < numBRows && col < numBColumns) { ds_B[ty][tx] = B[(t * TILE_WIDTH + ty) * numBColumns + col]; } else { ds_B[ty][tx] = 0.0; } __syncthreads(); for (int i = 0; i < TILE_WIDTH; i++) { cValue += ds_A[ty][i] * ds_B[i][tx]; } __syncthreads(); } if (row < numCRows && col < numCColumns) { C[row * numCColumns + col] = cValue; } }
85df4180bc23fa3aaea42dcbacb0202eb24ea943.cu
extern "C" #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float ds_A[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_B[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; float cValue = 0; for (int t = 0; t < (numAColumns - 1) / TILE_WIDTH + 1; t++) { if (row < numARows && t * TILE_WIDTH + tx < numAColumns) { ds_A[ty][tx] = A[row * numAColumns + t * TILE_WIDTH + tx]; } else { ds_A[ty][tx] = 0.0; } if (t * TILE_WIDTH + ty < numBRows && col < numBColumns) { ds_B[ty][tx] = B[(t * TILE_WIDTH + ty) * numBColumns + col]; } else { ds_B[ty][tx] = 0.0; } __syncthreads(); for (int i = 0; i < TILE_WIDTH; i++) { cValue += ds_A[ty][i] * ds_B[i][tx]; } __syncthreads(); } if (row < numCRows && col < numCColumns) { C[row * numCColumns + col] = cValue; } }
7477acbcaffff9abdc9fd5b73c0280c2bba7100f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // -------------------------------------------------------- // Fast R-CNN // Copyright (c) Microsoft. All rights reserved. // Written by Ross Girshick, 2015. // Licensed under the BSD 2-clause "Simplified" license. // See LICENSE in the Fast R-CNN project root for license // information. // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == 
(h * width + w)) { gradient += offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer); } // namespace caffe
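// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, hypothetical names): the forward
// pass maps each pooled cell (ph, pw) of an ROI onto a window of the bottom
// feature map. The host helper below restates just that index arithmetic for a
// single cell so it can be inspected with concrete numbers.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>

struct PoolWindow { int hstart, hend, wstart, wend; };

static PoolWindow roiPoolWindow(int ph, int pw, int pooled_height, int pooled_width,
                                int roi_start_h, int roi_start_w,
                                int roi_height, int roi_width,
                                int height, int width)
{
    const float bin_h = (float)roi_height / (float)pooled_height;
    const float bin_w = (float)roi_width  / (float)pooled_width;
    PoolWindow win;
    win.hstart = std::min(std::max((int)std::floor(ph * bin_h) + roi_start_h, 0), height);
    win.hend   = std::min(std::max((int)std::ceil((ph + 1) * bin_h) + roi_start_h, 0), height);
    win.wstart = std::min(std::max((int)std::floor(pw * bin_w) + roi_start_w, 0), width);
    win.wend   = std::min(std::max((int)std::ceil((pw + 1) * bin_w) + roi_start_w, 0), width);
    return win;   // the window is empty when hend <= hstart or wend <= wstart
}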
7477acbcaffff9abdc9fd5b73c0280c2bba7100f.cu
// -------------------------------------------------------- // Fast R-CNN // Copyright (c) Microsoft. All rights reserved. // Written by Ross Girshick, 2015. // Licensed under the BSD 2-clause "Simplified" license. // See LICENSE in the Fast R-CNN project root for license // information. // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { gradient += 
offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer); } // namespace caffe
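// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, hypothetical names): in the backward
// pass, a bottom pixel (h, w) inside an ROI can only have been pooled by a
// small rectangle of pooled cells. The host helper below restates that bound
// computation so it can be checked by hand for one pixel.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>

struct PooledRange { int phstart, phend, pwstart, pwend; };

static PooledRange feasiblePooledCells(int h, int w,
                                       int roi_start_h, int roi_start_w,
                                       int roi_height, int roi_width,
                                       int pooled_height, int pooled_width)
{
    const float bin_h = (float)roi_height / (float)pooled_height;
    const float bin_w = (float)roi_width  / (float)pooled_width;
    PooledRange r;
    r.phstart = std::min(std::max((int)std::floor((h - roi_start_h) / bin_h), 0), pooled_height);
    r.phend   = std::min(std::max((int)std::ceil((h - roi_start_h + 1) / bin_h), 0), pooled_height);
    r.pwstart = std::min(std::max((int)std::floor((w - roi_start_w) / bin_w), 0), pooled_width);
    r.pwend   = std::min(std::max((int)std::ceil((w - roi_start_w + 1) / bin_w), 0), pooled_width);
    return r;   // iterate ph in [phstart, phend) and pw in [pwstart, pwend)
}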
177b23f709e896a114beb74b656303b0dc6bc426.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <fstream> #include <chrono> typedef unsigned int uint; void GenInput(int size, int num_steps); void Process_GPU( bool* field_in, bool* field_out, uint field_size, uint num_steps ); __global__ void ProcessCell_GPU( bool* field_in, bool* field_out, int field_size ); void Process_CPU( bool* field_in, bool* field_out, uint field_size, uint num_steps ); void ProcessCell_CPU( bool* field_in, bool* field_out, uint i, int field_size ); void WriteResults( bool* out, uint field_size, const std::string& name ); int main(int argc, char* argv[]) { GenInput(100, 500); std::ifstream f( "input.txt" ); if( !f ) { std::cout << "Cannot open 'input.txt'" << std::endl; return -1; } uint field_size = 0; f >> field_size; uint num_steps = 0; f >> num_steps; uint cells_count = field_size*field_size; bool* field_in = new bool[cells_count]; for( uint i = 0; i < cells_count; ++i ) f >> field_in[i]; bool* field_out_gpu = new bool[cells_count]; bool* field_out_cpu = new bool[cells_count]; std::chrono::time_point<std::chrono::steady_clock> t; std::chrono::microseconds delta; std::cout << "Starting GPU simulation..." << std::endl; t = std::chrono::steady_clock::now(); Process_GPU( field_in, field_out_gpu, field_size, num_steps ); delta = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::steady_clock::now() - t ); std::cout << "GPU time: " << delta.count() << " microseconds" << std::endl; std::cout << "----------------------------------------" << std::endl; std::cout << "Starting CPU single thread simulation..." << std::endl; t = std::chrono::steady_clock::now(); Process_CPU( field_in, field_out_cpu, field_size, num_steps ); delta = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::steady_clock::now() - t ); std::cout << "CPU time: " << delta.count() << " microseconds" << std::endl; for( uint i = 0; i < cells_count; ++i ) if( field_out_cpu[i] != field_out_gpu[i] ) { std::cout << "Validation fail" << std::endl; return -1; } WriteResults( field_out_cpu, field_size, "result.txt" ); delete[] field_in; delete[] field_out_gpu; delete[] field_out_cpu; return 0; } void GenInput(int size, int num_steps) { std::ofstream f( "input.txt" ); f << size << ' ' << num_steps << '\n'; for( int i = 0; i < size; ++i ) { for( int j = 0; j < size; ++j ) { f << rand() % 2 << ' '; } f << '\n'; } } void Process_GPU( bool* field_in, bool* field_out, uint field_size, uint num_steps ) { uint cells_count = field_size*field_size; size_t array_size = sizeof( bool ) * cells_count; bool* fields[2]; hipMalloc( (void**)( &fields[0] ), array_size ); hipMalloc( (void**)( &fields[1] ), array_size ); hipMemcpy( fields[0], field_in, array_size, hipMemcpyHostToDevice ); const int threads_per_block = 100; int in, out; for( uint i = 0; i < num_steps; ++i ) { in = i % 2; out = ( i + 1 ) % 2; hipLaunchKernelGGL(( ProcessCell_GPU) , dim3(cells_count / threads_per_block), dim3(threads_per_block) , 0, 0, fields[in], fields[out], field_size ); hipMemcpy( fields[in], fields[out], array_size, hipMemcpyDeviceToDevice ); } hipMemcpy( field_out, fields[in], array_size, hipMemcpyDeviceToHost ); hipFree( fields[0] ); hipFree( fields[1] ); } __global__ void ProcessCell_GPU( bool* field_in, bool* field_out, int field_size ) { uint i = blockIdx.x * blockDim.x + threadIdx.x; int x = i % field_size; int y = i / field_size; int neighbours_count = 0; for( int cx = x - 1; cx <= x + 1; ++cx ) for( 
int cy = y - 1; cy <= y + 1; ++cy ) { if( cx == x && cy == y ) continue; int nx = cx; int ny = cy; if( nx < 0 ) nx = field_size - 1; if( nx >= field_size ) nx = 0; if( ny < 0 ) ny = field_size - 1; if( ny >= field_size ) ny = 0; neighbours_count += (int)field_in[nx + ny * field_size]; } if( field_in[i] ) { if( neighbours_count == 2 || neighbours_count == 3 ) field_out[i] = true; else field_out[i] = false; } else { if( neighbours_count == 3 ) field_out[i] = true; else field_out[i] = false; } } void Process_CPU( bool* field_in, bool* field_out, uint field_size, uint num_steps ) { uint cells_count = field_size*field_size; size_t array_size = sizeof( bool ) * cells_count; bool* fields[2]; fields[0] = new bool[cells_count]; fields[1] = new bool[cells_count]; memcpy( fields[0], field_in, array_size ); int in, out; for( uint i = 0; i < num_steps; ++i ) { in = i % 2; out = ( i + 1 ) % 2; for( uint j = 0; j < cells_count; ++j ) ProcessCell_CPU( fields[in], fields[out], j, field_size ); memcpy( fields[in], fields[out], array_size ); } memcpy( field_out, fields[out], array_size ); delete[] fields[0]; delete[] fields[1]; } void ProcessCell_CPU( bool* field_in, bool* field_out, uint i, int field_size ) { int x = i % field_size; int y = i / field_size; int neighbours_count = 0; for( int cx = x - 1; cx <= x + 1; ++cx ) for( int cy = y - 1; cy <= y + 1; ++cy ) { if( cx == x && cy == y ) continue; int nx = cx; int ny = cy; if( nx < 0 ) nx = field_size - 1; if( nx >= field_size ) nx = 0; if( ny < 0 ) ny = field_size - 1; if( ny >= field_size ) ny = 0; neighbours_count += (int)field_in[nx + ny * field_size]; } if( field_in[i] ) { if( neighbours_count == 2 || neighbours_count == 3 ) field_out[i] = true; else field_out[i] = false; } else { if( neighbours_count == 3 ) field_out[i] = true; else field_out[i] = false; } } void WriteResults( bool* out, uint field_size, const std::string& name ) { std::ofstream of( name ); for( uint i = 0; i < field_size; ++i ) { for( uint j = 0; j < field_size; ++j ) of << out[i * field_size + j] << ' '; of << '\n'; } }
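// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition): the toroidal wrap-around done with
// the four `if` statements in the neighbour loops above can also be written
// with modular arithmetic. A hypothetical helper:
// ---------------------------------------------------------------------------
__host__ __device__ inline int wrapIndex(int c, int field_size)
{
    // maps -1 to field_size-1 and field_size to 0, leaving in-range values alone
    return (c + field_size) % field_size;
}

// Possible use inside the neighbour loops:
//   neighbours_count += (int)field_in[ wrapIndex(cx, field_size)
//                                      + wrapIndex(cy, field_size) * field_size ];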
177b23f709e896a114beb74b656303b0dc6bc426.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <fstream> #include <chrono> typedef unsigned int uint; void GenInput(int size, int num_steps); void Process_GPU( bool* field_in, bool* field_out, uint field_size, uint num_steps ); __global__ void ProcessCell_GPU( bool* field_in, bool* field_out, int field_size ); void Process_CPU( bool* field_in, bool* field_out, uint field_size, uint num_steps ); void ProcessCell_CPU( bool* field_in, bool* field_out, uint i, int field_size ); void WriteResults( bool* out, uint field_size, const std::string& name ); int main(int argc, char* argv[]) { GenInput(100, 500); std::ifstream f( "input.txt" ); if( !f ) { std::cout << "Cannot open 'input.txt'" << std::endl; return -1; } uint field_size = 0; f >> field_size; uint num_steps = 0; f >> num_steps; uint cells_count = field_size*field_size; bool* field_in = new bool[cells_count]; for( uint i = 0; i < cells_count; ++i ) f >> field_in[i]; bool* field_out_gpu = new bool[cells_count]; bool* field_out_cpu = new bool[cells_count]; std::chrono::time_point<std::chrono::steady_clock> t; std::chrono::microseconds delta; std::cout << "Starting GPU simulation..." << std::endl; t = std::chrono::steady_clock::now(); Process_GPU( field_in, field_out_gpu, field_size, num_steps ); delta = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::steady_clock::now() - t ); std::cout << "GPU time: " << delta.count() << " microseconds" << std::endl; std::cout << "----------------------------------------" << std::endl; std::cout << "Starting CPU single thread simulation..." << std::endl; t = std::chrono::steady_clock::now(); Process_CPU( field_in, field_out_cpu, field_size, num_steps ); delta = std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::steady_clock::now() - t ); std::cout << "CPU time: " << delta.count() << " microseconds" << std::endl; for( uint i = 0; i < cells_count; ++i ) if( field_out_cpu[i] != field_out_gpu[i] ) { std::cout << "Validation fail" << std::endl; return -1; } WriteResults( field_out_cpu, field_size, "result.txt" ); delete[] field_in; delete[] field_out_gpu; delete[] field_out_cpu; return 0; } void GenInput(int size, int num_steps) { std::ofstream f( "input.txt" ); f << size << ' ' << num_steps << '\n'; for( int i = 0; i < size; ++i ) { for( int j = 0; j < size; ++j ) { f << rand() % 2 << ' '; } f << '\n'; } } void Process_GPU( bool* field_in, bool* field_out, uint field_size, uint num_steps ) { uint cells_count = field_size*field_size; size_t array_size = sizeof( bool ) * cells_count; bool* fields[2]; cudaMalloc( (void**)( &fields[0] ), array_size ); cudaMalloc( (void**)( &fields[1] ), array_size ); cudaMemcpy( fields[0], field_in, array_size, cudaMemcpyHostToDevice ); const int threads_per_block = 100; int in, out; for( uint i = 0; i < num_steps; ++i ) { in = i % 2; out = ( i + 1 ) % 2; ProcessCell_GPU <<< cells_count / threads_per_block, threads_per_block >>> ( fields[in], fields[out], field_size ); cudaMemcpy( fields[in], fields[out], array_size, cudaMemcpyDeviceToDevice ); } cudaMemcpy( field_out, fields[in], array_size, cudaMemcpyDeviceToHost ); cudaFree( fields[0] ); cudaFree( fields[1] ); } __global__ void ProcessCell_GPU( bool* field_in, bool* field_out, int field_size ) { uint i = blockIdx.x * blockDim.x + threadIdx.x; int x = i % field_size; int y = i / field_size; int neighbours_count = 0; for( int cx = x - 1; cx <= x + 1; ++cx ) for( int cy = y - 1; cy <= y + 1; ++cy ) { if( cx == x && cy == y ) continue; int nx = cx; 
int ny = cy; if( nx < 0 ) nx = field_size - 1; if( nx >= field_size ) nx = 0; if( ny < 0 ) ny = field_size - 1; if( ny >= field_size ) ny = 0; neighbours_count += (int)field_in[nx + ny * field_size]; } if( field_in[i] ) { if( neighbours_count == 2 || neighbours_count == 3 ) field_out[i] = true; else field_out[i] = false; } else { if( neighbours_count == 3 ) field_out[i] = true; else field_out[i] = false; } } void Process_CPU( bool* field_in, bool* field_out, uint field_size, uint num_steps ) { uint cells_count = field_size*field_size; size_t array_size = sizeof( bool ) * cells_count; bool* fields[2]; fields[0] = new bool[cells_count]; fields[1] = new bool[cells_count]; memcpy( fields[0], field_in, array_size ); int in, out; for( uint i = 0; i < num_steps; ++i ) { in = i % 2; out = ( i + 1 ) % 2; for( uint j = 0; j < cells_count; ++j ) ProcessCell_CPU( fields[in], fields[out], j, field_size ); memcpy( fields[in], fields[out], array_size ); } memcpy( field_out, fields[out], array_size ); delete[] fields[0]; delete[] fields[1]; } void ProcessCell_CPU( bool* field_in, bool* field_out, uint i, int field_size ) { int x = i % field_size; int y = i / field_size; int neighbours_count = 0; for( int cx = x - 1; cx <= x + 1; ++cx ) for( int cy = y - 1; cy <= y + 1; ++cy ) { if( cx == x && cy == y ) continue; int nx = cx; int ny = cy; if( nx < 0 ) nx = field_size - 1; if( nx >= field_size ) nx = 0; if( ny < 0 ) ny = field_size - 1; if( ny >= field_size ) ny = 0; neighbours_count += (int)field_in[nx + ny * field_size]; } if( field_in[i] ) { if( neighbours_count == 2 || neighbours_count == 3 ) field_out[i] = true; else field_out[i] = false; } else { if( neighbours_count == 3 ) field_out[i] = true; else field_out[i] = false; } } void WriteResults( bool* out, uint field_size, const std::string& name ) { std::ofstream of( name ); for( uint i = 0; i < field_size; ++i ) { for( uint j = 0; j < field_size; ++j ) of << out[i * field_size + j] << ' '; of << '\n'; } }
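// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition): Process_GPU above copies the output
// buffer back onto the input buffer after every step. Since both buffers
// already live on the device, the same effect can be obtained by swapping the
// two pointers, which removes one device-to-device copy per step. A possible
// variant, assuming the ProcessCell_GPU kernel and the `uint` typedef from the
// file above; like the original, it assumes cells_count is a multiple of
// threads_per_block.
// ---------------------------------------------------------------------------
static void Process_GPU_swap(bool *field_in, bool *field_out,
                             uint field_size, uint num_steps)
{
    const uint cells_count = field_size * field_size;
    const size_t array_size = sizeof(bool) * cells_count;
    bool *cur = nullptr, *next = nullptr;
    cudaMalloc((void **)&cur, array_size);
    cudaMalloc((void **)&next, array_size);
    cudaMemcpy(cur, field_in, array_size, cudaMemcpyHostToDevice);

    const int threads_per_block = 100;
    for (uint i = 0; i < num_steps; ++i) {
        ProcessCell_GPU<<<cells_count / threads_per_block, threads_per_block>>>(
            cur, next, field_size);
        bool *tmp = cur; cur = next; next = tmp;   // swap instead of a D2D copy
    }
    cudaMemcpy(field_out, cur, array_size, cudaMemcpyDeviceToHost);
    cudaFree(cur);
    cudaFree(next);
}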
a6fdfe0ca2a068ad9140afa7e1068f3cb259a74c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>   /* for exit / EXIT_FAILURE */
#include <hip/hip_runtime.h>
#include <R.h>
#include "mstnrUtils.h"

// Report the most recent runtime error (if any) together with a
// caller-supplied message, then abort.
void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if( hipSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
a6fdfe0ca2a068ad9140afa7e1068f3cb259a74c.cu
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>   /* for exit / EXIT_FAILURE */
#include <cuda.h>
#include <R.h>
#include "mstnrUtils.h"

// Report the most recent runtime error (if any) together with a
// caller-supplied message, then abort.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
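// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition): typical use of checkCUDAError is to
// call it right after a kernel launch (to catch launch errors) and again after
// a synchronization point (to catch errors from the asynchronous execution).
// The kernel below is a hypothetical stand-in.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void touchFirstElement(float *x)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) x[0] += 1.0f;
}

static void checkCUDAErrorUsageSketch(float *d_x)
{
    touchFirstElement<<<1, 32>>>(d_x);
    checkCUDAError("touchFirstElement launch");
    cudaDeviceSynchronize();
    checkCUDAError("touchFirstElement execution");
}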
e16cfb726ccb01a85bb588c87a61fe1fe62018ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zmdot.cu normal z -> s, Sat Nov 15 19:54:21 2014 @author Hartwig Anzt */ #include "common_magma.h" #define BLOCK_SIZE 256 #define PRECISION_s // initialize arrays with zero __global__ void magma_sgpumemzero( magmaFloat_ptr d, int n, int k ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i < n ){ for( int j=0; j<k; j++) d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 ); } } // dot product __global__ void magma_sdot_kernel( int Gs, int n, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr vtmp) { extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_S_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // dot product for multiple vectors __global__ void magma_sblockdot_kernel( int Gs, int n, int k, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr vtmp) { extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // k vectors v(i) if (i<n){ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = v[i+j*n] * r[i]; } else{ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] =MAGMA_S_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += 
temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for multiple vectors __global__ void magma_sblockreduce_kernel( int Gs, int n, int k, magmaFloat_ptr vtmp, magmaFloat_ptr vtmp2 ) { extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ] : MAGMA_S_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // accelerated reduction for one vector __global__ void magma_sreduce_kernel_fast( int Gs, int n, magmaFloat_ptr vtmp, magmaFloat_ptr vtmp2 ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? 
vtmp[ i + blockSize ] : MAGMA_S_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // accelerated block reduction for multiple vectors __global__ void magma_sblockreduce_kernel_fast( int Gs, int n, int k, magmaFloat_ptr vtmp, magmaFloat_ptr vtmp2 ) { extern __shared__ float temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<k; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_S_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_S_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n 
] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaFloat_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaFloat_ptr r @param[in] d1 magmaFloat_ptr workspace @param[in] d2 magmaFloat_ptr workspace @param[out] skp magmaFloat_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_smdotc( int n, int k, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr d1, magmaFloat_ptr d2, magmaFloat_ptr skp, magma_queue_t queue ) { // set queue for old dense routines magma_queue_t orig_queue; magmablasGetKernelStream( &orig_queue ); int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (n+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = (k)* (local_block_size) * sizeof( float ); // k vecs magmaFloat_ptr aux1 = d1, aux2 = d2; int b = 1; if (k>1) { hipLaunchKernelGGL(( magma_sblockdot_kernel), dim3(Gs), dim3(Bs), Ms, 0, Gs.x, n, k, v, r, d1 ); } else { hipLaunchKernelGGL(( magma_sdot_kernel), dim3(Gs), dim3(Bs), Ms, 0, Gs.x, n, v, r, d1 ); } /* // not necessary to zero GPU mem magma_sgpumemzero<<<Gs, Bs, 0>>>( d1, n*k,1 ); magma_sgpumemzero<<<Gs, Bs, 0>>>( d2, n*k,1 ); //magmablas_slaset( MagmaUpperLower, n, k, d1, n ); //magmablas_slaset( MagmaUpperLower, n, k, d2, n ); while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; magma_sblockreduce_kernel<<< Gs_next.x, Bs.x, Ms >>> ( Gs.x, n, k, aux1, aux2 ); Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } for( int j=0; j<k; j++) { magma_scopyvector( 1, aux1+j*n, 1, skp+j, 1 ); } */ if ( k>1) { while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_sblockreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, n, k, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } else { while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_sreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } for( int j=0; j<k; j++) { magma_scopyvector( 1, aux1+j*n, 1, skp+j, 1 ); } magmablasSetKernelStream( orig_queue ); return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaFloat_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaFloat_ptr r @param[in] d1 magmaFloat_ptr workspace @param[in] d2 magmaFloat_ptr workspace @param[out] skp magmaFloat_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_s ********************************************************************/ extern "C" magma_int_t magma_sgemvmdot( int n, int k, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr d1, magmaFloat_ptr d2, magmaFloat_ptr skp, magma_queue_t queue ) { int rows_left = k; int offset = 0; int chunk_size = 4; // process in chunks of chunk_size (currently 4) - has to be adapted to hardware and precision while( rows_left > (chunk_size) ) { magma_smdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue ); offset = offset + chunk_size; rows_left = rows_left-chunk_size; } // process rest magma_smdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue ); return MAGMA_SUCCESS; }
e16cfb726ccb01a85bb588c87a61fe1fe62018ab.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zmdot.cu normal z -> s, Sat Nov 15 19:54:21 2014 @author Hartwig Anzt */ #include "common_magma.h" #define BLOCK_SIZE 256 #define PRECISION_s // initialize arrays with zero __global__ void magma_sgpumemzero( magmaFloat_ptr d, int n, int k ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i < n ){ for( int j=0; j<k; j++) d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 ); } } // dot product __global__ void magma_sdot_kernel( int Gs, int n, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr vtmp) { extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_S_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // dot product for multiple vectors __global__ void magma_sblockdot_kernel( int Gs, int n, int k, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr vtmp) { extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // k vectors v(i) if (i<n){ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = v[i+j*n] * r[i]; } else{ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] =MAGMA_S_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 
]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for multiple vectors __global__ void magma_sblockreduce_kernel( int Gs, int n, int k, magmaFloat_ptr vtmp, magmaFloat_ptr vtmp2 ) { extern __shared__ float temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ] : MAGMA_S_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // accelerated reduction for one vector __global__ void magma_sreduce_kernel_fast( int Gs, int n, magmaFloat_ptr vtmp, magmaFloat_ptr vtmp2 ){ extern __shared__ float temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? 
vtmp[ i + blockSize ] : MAGMA_S_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // accelerated block reduction for multiple vectors __global__ void magma_sblockreduce_kernel_fast( int Gs, int n, int k, magmaFloat_ptr vtmp, magmaFloat_ptr vtmp2 ) { extern __shared__ float temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<k; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_S_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_S_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n 
] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaFloat_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaFloat_ptr r @param[in] d1 magmaFloat_ptr workspace @param[in] d2 magmaFloat_ptr workspace @param[out] skp magmaFloat_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_smdotc( int n, int k, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr d1, magmaFloat_ptr d2, magmaFloat_ptr skp, magma_queue_t queue ) { // set queue for old dense routines magma_queue_t orig_queue; magmablasGetKernelStream( &orig_queue ); int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (n+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = (k)* (local_block_size) * sizeof( float ); // k vecs magmaFloat_ptr aux1 = d1, aux2 = d2; int b = 1; if (k>1) { magma_sblockdot_kernel<<<Gs, Bs, Ms>>>( Gs.x, n, k, v, r, d1 ); } else { magma_sdot_kernel<<<Gs, Bs, Ms>>>( Gs.x, n, v, r, d1 ); } /* // not necessary to zero GPU mem magma_sgpumemzero<<<Gs, Bs, 0>>>( d1, n*k,1 ); magma_sgpumemzero<<<Gs, Bs, 0>>>( d2, n*k,1 ); //magmablas_slaset( MagmaUpperLower, n, k, d1, n ); //magmablas_slaset( MagmaUpperLower, n, k, d2, n ); while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; magma_sblockreduce_kernel<<< Gs_next.x, Bs.x, Ms >>> ( Gs.x, n, k, aux1, aux2 ); Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } for( int j=0; j<k; j++) { magma_scopyvector( 1, aux1+j*n, 1, skp+j, 1 ); } */ if ( k>1) { while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_sblockreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, n, k, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } else { while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_sreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } for( int j=0; j<k; j++) { magma_scopyvector( 1, aux1+j*n, 1, skp+j, 1 ); } magmablasSetKernelStream( orig_queue ); return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaFloat_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaFloat_ptr r @param[in] d1 magmaFloat_ptr workspace @param[in] d2 magmaFloat_ptr workspace @param[out] skp magmaFloat_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_s ********************************************************************/ extern "C" magma_int_t magma_sgemvmdot( int n, int k, magmaFloat_ptr v, magmaFloat_ptr r, magmaFloat_ptr d1, magmaFloat_ptr d2, magmaFloat_ptr skp, magma_queue_t queue ) { int rows_left = k; int offset = 0; int chunk_size = 4; // process in chunks of chunk_size (currently 4) - has to be adapted to hardware and precision while( rows_left > (chunk_size) ) { magma_smdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue ); offset = offset + chunk_size; rows_left = rows_left-chunk_size; } // process rest magma_smdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue ); return MAGMA_SUCCESS; }
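// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file pair above): the merged dot
// product computes skp[j] = <v_j, r> for k vectors in a single pass over r,
// with each v_j stored contiguously at v + j*n, as in magma_smdotc above.
// The self-contained kernel below shows the same idea with a simplified
// one-stage reduction that finishes with atomicAdd instead of MAGMA's
// two-stage block reduction; the name merged_sdot and the sizes in main()
// are hypothetical.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void merged_sdot(int n, int k, const float *v, const float *r,
                            float *skp)
{
    extern __shared__ float temp[];              // k partial products per thread
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + tid;

    for (int j = 0; j < k; j++)
        temp[tid + j * blockDim.x] = (gid < n) ? v[gid + j * n] * r[gid] : 0.0f;
    __syncthreads();

    // one shared-memory tree reduction per vector j (blockDim.x is a power of 2)
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s)
            for (int j = 0; j < k; j++)
                temp[tid + j * blockDim.x] += temp[tid + s + j * blockDim.x];
        __syncthreads();
    }
    if (tid == 0)
        for (int j = 0; j < k; j++)
            atomicAdd(&skp[j], temp[j * blockDim.x]);
}

int main()
{
    const int n = 1 << 16, k = 4, threads = 256;
    const int blocks = (n + threads - 1) / threads;
    float *v, *r, *skp;
    cudaMallocManaged(&v, (size_t)n * k * sizeof(float));
    cudaMallocManaged(&r, (size_t)n * sizeof(float));
    cudaMallocManaged(&skp, k * sizeof(float));
    for (int i = 0; i < n; i++) r[i] = 1.0f;
    for (int j = 0; j < k; j++) {
        skp[j] = 0.0f;
        for (int i = 0; i < n; i++) v[i + j * n] = (float)(j + 1);
    }
    merged_sdot<<<blocks, threads, k * threads * sizeof(float)>>>(n, k, v, r, skp);
    cudaDeviceSynchronize();
    for (int j = 0; j < k; j++)
        printf("<v_%d,r> = %f (expected %f)\n", j, skp[j], (float)(j + 1) * n);
    cudaFree(v); cudaFree(r); cudaFree(skp);
    return 0;
}
// ---------------------------------------------------------------------------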
c5eac41c73fe3d7766d1ecc26d09468fd290dca8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "nanoTime.h" __global__ void productAB(int N, float *a, float *b, float *c); __global__ void sumAB(int N, float *c, float *d); __global__ void test(int N, float *c, float *d); int main (void) { printf("2x2matrixMult_v0\n"); // Initialize parameters int N = 2<<12; size_t bytes = N*sizeof(float); // Declare Host variables float *a, *b, *c, *d; // Declare Device variables float *d_a, *d_b, *d_c, *d_d; // Allocate Host variables a = (float*)malloc(bytes); b = (float*)malloc(bytes); c = (float*)malloc(bytes); d = (float*)malloc(bytes); // Allocate Device variables hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); hipMalloc(&d_d, bytes); // Set Host variables for (int i=0; i<N; i++) { a[i] = 1.0f; b[i] = 5.0f; } // Set Device variables hipMemcpy(d_a, a, bytes, hipMemcpyHostToDevice); hipMemcpy(d_b, b, bytes, hipMemcpyHostToDevice); hipMemcpy(d_c, c, bytes, hipMemcpyHostToDevice); hipMemcpy(d_d, d, bytes, hipMemcpyHostToDevice); // Initialize Host Event Handeling struct timespec vartime; float time_elapsed_nanos; // Initialize Device Event Handeling hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); ///////////////////////////////////////////////////////// //// GPU Calculation ////////////////////////////////// dim3 threadsPerBlock(8,8); dim3 numBlocks(N/threadsPerBlock.x,N/threadsPerBlock.y); hipLaunchKernelGGL(( productAB), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, N, d_a, d_b, d_c); hipLaunchKernelGGL(( sumAB), dim3(1),dim3(4), 0, 0, N, d_c, d_d); float milli0 = 0; float micro0 = 0; float milli1 = 0; float micro1 = 0; float milli2 = 0; float micro2 = 0; hipStream_t stream0, stream1, stream2; hipStreamCreate(&stream0); hipStreamCreate(&stream1); hipStreamCreate(&stream2); hipEventRecord(start,stream0); hipLaunchKernelGGL(( productAB), dim3(numBlocks),dim3(threadsPerBlock),0,stream0, N, d_a, d_b, d_c); hipEventRecord(stop,stream0); hipStreamSynchronize(stream0); hipEventElapsedTime(&milli0, start, stop); micro0 = milli0*1000; hipEventRecord(start,stream1); hipLaunchKernelGGL(( sumAB), dim3(32),dim3(100),0,stream1, N, d_c, d_d); hipEventRecord(stop,stream1); hipStreamSynchronize(stream1); hipEventElapsedTime(&milli1, start, stop); micro1 = milli1*1000; hipEventRecord(start,stream2); hipLaunchKernelGGL(( test), dim3(1),dim3(4),0,stream2, N, d_c, d_d); hipEventRecord(stop,stream2); hipStreamSynchronize(stream2); hipEventElapsedTime(&milli2, start, stop); micro2 = (milli2*1000); hipEventDestroy(start); hipEventDestroy(stop); hipStreamDestroy(stream0); hipStreamDestroy(stream1); hipStreamDestroy(stream2); vartime = timer_start(); hipLaunchKernelGGL(( productAB), dim3(1),dim3(8), 0, 0, N, d_a, d_b, d_c); hipLaunchKernelGGL(( sumAB), dim3(1),dim3(4), 0, 0, N, d_c, d_d); time_elapsed_nanos = timer_end(vartime); float time_elapsed_micro = time_elapsed_nanos/1000; ///////////////////////////////////////////////////////// ///////////////////////////////////////////////////////// // Get Device variables hipMemcpy(d, d_d, bytes, hipMemcpyDeviceToHost); // Results (GPU) printf("GPU Calculation:\n"); //printf("%f %f\n", d[0], d[1]); //printf("%f %f\n", d[2], d[3]); printf("Kernel Prod Elapsed Time (microseconds): %f\n", micro0); printf("Kernel Sum Elapsed Time (microseconds): %f\n", micro1); printf("Kernel Test Elapsed Time (microseconds): %f\n", micro2); ///////////////////////////////////////////////////////// //// CPU Calculation 
////////////////////////////////// //vartime = timer_start(); for (int i=0; i<8; i++) c[i] = a[i]*b[i]; for (int i=0; i<4; i++) d[i] = c[i]+c[i+4]; //time_elapsed_nanos = timer_end(vartime); //float time_elapsed_micro = time_elapsed_nanos/1000; ///////////////////////////////////////////////////////// ///////////////////////////////////////////////////////// // Results (CPU) //printf("CPU Calculation:\n"); //printf("%f %f\n", d[0], d[1]); //printf("%f %f\n", d[2], d[3]); //printf("Elapsed Time (microseconds): %f\n", time_elapsed_micro); // Deallocate Host variables free(a); free(b); free(c); free(d); // Deallocate Device variables hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(d_d); return 0; } __global__ void productAB(int N, float *a, float *b, float *c) { //int i = threadIdx.x; int i = (blockIdx.x*blockDim.x) + threadIdx.x; int j = (blockIdx.y*blockDim.y) + threadIdx.y; if (i<N) c[i] = a[i]*b[j]; } __global__ void sumAB(int N, float *c, float *d) { int i = threadIdx.x; if (i<N) d[i] = c[i]+c[i+(N/2)]; } __global__ void test(int N, float *c, float *d) { int i = threadIdx.x; if (i<N) d[i] = (1/(c[i]+3)); }
c5eac41c73fe3d7766d1ecc26d09468fd290dca8.cu
#include <stdio.h> #include "nanoTime.h" __global__ void productAB(int N, float *a, float *b, float *c); __global__ void sumAB(int N, float *c, float *d); __global__ void test(int N, float *c, float *d); int main (void) { printf("2x2matrixMult_v0\n"); // Initialize parameters int N = 2<<12; size_t bytes = N*sizeof(float); // Declare Host variables float *a, *b, *c, *d; // Declare Device variables float *d_a, *d_b, *d_c, *d_d; // Allocate Host variables a = (float*)malloc(bytes); b = (float*)malloc(bytes); c = (float*)malloc(bytes); d = (float*)malloc(bytes); // Allocate Device variables cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); cudaMalloc(&d_d, bytes); // Set Host variables for (int i=0; i<N; i++) { a[i] = 1.0f; b[i] = 5.0f; } // Set Device variables cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_d, d, bytes, cudaMemcpyHostToDevice); // Initialize Host Event Handeling struct timespec vartime; float time_elapsed_nanos; // Initialize Device Event Handeling cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); ///////////////////////////////////////////////////////// //// GPU Calculation ////////////////////////////////// dim3 threadsPerBlock(8,8); dim3 numBlocks(N/threadsPerBlock.x,N/threadsPerBlock.y); productAB<<<numBlocks,threadsPerBlock>>>(N, d_a, d_b, d_c); sumAB<<<1,4>>>(N, d_c, d_d); float milli0 = 0; float micro0 = 0; float milli1 = 0; float micro1 = 0; float milli2 = 0; float micro2 = 0; cudaStream_t stream0, stream1, stream2; cudaStreamCreate(&stream0); cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); cudaEventRecord(start,stream0); productAB<<<numBlocks,threadsPerBlock,0,stream0>>>(N, d_a, d_b, d_c); cudaEventRecord(stop,stream0); cudaStreamSynchronize(stream0); cudaEventElapsedTime(&milli0, start, stop); micro0 = milli0*1000; cudaEventRecord(start,stream1); sumAB<<<32,100,0,stream1>>>(N, d_c, d_d); cudaEventRecord(stop,stream1); cudaStreamSynchronize(stream1); cudaEventElapsedTime(&milli1, start, stop); micro1 = milli1*1000; cudaEventRecord(start,stream2); test<<<1,4,0,stream2>>>(N, d_c, d_d); cudaEventRecord(stop,stream2); cudaStreamSynchronize(stream2); cudaEventElapsedTime(&milli2, start, stop); micro2 = (milli2*1000); cudaEventDestroy(start); cudaEventDestroy(stop); cudaStreamDestroy(stream0); cudaStreamDestroy(stream1); cudaStreamDestroy(stream2); vartime = timer_start(); productAB<<<1,8>>>(N, d_a, d_b, d_c); sumAB<<<1,4>>>(N, d_c, d_d); time_elapsed_nanos = timer_end(vartime); float time_elapsed_micro = time_elapsed_nanos/1000; ///////////////////////////////////////////////////////// ///////////////////////////////////////////////////////// // Get Device variables cudaMemcpy(d, d_d, bytes, cudaMemcpyDeviceToHost); // Results (GPU) printf("GPU Calculation:\n"); //printf("%f %f\n", d[0], d[1]); //printf("%f %f\n", d[2], d[3]); printf("Kernel Prod Elapsed Time (microseconds): %f\n", micro0); printf("Kernel Sum Elapsed Time (microseconds): %f\n", micro1); printf("Kernel Test Elapsed Time (microseconds): %f\n", micro2); ///////////////////////////////////////////////////////// //// CPU Calculation ////////////////////////////////// //vartime = timer_start(); for (int i=0; i<8; i++) c[i] = a[i]*b[i]; for (int i=0; i<4; i++) d[i] = c[i]+c[i+4]; //time_elapsed_nanos = timer_end(vartime); //float time_elapsed_micro = time_elapsed_nanos/1000; ///////////////////////////////////////////////////////// 
///////////////////////////////////////////////////////// // Results (CPU) //printf("CPU Calculation:\n"); //printf("%f %f\n", d[0], d[1]); //printf("%f %f\n", d[2], d[3]); //printf("Elapsed Time (microseconds): %f\n", time_elapsed_micro); // Deallocate Host variables free(a); free(b); free(c); free(d); // Deallocate Device variables cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_d); return 0; } __global__ void productAB(int N, float *a, float *b, float *c) { //int i = threadIdx.x; int i = (blockIdx.x*blockDim.x) + threadIdx.x; int j = (blockIdx.y*blockDim.y) + threadIdx.y; if (i<N) c[i] = a[i]*b[j]; } __global__ void sumAB(int N, float *c, float *d) { int i = threadIdx.x; if (i<N) d[i] = c[i]+c[i+(N/2)]; } __global__ void test(int N, float *c, float *d) { int i = threadIdx.x; if (i<N) d[i] = (1/(c[i]+3)); }
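// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file pair above): the timing pattern
// used in 2x2matrixMult_v0 reduces to recording a start and a stop event
// around the launch and converting the elapsed milliseconds to microseconds.
// A minimal, self-contained version of that pattern on the default stream;
// dummy_kernel and the problem size are placeholders.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main()
{
    const int n = 1 << 20;
    float *x;
    cudaMalloc(&x, n * sizeof(float));
    cudaMemset(x, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);                      // enqueue on the default stream
    dummy_kernel<<<(n + 255) / 256, 256>>>(x, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                  // wait until the stop event fires

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);      // elapsed time in milliseconds
    printf("Kernel Elapsed Time (microseconds): %f\n", ms * 1000.0f);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(x);
    return 0;
}
// ---------------------------------------------------------------------------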
598a94c2456b2190e88828db9dc026170e071164.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#special-registers-globaltimer #include <stdint.h> extern "C" __global__ void get_global_timer( uint32_t *OUT32_LO, uint32_t *OUT32_HI, uint64_t *OUT64, uint64_t *EST_LATENCY) { auto id = blockDim.x * blockIdx.x + threadIdx.x; uint64_t t64; asm("mov.u64 %0,%globaltimer;" : "=l"(t64):); OUT64[id] = t64; const int LATENCY_SAMPLES = 2; uint64_t latency_sum = 0; uint64_t dummy = 0; for (int i = 0; i < LATENCY_SAMPLES; i++) { /* asm volatile( ".reg .u64 st, et;\n" "mov.u64 st, %clock64;\n" "mov.u64 %0,%globaltimer;\n" "mov.u64 et, %clock64;\n" "sub.u64 %0, st, et" : "=l"(t64), "=":); */ auto st = clock64(); asm volatile("mov.u64 %0,%globaltimer;" : "=l"(t64):); latency_sum += (uint64_t)(clock64() - st); dummy += t64; } if (dummy < 1) latency_sum += 1; EST_LATENCY[id] = latency_sum/LATENCY_SAMPLES; // TODO: test latency of global timer64 uint32_t t32_lo, t32_hi; asm("mov.u32 %0,%globaltimer_lo;" : "=r"(t32_lo):); asm("mov.u32 %0,%globaltimer_hi;" : "=r"(t32_hi):); OUT32_LO[id] = t32_lo; OUT32_HI[id] = t32_hi; } // get_smem_sizes<1,64,4*64>(A,B,TOT_SMEM,DYN_SMEM) extern "C" __global__ void get_smem_sizes( const uint32_t *A, uint32_t *B, uint32_t *TOT_SMEM, uint32_t *DYN_SMEM) { __shared__ uint32_t STILE[32]; extern __shared__ uint32_t DTILE[]; auto id = blockDim.x * blockIdx.x + threadIdx.x; STILE[threadIdx.x] = A[id]; DTILE[threadIdx.x] = A[id] + 1; __syncthreads(); uint32_t dsmem; asm("mov.u32 %0, %dynamic_smem_size;" : "=r"(dsmem)); DYN_SMEM[id] = dsmem; uint32_t tsmem; asm("mov.u32 %0, %total_smem_size;" : "=r"(tsmem)); TOT_SMEM[id] = tsmem; // use the SLM B[id] = STILE[(id+1) % (sizeof(STILE)/sizeof(STILE[0]))] + DTILE[(id+1) % (dsmem/sizeof(DTILE[0]))]; } // get_envs<1,32> extern "C" __global__ void get_envs( uint32_t *ENV) { auto id = blockDim.x * blockIdx.x + threadIdx.x; #define GET_ENV_REG(N)\ do {\ if (threadIdx.x == (N)) {\ asm("mov.u32 %0, %envreg" #N ";" : "=r"(ENV[id]));\ } \ } while (0) GET_ENV_REG(0); GET_ENV_REG(1); GET_ENV_REG(2); GET_ENV_REG(3); GET_ENV_REG(4); GET_ENV_REG(5); GET_ENV_REG(6); GET_ENV_REG(7); GET_ENV_REG(8); GET_ENV_REG(9); GET_ENV_REG(10); GET_ENV_REG(11); GET_ENV_REG(12); GET_ENV_REG(13); GET_ENV_REG(14); GET_ENV_REG(15); GET_ENV_REG(16); GET_ENV_REG(17); GET_ENV_REG(18); GET_ENV_REG(19); GET_ENV_REG(20); GET_ENV_REG(21); GET_ENV_REG(22); GET_ENV_REG(23); GET_ENV_REG(24); GET_ENV_REG(25); GET_ENV_REG(26); GET_ENV_REG(27); GET_ENV_REG(28); GET_ENV_REG(29); GET_ENV_REG(30); GET_ENV_REG(31); } extern "C" __global__ void get_pm32( uint32_t *PMs) { auto id = blockDim.x * blockIdx.x + threadIdx.x; if (threadIdx.x == 0) { asm("mov.u32 %0, %pm0;" : "=r"(PMs[id+0])); asm("mov.u32 %0, %pm1;" : "=r"(PMs[id+1])); asm("mov.u32 %0, %pm2;" : "=r"(PMs[id+2])); asm("mov.u32 %0, %pm3;" : "=r"(PMs[id+3])); asm("mov.u32 %0, %pm4;" : "=r"(PMs[id+4])); asm("mov.u32 %0, %pm5;" : "=r"(PMs[id+5])); asm("mov.u32 %0, %pm6;" : "=r"(PMs[id+6])); asm("mov.u32 %0, %pm7;" : "=r"(PMs[id+7])); } } extern "C" __global__ void get_pm64( uint64_t *PMs) { auto id = blockDim.x * blockIdx.x + threadIdx.x; if (threadIdx.x == 0) { asm("mov.u64 %0, %pm0_64;" : "=l"(PMs[id+0])); asm("mov.u64 %0, %pm1_64;" : "=l"(PMs[id+1])); asm("mov.u64 %0, %pm2_64;" : "=l"(PMs[id+2])); asm("mov.u64 %0, %pm3_64;" : "=l"(PMs[id+3])); asm("mov.u64 %0, %pm4_64;" : "=l"(PMs[id+4])); asm("mov.u64 %0, %pm5_64;" : "=l"(PMs[id+5])); asm("mov.u64 %0, %pm6_64;" : 
"=l"(PMs[id+6])); asm("mov.u64 %0, %pm7_64;" : "=l"(PMs[id+7])); } }
598a94c2456b2190e88828db9dc026170e071164.cu
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#special-registers-globaltimer #include <stdint.h> extern "C" __global__ void get_global_timer( uint32_t *OUT32_LO, uint32_t *OUT32_HI, uint64_t *OUT64, uint64_t *EST_LATENCY) { auto id = blockDim.x * blockIdx.x + threadIdx.x; uint64_t t64; asm("mov.u64 %0,%globaltimer;" : "=l"(t64):); OUT64[id] = t64; const int LATENCY_SAMPLES = 2; uint64_t latency_sum = 0; uint64_t dummy = 0; for (int i = 0; i < LATENCY_SAMPLES; i++) { /* asm volatile( ".reg .u64 st, et;\n" "mov.u64 st, %clock64;\n" "mov.u64 %0,%globaltimer;\n" "mov.u64 et, %clock64;\n" "sub.u64 %0, st, et" : "=l"(t64), "=":); */ auto st = clock64(); asm volatile("mov.u64 %0,%globaltimer;" : "=l"(t64):); latency_sum += (uint64_t)(clock64() - st); dummy += t64; } if (dummy < 1) latency_sum += 1; EST_LATENCY[id] = latency_sum/LATENCY_SAMPLES; // TODO: test latency of global timer64 uint32_t t32_lo, t32_hi; asm("mov.u32 %0,%globaltimer_lo;" : "=r"(t32_lo):); asm("mov.u32 %0,%globaltimer_hi;" : "=r"(t32_hi):); OUT32_LO[id] = t32_lo; OUT32_HI[id] = t32_hi; } // get_smem_sizes<1,64,4*64>(A,B,TOT_SMEM,DYN_SMEM) extern "C" __global__ void get_smem_sizes( const uint32_t *A, uint32_t *B, uint32_t *TOT_SMEM, uint32_t *DYN_SMEM) { __shared__ uint32_t STILE[32]; extern __shared__ uint32_t DTILE[]; auto id = blockDim.x * blockIdx.x + threadIdx.x; STILE[threadIdx.x] = A[id]; DTILE[threadIdx.x] = A[id] + 1; __syncthreads(); uint32_t dsmem; asm("mov.u32 %0, %dynamic_smem_size;" : "=r"(dsmem)); DYN_SMEM[id] = dsmem; uint32_t tsmem; asm("mov.u32 %0, %total_smem_size;" : "=r"(tsmem)); TOT_SMEM[id] = tsmem; // use the SLM B[id] = STILE[(id+1) % (sizeof(STILE)/sizeof(STILE[0]))] + DTILE[(id+1) % (dsmem/sizeof(DTILE[0]))]; } // get_envs<1,32> extern "C" __global__ void get_envs( uint32_t *ENV) { auto id = blockDim.x * blockIdx.x + threadIdx.x; #define GET_ENV_REG(N)\ do {\ if (threadIdx.x == (N)) {\ asm("mov.u32 %0, %envreg" #N ";" : "=r"(ENV[id]));\ } \ } while (0) GET_ENV_REG(0); GET_ENV_REG(1); GET_ENV_REG(2); GET_ENV_REG(3); GET_ENV_REG(4); GET_ENV_REG(5); GET_ENV_REG(6); GET_ENV_REG(7); GET_ENV_REG(8); GET_ENV_REG(9); GET_ENV_REG(10); GET_ENV_REG(11); GET_ENV_REG(12); GET_ENV_REG(13); GET_ENV_REG(14); GET_ENV_REG(15); GET_ENV_REG(16); GET_ENV_REG(17); GET_ENV_REG(18); GET_ENV_REG(19); GET_ENV_REG(20); GET_ENV_REG(21); GET_ENV_REG(22); GET_ENV_REG(23); GET_ENV_REG(24); GET_ENV_REG(25); GET_ENV_REG(26); GET_ENV_REG(27); GET_ENV_REG(28); GET_ENV_REG(29); GET_ENV_REG(30); GET_ENV_REG(31); } extern "C" __global__ void get_pm32( uint32_t *PMs) { auto id = blockDim.x * blockIdx.x + threadIdx.x; if (threadIdx.x == 0) { asm("mov.u32 %0, %pm0;" : "=r"(PMs[id+0])); asm("mov.u32 %0, %pm1;" : "=r"(PMs[id+1])); asm("mov.u32 %0, %pm2;" : "=r"(PMs[id+2])); asm("mov.u32 %0, %pm3;" : "=r"(PMs[id+3])); asm("mov.u32 %0, %pm4;" : "=r"(PMs[id+4])); asm("mov.u32 %0, %pm5;" : "=r"(PMs[id+5])); asm("mov.u32 %0, %pm6;" : "=r"(PMs[id+6])); asm("mov.u32 %0, %pm7;" : "=r"(PMs[id+7])); } } extern "C" __global__ void get_pm64( uint64_t *PMs) { auto id = blockDim.x * blockIdx.x + threadIdx.x; if (threadIdx.x == 0) { asm("mov.u64 %0, %pm0_64;" : "=l"(PMs[id+0])); asm("mov.u64 %0, %pm1_64;" : "=l"(PMs[id+1])); asm("mov.u64 %0, %pm2_64;" : "=l"(PMs[id+2])); asm("mov.u64 %0, %pm3_64;" : "=l"(PMs[id+3])); asm("mov.u64 %0, %pm4_64;" : "=l"(PMs[id+4])); asm("mov.u64 %0, %pm5_64;" : "=l"(PMs[id+5])); asm("mov.u64 %0, %pm6_64;" : "=l"(PMs[id+6])); asm("mov.u64 %0, %pm7_64;" : "=l"(PMs[id+7])); } }
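// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file pair above): reading a PTX
// special register such as %globaltimer needs only a one-line inline asm
// statement (note the doubled %% required inside an asm string that also has
// numbered operands). The self-contained kernel below samples the register
// once and uses clock64() to estimate how many cycles the read takes, similar
// in spirit to EST_LATENCY in get_global_timer above; sample_globaltimer is a
// hypothetical name.
#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

__global__ void sample_globaltimer()
{
    uint64_t t;
    long long c0 = clock64();
    asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(t));
    long long c1 = clock64();
    if (threadIdx.x == 0)
        printf("globaltimer = %llu ns, read took ~%lld cycles\n",
               (unsigned long long)t, c1 - c0);
}

int main()
{
    sample_globaltimer<<<1, 32>>>();
    cudaDeviceSynchronize();                     // flush device-side printf
    return 0;
}
// ---------------------------------------------------------------------------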
42aa44e10c12fbed1313e2ac74696d30f4c634b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <read_gauge.h> #include <gauge_field.h> #include "gauge_force_quda.h" #ifdef MULTI_GPU #include "face_quda.h" #endif namespace quda { #define GF_SITE_MATRIX_LOAD_TEX 1 //single precsison, 12-reconstruct #if (GF_SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink0TexSingle_recon, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink1TexSingle_recon, dir, idx, var, gf.site_ga_stride) #else #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkOdd, dir, idx, var, gf.site_ga_stride) #endif #define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride) #define RECONSTRUCT_MATRIX(sign, var) RECONSTRUCT_LINK_12(sign,var) #define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4 #define N_IN_FLOATN 4 #define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_sp12 #include "gauge_force_core.h" #undef LOAD_EVEN_MATRIX #undef LOAD_ODD_MATRIX #undef LOAD_ANTI_HERMITIAN #undef RECONSTRUCT_MATRIX #undef DECLARE_LINK_VARS #undef N_IN_FLOATN #undef GAUGE_FORCE_KERN_NAME //double precsison, 12-reconstruct #if (GF_SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX(siteLink0TexDouble, linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX(siteLink1TexDouble, linkOdd, dir, idx, var, gf.site_ga_stride) #else #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE(linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE(linkOdd, dir, idx, var, gf.site_ga_stride) #endif #define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride) #define RECONSTRUCT_MATRIX(sign, var) RECONSTRUCT_LINK_12(sign,var) #define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8 #define N_IN_FLOATN 2 #define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_dp12 #include "gauge_force_core.h" #undef LOAD_EVEN_MATRIX #undef LOAD_ODD_MATRIX #undef LOAD_ANTI_HERMITIAN #undef RECONSTRUCT_MATRIX #undef DECLARE_LINK_VARS #undef N_IN_FLOATN #undef GAUGE_FORCE_KERN_NAME //single precision, 18-reconstruct #if (GF_SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX(siteLink0TexSingle, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX(siteLink1TexSingle, dir, idx, var, gf.site_ga_stride) #else #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkOdd, dir, idx, var, gf.site_ga_stride) #endif #define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var,gf.mom_ga_stride) #define RECONSTRUCT_MATRIX(sign, var) #define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8 #define N_IN_FLOATN 2 #define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_sp18 #include "gauge_force_core.h" #undef LOAD_EVEN_MATRIX #undef LOAD_ODD_MATRIX #undef LOAD_ANTI_HERMITIAN #undef RECONSTRUCT_MATRIX #undef DECLARE_LINK_VARS #undef N_IN_FLOATN #undef 
GAUGE_FORCE_KERN_NAME //double precision, 18-reconstruct #if (GF_SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX(siteLink0TexDouble, linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX(siteLink1TexDouble, linkOdd, dir, idx, var, gf.site_ga_stride) #else #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkOdd, dir, idx, var, gf.site_ga_stride) #endif #define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride) #define RECONSTRUCT_MATRIX(sign, var) #define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8 #define N_IN_FLOATN 2 #define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_dp18 #include "gauge_force_core.h" #undef LOAD_EVEN_MATRIX #undef LOAD_ODD_MATRIX #undef LOAD_ANTI_HERMITIAN #undef RECONSTRUCT_MATRIX #undef DECLARE_LINK_VARS #undef N_IN_FLOATN #undef GAUGE_FORCE_KERN_NAME void gauge_force_init_cuda(QudaGaugeParam* param, int path_max_length) { static int gauge_force_init_cuda_flag = 0; if (gauge_force_init_cuda_flag){ return; } gauge_force_init_cuda_flag=1; int* X = param->X; int Vh = X[0]*X[1]*X[2]*X[3]/2; fat_force_const_t gf_h; gf_h.path_max_length = path_max_length; #ifdef MULTI_GPU int Vh_ex = (X[0]+4)*(X[1]+4)*(X[2]+4)*(X[3]+4)/2; gf_h.site_ga_stride = param->site_ga_pad + Vh_ex; #else gf_h.site_ga_stride = param->site_ga_pad + Vh; #endif gf_h.mom_ga_stride = param->mom_ga_pad + Vh; hipMemcpyToSymbol(gf, &gf_h, sizeof(fat_force_const_t)); } class GaugeForceCuda : public Tunable { private: cudaGaugeField &mom; const int dir; const double &eb3; const cudaGaugeField &link; const int *input_path; const int *length; const void *path_coeff; const int num_paths; const kernel_param_t &kparam; int sharedBytesPerThread() const { return 0; } int sharedBytesPerBlock(const TuneParam &) const { return 0; } // don't tune the grid dimension bool advanceGridDim(TuneParam &param) const { return false; } bool advanceBlockDim(TuneParam &param) const { bool rtn = Tunable::advanceBlockDim(param); param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1); return rtn; } public: GaugeForceCuda(cudaGaugeField &mom, const int dir, const double &eb3, const cudaGaugeField &link, const int *input_path, const int *length, const void *path_coeff, const int num_paths, const kernel_param_t &kparam) : mom(mom), dir(dir), eb3(eb3), link(link), input_path(input_path), length(length), path_coeff(path_coeff), num_paths(num_paths), kparam(kparam) { if(link.Precision() == QUDA_DOUBLE_PRECISION){ hipBindTexture(0, siteLink0TexDouble, link.Even_p(), link.Bytes()/2); hipBindTexture(0, siteLink1TexDouble, link.Odd_p(), link.Bytes()/2); }else{ //QUDA_SINGLE_PRECISION if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){ hipBindTexture(0, siteLink0TexSingle, link.Even_p(), link.Bytes()/2); hipBindTexture(0, siteLink1TexSingle, link.Odd_p(), link.Bytes()/2); }else{//QUDA_RECONSTRUCT_12 hipBindTexture(0, siteLink0TexSingle_recon, link.Even_p(), link.Bytes()/2); hipBindTexture(0, siteLink1TexSingle_recon, link.Odd_p(), link.Bytes()/2); } } } virtual ~GaugeForceCuda() { if(link.Precision() == QUDA_DOUBLE_PRECISION){ hipBindTexture(0, siteLink0TexDouble, link.Even_p(), link.Bytes()/2); hipBindTexture(0, siteLink1TexDouble, link.Odd_p(), link.Bytes()/2); }else{ //QUDA_SINGLE_PRECISION 
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){ hipBindTexture(0, siteLink0TexSingle, link.Even_p(), link.Bytes()/2); hipBindTexture(0, siteLink1TexSingle, link.Odd_p(), link.Bytes()/2); }else{//QUDA_RECONSTRUCT_12 hipBindTexture(0, siteLink0TexSingle_recon, link.Even_p(), link.Bytes()/2); hipBindTexture(0, siteLink1TexSingle_recon, link.Odd_p(), link.Bytes()/2); } } } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, dslashTuning, verbosity); if(link.Precision() == QUDA_DOUBLE_PRECISION){ if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){ hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_dp18<0>), dim3(tp.grid), dim3(tp.block), 0, 0, (double2*)mom.Even_p(), (double2*)mom.Odd_p(), dir, eb3, (double2*)link.Even_p(), (double2*)link.Odd_p(), input_path, length, (double*)path_coeff, num_paths, kparam); hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_dp18<1>), dim3(tp.grid), dim3(tp.block), 0, 0, (double2*)mom.Even_p(), (double2*)mom.Odd_p(), dir, eb3, (double2*)link.Even_p(), (double2*)link.Odd_p(), input_path, length, (double*)path_coeff, num_paths, kparam); }else{ //QUDA_RECONSTRUCT_12 hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_dp12<0>), dim3(tp.grid), dim3(tp.block), 0, 0, (double2*)mom.Even_p(), (double2*)mom.Odd_p(), dir, eb3, (double2*)link.Even_p(), (double2*)link.Odd_p(), input_path, length, (double*)path_coeff, num_paths, kparam); hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_dp12<1>), dim3(tp.grid), dim3(tp.block), 0, 0, (double2*)mom.Even_p(), (double2*)mom.Odd_p(), dir, eb3, (double2*)link.Even_p(), (double2*)link.Odd_p(), input_path, length, (double*)path_coeff, num_paths, kparam); } }else{ //QUDA_SINGLE_PRECISION if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){ hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_sp18<0>), dim3(tp.grid), dim3(tp.block), 0, 0, (float2*)mom.Even_p(), (float2*)mom.Odd_p(), dir, eb3, (float2*)link.Even_p(), (float2*)link.Odd_p(), input_path, length, (float*)path_coeff, num_paths, kparam); hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_sp18<1>), dim3(tp.grid), dim3(tp.block), 0, 0, (float2*)mom.Even_p(), (float2*)mom.Odd_p(), dir, eb3, (float2*)link.Even_p(), (float2*)link.Odd_p(), input_path, length, (float*)path_coeff, num_paths, kparam); }else{ //QUDA_RECONSTRUCT_12 hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_sp12<0>), dim3(tp.grid), dim3(tp.block), 0, 0, (float2*)mom.Even_p(), (float2*)mom.Odd_p(), dir, eb3, (float4*)link.Even_p(), (float4*)link.Odd_p(), input_path, length, (float*)path_coeff, num_paths, kparam); //odd /* The reason we do not switch the even/odd function input paramemters and the texture binding * is that we use the oddbit to decided where to load, in the kernel function */ hipLaunchKernelGGL(( parity_compute_gauge_force_kernel_sp12<1>), dim3(tp.grid), dim3(tp.block), 0, 0, (float2*)mom.Even_p(), (float2*)mom.Odd_p(), dir, eb3, (float4*)link.Even_p(), (float4*)link.Odd_p(), input_path, length, (float*)path_coeff, num_paths, kparam); } } } void preTune() { mom.backup(); } void postTune() { mom.restore(); } void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1); } /** sets default values for when tuning is disabled */ void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1); } long long flops() const { return 0; } // FIXME: add flops counter TuneKey tuneKey() const { 
std::stringstream vol, aux; vol << link.X()[0] << "x"; vol << link.X()[1] << "x"; vol << link.X()[2] << "x"; vol << link.X()[3] << "x"; aux << "threads=" << link.Volume() << ",prec=" << link.Precision(); aux << "stride=" << link.Stride() << ",recon=" << link.Reconstruct(); aux << "dir=" << dir << "num_paths=" << num_paths; return TuneKey(vol.str(), typeid(*this).name(), aux.str()); } }; void gauge_force_cuda_dir(cudaGaugeField& cudaMom, const int dir, const double eb3, const cudaGaugeField& cudaSiteLink, const QudaGaugeParam* param, int** input_path, const int* length, const void* path_coeff, const int num_paths, const int max_length) { //input_path size_t bytes = num_paths*max_length*sizeof(int); int *input_path_d = (int *) device_malloc(bytes); hipMemset(input_path_d, 0, bytes); checkCudaError(); int* input_path_h = (int *) safe_malloc(bytes); memset(input_path_h, 0, bytes); for(int i=0; i < num_paths; i++) { for(int j=0; j < length[i]; j++) { input_path_h[i*max_length + j] = input_path[i][j]; } } hipMemcpy(input_path_d, input_path_h, bytes, hipMemcpyHostToDevice); //length int* length_d = (int *) device_malloc(num_paths*sizeof(int)); hipMemcpy(length_d, length, num_paths*sizeof(int), hipMemcpyHostToDevice); //path_coeff int gsize = param->cuda_prec; void* path_coeff_d = device_malloc(num_paths*gsize); hipMemcpy(path_coeff_d, path_coeff, num_paths*gsize, hipMemcpyHostToDevice); //compute the gauge forces int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3]; kernel_param_t kparam; #ifdef MULTI_GPU for(int i=0; i<4; i++) { kparam.ghostDim[i] = commDimPartitioned(i); } #endif kparam.threads = volume/2; GaugeForceCuda gaugeForce(cudaMom, dir, eb3, cudaSiteLink, input_path_d, length_d, path_coeff_d, num_paths, kparam); gaugeForce.apply(0); checkCudaError(); host_free(input_path_h); device_free(input_path_d); device_free(length_d); device_free(path_coeff_d); } void gauge_force_cuda(cudaGaugeField& cudaMom, double eb3, cudaGaugeField& cudaSiteLink, QudaGaugeParam* param, int*** input_path, int* length, void* path_coeff, int num_paths, int max_length) { for(int dir=0; dir < 4; dir++){ gauge_force_cuda_dir(cudaMom, dir, eb3, cudaSiteLink, param, input_path[dir], length, path_coeff, num_paths, max_length); } } } // namespace quda
42aa44e10c12fbed1313e2ac74696d30f4c634b5.cu
#include <read_gauge.h> #include <gauge_field.h> #include "gauge_force_quda.h" #ifdef MULTI_GPU #include "face_quda.h" #endif namespace quda { #define GF_SITE_MATRIX_LOAD_TEX 1 //single precsison, 12-reconstruct #if (GF_SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink0TexSingle_recon, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink1TexSingle_recon, dir, idx, var, gf.site_ga_stride) #else #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkOdd, dir, idx, var, gf.site_ga_stride) #endif #define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride) #define RECONSTRUCT_MATRIX(sign, var) RECONSTRUCT_LINK_12(sign,var) #define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4 #define N_IN_FLOATN 4 #define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_sp12 #include "gauge_force_core.h" #undef LOAD_EVEN_MATRIX #undef LOAD_ODD_MATRIX #undef LOAD_ANTI_HERMITIAN #undef RECONSTRUCT_MATRIX #undef DECLARE_LINK_VARS #undef N_IN_FLOATN #undef GAUGE_FORCE_KERN_NAME //double precsison, 12-reconstruct #if (GF_SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX(siteLink0TexDouble, linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE_TEX(siteLink1TexDouble, linkOdd, dir, idx, var, gf.site_ga_stride) #else #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE(linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_DOUBLE(linkOdd, dir, idx, var, gf.site_ga_stride) #endif #define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride) #define RECONSTRUCT_MATRIX(sign, var) RECONSTRUCT_LINK_12(sign,var) #define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8 #define N_IN_FLOATN 2 #define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_dp12 #include "gauge_force_core.h" #undef LOAD_EVEN_MATRIX #undef LOAD_ODD_MATRIX #undef LOAD_ANTI_HERMITIAN #undef RECONSTRUCT_MATRIX #undef DECLARE_LINK_VARS #undef N_IN_FLOATN #undef GAUGE_FORCE_KERN_NAME //single precision, 18-reconstruct #if (GF_SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX(siteLink0TexSingle, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18_SINGLE_TEX(siteLink1TexSingle, dir, idx, var, gf.site_ga_stride) #else #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkOdd, dir, idx, var, gf.site_ga_stride) #endif #define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var,gf.mom_ga_stride) #define RECONSTRUCT_MATRIX(sign, var) #define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8 #define N_IN_FLOATN 2 #define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_sp18 #include "gauge_force_core.h" #undef LOAD_EVEN_MATRIX #undef LOAD_ODD_MATRIX #undef LOAD_ANTI_HERMITIAN #undef RECONSTRUCT_MATRIX #undef DECLARE_LINK_VARS #undef N_IN_FLOATN #undef GAUGE_FORCE_KERN_NAME //double precision, 18-reconstruct #if (GF_SITE_MATRIX_LOAD_TEX == 1) 
#define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX(siteLink0TexDouble, linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18_DOUBLE_TEX(siteLink1TexDouble, linkOdd, dir, idx, var, gf.site_ga_stride) #else #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkEven, dir, idx, var, gf.site_ga_stride) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_18(linkOdd, dir, idx, var, gf.site_ga_stride) #endif #define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, gf.mom_ga_stride) #define RECONSTRUCT_MATRIX(sign, var) #define DECLARE_LINK_VARS(var) FloatN var##0, var##1, var##2, var##3, var##4, var##5, var##6, var##7, var##8 #define N_IN_FLOATN 2 #define GAUGE_FORCE_KERN_NAME parity_compute_gauge_force_kernel_dp18 #include "gauge_force_core.h" #undef LOAD_EVEN_MATRIX #undef LOAD_ODD_MATRIX #undef LOAD_ANTI_HERMITIAN #undef RECONSTRUCT_MATRIX #undef DECLARE_LINK_VARS #undef N_IN_FLOATN #undef GAUGE_FORCE_KERN_NAME void gauge_force_init_cuda(QudaGaugeParam* param, int path_max_length) { static int gauge_force_init_cuda_flag = 0; if (gauge_force_init_cuda_flag){ return; } gauge_force_init_cuda_flag=1; int* X = param->X; int Vh = X[0]*X[1]*X[2]*X[3]/2; fat_force_const_t gf_h; gf_h.path_max_length = path_max_length; #ifdef MULTI_GPU int Vh_ex = (X[0]+4)*(X[1]+4)*(X[2]+4)*(X[3]+4)/2; gf_h.site_ga_stride = param->site_ga_pad + Vh_ex; #else gf_h.site_ga_stride = param->site_ga_pad + Vh; #endif gf_h.mom_ga_stride = param->mom_ga_pad + Vh; cudaMemcpyToSymbol(gf, &gf_h, sizeof(fat_force_const_t)); } class GaugeForceCuda : public Tunable { private: cudaGaugeField &mom; const int dir; const double &eb3; const cudaGaugeField &link; const int *input_path; const int *length; const void *path_coeff; const int num_paths; const kernel_param_t &kparam; int sharedBytesPerThread() const { return 0; } int sharedBytesPerBlock(const TuneParam &) const { return 0; } // don't tune the grid dimension bool advanceGridDim(TuneParam &param) const { return false; } bool advanceBlockDim(TuneParam &param) const { bool rtn = Tunable::advanceBlockDim(param); param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1); return rtn; } public: GaugeForceCuda(cudaGaugeField &mom, const int dir, const double &eb3, const cudaGaugeField &link, const int *input_path, const int *length, const void *path_coeff, const int num_paths, const kernel_param_t &kparam) : mom(mom), dir(dir), eb3(eb3), link(link), input_path(input_path), length(length), path_coeff(path_coeff), num_paths(num_paths), kparam(kparam) { if(link.Precision() == QUDA_DOUBLE_PRECISION){ cudaBindTexture(0, siteLink0TexDouble, link.Even_p(), link.Bytes()/2); cudaBindTexture(0, siteLink1TexDouble, link.Odd_p(), link.Bytes()/2); }else{ //QUDA_SINGLE_PRECISION if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){ cudaBindTexture(0, siteLink0TexSingle, link.Even_p(), link.Bytes()/2); cudaBindTexture(0, siteLink1TexSingle, link.Odd_p(), link.Bytes()/2); }else{//QUDA_RECONSTRUCT_12 cudaBindTexture(0, siteLink0TexSingle_recon, link.Even_p(), link.Bytes()/2); cudaBindTexture(0, siteLink1TexSingle_recon, link.Odd_p(), link.Bytes()/2); } } } virtual ~GaugeForceCuda() { if(link.Precision() == QUDA_DOUBLE_PRECISION){ cudaBindTexture(0, siteLink0TexDouble, link.Even_p(), link.Bytes()/2); cudaBindTexture(0, siteLink1TexDouble, link.Odd_p(), link.Bytes()/2); }else{ //QUDA_SINGLE_PRECISION if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){ cudaBindTexture(0, siteLink0TexSingle, link.Even_p(), 
link.Bytes()/2); cudaBindTexture(0, siteLink1TexSingle, link.Odd_p(), link.Bytes()/2); }else{//QUDA_RECONSTRUCT_12 cudaBindTexture(0, siteLink0TexSingle_recon, link.Even_p(), link.Bytes()/2); cudaBindTexture(0, siteLink1TexSingle_recon, link.Odd_p(), link.Bytes()/2); } } } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, dslashTuning, verbosity); if(link.Precision() == QUDA_DOUBLE_PRECISION){ if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){ parity_compute_gauge_force_kernel_dp18<0><<<tp.grid, tp.block>>>((double2*)mom.Even_p(), (double2*)mom.Odd_p(), dir, eb3, (double2*)link.Even_p(), (double2*)link.Odd_p(), input_path, length, (double*)path_coeff, num_paths, kparam); parity_compute_gauge_force_kernel_dp18<1><<<tp.grid, tp.block>>>((double2*)mom.Even_p(), (double2*)mom.Odd_p(), dir, eb3, (double2*)link.Even_p(), (double2*)link.Odd_p(), input_path, length, (double*)path_coeff, num_paths, kparam); }else{ //QUDA_RECONSTRUCT_12 parity_compute_gauge_force_kernel_dp12<0><<<tp.grid, tp.block>>>((double2*)mom.Even_p(), (double2*)mom.Odd_p(), dir, eb3, (double2*)link.Even_p(), (double2*)link.Odd_p(), input_path, length, (double*)path_coeff, num_paths, kparam); parity_compute_gauge_force_kernel_dp12<1><<<tp.grid, tp.block>>>((double2*)mom.Even_p(), (double2*)mom.Odd_p(), dir, eb3, (double2*)link.Even_p(), (double2*)link.Odd_p(), input_path, length, (double*)path_coeff, num_paths, kparam); } }else{ //QUDA_SINGLE_PRECISION if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){ parity_compute_gauge_force_kernel_sp18<0><<<tp.grid, tp.block>>>((float2*)mom.Even_p(), (float2*)mom.Odd_p(), dir, eb3, (float2*)link.Even_p(), (float2*)link.Odd_p(), input_path, length, (float*)path_coeff, num_paths, kparam); parity_compute_gauge_force_kernel_sp18<1><<<tp.grid, tp.block>>>((float2*)mom.Even_p(), (float2*)mom.Odd_p(), dir, eb3, (float2*)link.Even_p(), (float2*)link.Odd_p(), input_path, length, (float*)path_coeff, num_paths, kparam); }else{ //QUDA_RECONSTRUCT_12 parity_compute_gauge_force_kernel_sp12<0><<<tp.grid, tp.block>>>((float2*)mom.Even_p(), (float2*)mom.Odd_p(), dir, eb3, (float4*)link.Even_p(), (float4*)link.Odd_p(), input_path, length, (float*)path_coeff, num_paths, kparam); //odd /* The reason we do not switch the even/odd function input paramemters and the texture binding * is that we use the oddbit to decided where to load, in the kernel function */ parity_compute_gauge_force_kernel_sp12<1><<<tp.grid, tp.block>>>((float2*)mom.Even_p(), (float2*)mom.Odd_p(), dir, eb3, (float4*)link.Even_p(), (float4*)link.Odd_p(), input_path, length, (float*)path_coeff, num_paths, kparam); } } } void preTune() { mom.backup(); } void postTune() { mom.restore(); } void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1); } /** sets default values for when tuning is disabled */ void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1); } long long flops() const { return 0; } // FIXME: add flops counter TuneKey tuneKey() const { std::stringstream vol, aux; vol << link.X()[0] << "x"; vol << link.X()[1] << "x"; vol << link.X()[2] << "x"; vol << link.X()[3] << "x"; aux << "threads=" << link.Volume() << ",prec=" << link.Precision(); aux << "stride=" << link.Stride() << ",recon=" << link.Reconstruct(); aux << "dir=" << dir << "num_paths=" << num_paths; return TuneKey(vol.str(), typeid(*this).name(), aux.str()); } }; void 
gauge_force_cuda_dir(cudaGaugeField& cudaMom, const int dir, const double eb3, const cudaGaugeField& cudaSiteLink, const QudaGaugeParam* param, int** input_path, const int* length, const void* path_coeff, const int num_paths, const int max_length) { //input_path size_t bytes = num_paths*max_length*sizeof(int); int *input_path_d = (int *) device_malloc(bytes); cudaMemset(input_path_d, 0, bytes); checkCudaError(); int* input_path_h = (int *) safe_malloc(bytes); memset(input_path_h, 0, bytes); for(int i=0; i < num_paths; i++) { for(int j=0; j < length[i]; j++) { input_path_h[i*max_length + j] = input_path[i][j]; } } cudaMemcpy(input_path_d, input_path_h, bytes, cudaMemcpyHostToDevice); //length int* length_d = (int *) device_malloc(num_paths*sizeof(int)); cudaMemcpy(length_d, length, num_paths*sizeof(int), cudaMemcpyHostToDevice); //path_coeff int gsize = param->cuda_prec; void* path_coeff_d = device_malloc(num_paths*gsize); cudaMemcpy(path_coeff_d, path_coeff, num_paths*gsize, cudaMemcpyHostToDevice); //compute the gauge forces int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3]; kernel_param_t kparam; #ifdef MULTI_GPU for(int i=0; i<4; i++) { kparam.ghostDim[i] = commDimPartitioned(i); } #endif kparam.threads = volume/2; GaugeForceCuda gaugeForce(cudaMom, dir, eb3, cudaSiteLink, input_path_d, length_d, path_coeff_d, num_paths, kparam); gaugeForce.apply(0); checkCudaError(); host_free(input_path_h); device_free(input_path_d); device_free(length_d); device_free(path_coeff_d); } void gauge_force_cuda(cudaGaugeField& cudaMom, double eb3, cudaGaugeField& cudaSiteLink, QudaGaugeParam* param, int*** input_path, int* length, void* path_coeff, int num_paths, int max_length) { for(int dir=0; dir < 4; dir++){ gauge_force_cuda_dir(cudaMom, dir, eb3, cudaSiteLink, param, input_path[dir], length, path_coeff, num_paths, max_length); } } } // namespace quda
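// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file pair above): gauge_force_cuda_dir
// above flattens the ragged host table input_path[i][j] into one contiguous,
// zero-padded buffer of num_paths*max_length ints before copying it to the
// device, so a kernel can index it as input_path_d[i*max_length + j]. The
// same packing step in isolation, with hypothetical path data:
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cuda_runtime.h>

int main()
{
    const int num_paths = 3, max_length = 4;
    int path0[] = {1, 2, 3};
    int path1[] = {4, 5};
    int path2[] = {6, 7, 8, 9};
    int *input_path[num_paths] = {path0, path1, path2};
    int length[num_paths]      = {3, 2, 4};

    size_t bytes = (size_t)num_paths * max_length * sizeof(int);
    int *input_path_h = (int *)malloc(bytes);
    memset(input_path_h, 0, bytes);              // zero padding for short paths
    for (int i = 0; i < num_paths; i++)
        for (int j = 0; j < length[i]; j++)
            input_path_h[i * max_length + j] = input_path[i][j];

    int *input_path_d;
    cudaMalloc(&input_path_d, bytes);
    cudaMemcpy(input_path_d, input_path_h, bytes, cudaMemcpyHostToDevice);

    // ... a kernel would now read input_path_d[i*max_length + j] ...

    cudaFree(input_path_d);
    free(input_path_h);
    printf("packed %d ragged paths into a %dx%d row-major table\n",
           num_paths, num_paths, max_length);
    return 0;
}
// ---------------------------------------------------------------------------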
f50bfe3934a7656d4b780b78a2b5a98b5c951950.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "row_filter.h" namespace filter { template void linearRow<int, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream); } #endif /* CUDA_DISABLER */
f50bfe3934a7656d4b780b78a2b5a98b5c951950.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "row_filter.h" namespace filter { template void linearRow<int, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream); } #endif /* CUDA_DISABLER */
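In the f50bfe…/row_filter pair above the only source change is the stream type in the explicit template instantiation (cudaStream_t to hipStream_t), plus the hipify banner comment. A minimal standalone sketch of that mapping, assuming nothing from row_filter.h; the header rename shown in the first comment is the one hipify applies in the other pairs in this collection:

#include <cuda_runtime.h>              // hipify: #include "hip/hip_runtime.h"
#include <cstdio>

int main() {
  // The .cu side spells the stream type cudaStream_t; the generated .hip side
  // spells it hipStream_t -- the rest of the file is carried over verbatim.
  cudaStream_t stream;                 // hip: hipStream_t
  cudaStreamCreate(&stream);           // hip: hipStreamCreate
  printf("stream created\n");
  cudaStreamDestroy(stream);           // hip: hipStreamDestroy
  return 0;
}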
d06f214fa58947c99d153ae94ea46fb1a5a2d8d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <helper_cuda.h> #include "wavepacketson1device.h" #include "cudaUtils.h" #include "matlabUtils.h" #include "matlabData.h" #include "evolutionUtils.h" #include "evolutionAux.cu" /*** * https://github.com/mohamso/icpads14 * https://raw.githubusercontent.com/mohamso/icpads14/master/4/omp/src/Async.c * DOI: 10.1109/PADSW.2014.7097919 ***/ __constant__ EvolutionUtils::RadialCoordinate r1_dev; __constant__ EvolutionUtils::RadialCoordinate r2_dev; __constant__ double energies_dev[_EnergiesMaxSize_]; WavepacketsOnSingleDevice:: WavepacketsOnSingleDevice(const int device_index_, const int omega_start_, const int n_omegas_) : _device_index(device_index_), omega_start(omega_start_), n_omegas(n_omegas_), potential_dev(0), device_work_dev(0), omega_wavepacket_from_left_device(0), omega_wavepacket_from_right_device(0), _has_created_cublas_handle(0), _has_cufft_plans(0), computation_stream(0), computation_event_for_left(0), computation_event_for_right(0), copy_to_left_event(0), copy_to_right_event(0), data_copy_stream(0), left(0), right(0) { insist(_device_index >= 0); setup_data_on_device(); } int WavepacketsOnSingleDevice::current_device_index() const { int dev_index = -1; checkCudaErrors(hipGetDevice(&dev_index)); return dev_index; } void WavepacketsOnSingleDevice::setup_device() const { if(current_device_index() != device_index()) checkCudaErrors(hipSetDevice(device_index())); } void WavepacketsOnSingleDevice::setup_data_on_device() { setup_device(); std::cout << " Setup data on device: " << device_index() << std::endl; setup_constant_memory_on_device(); setup_computation_stream_and_event(); setup_cublas_handle(); setup_cufft_plans(); setup_potential_on_device(); setup_omega_wavepackets(); } void WavepacketsOnSingleDevice::destroy_data_on_device() { setup_device(); std::cout << " Destroy data on device: " << device_index() << std::endl; for(int i = 0; i < omega_wavepackets.size(); i++) if(omega_wavepackets[i]) { delete omega_wavepackets[i]; omega_wavepackets[i] = 0; } omega_wavepackets.resize(0); _CUDA_FREE_(potential_dev); _CUDA_FREE_(device_work_dev); destroy_cublas_handle(); destroy_cufft_plans(); destroy_streams_and_events(); reaction_probabilities.resize(0); left = 0; right = 0; } void WavepacketsOnSingleDevice::setup_potential_on_device() { if(potential_dev) return; std::cout << " Allocate and copy potential on device: " << current_device_index() << std::endl; const double *potential = MatlabData::potential(); insist(potential); const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_theta = MatlabData::theta()->n; checkCudaErrors(hipMalloc(&potential_dev, n1*n2*n_theta*sizeof(double))); insist(potential_dev); checkCudaErrors(hipMemcpyAsync(potential_dev, potential, n1*n2*n_theta*sizeof(double), hipMemcpyHostToDevice)); } void WavepacketsOnSingleDevice::setup_cublas_handle() { if(_has_created_cublas_handle) return; std::cout << " Setup cuBLAS handle on device: " << current_device_index() << std::endl; insist(hipblasCreate(&cublas_handle) == HIPBLAS_STATUS_SUCCESS); _has_created_cublas_handle = 1; } void WavepacketsOnSingleDevice::destroy_cublas_handle() { if(!_has_created_cublas_handle) return; std::cout << " Destroy cuBLAS handle on device: " << current_device_index() << std::endl; insist(hipblasDestroy(cublas_handle) == HIPBLAS_STATUS_SUCCESS); _has_created_cublas_handle = 0; } void WavepacketsOnSingleDevice::setup_cufft_plans() { 
if(_has_cufft_plans) return; std::cout << " Setup cuFFT handles on device: " << current_device_index() << std::endl; const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_theta = MatlabData::theta()->n; /* wavepacket psi is from Matlab in column-major format, * while cuFFT is using row-major format, * so to switch dimensions, after D2Z FFT, the output data is { n2, n1/2+1 }, * it is still in column-major format */ const int dims [] = { n2, n1 }; insist(hipfftPlanMany(&cufft_plan_D2Z, 2, const_cast<int *>(dims), NULL, 1, n1*n2, NULL, 1, n1*n2, HIPFFT_D2Z, n_theta) == HIPFFT_SUCCESS); cudaUtils::cufft_work_size(cufft_plan_D2Z, "D2Z"); insist(hipfftPlanMany(&cufft_plan_Z2D, 2, const_cast<int *>(dims), NULL, 1, n1*n2, NULL, 1, n1*n2, HIPFFT_Z2D, n_theta) == HIPFFT_SUCCESS); cudaUtils::cufft_work_size(cufft_plan_Z2D, "Z2D"); _has_cufft_plans = 1; } void WavepacketsOnSingleDevice::destroy_cufft_plans() { if(!_has_cufft_plans) return; std::cout << " Destroy cuFFT handles on device: " << current_device_index() << std::endl; insist(hipfftDestroy(cufft_plan_D2Z) == HIPFFT_SUCCESS); insist(hipfftDestroy(cufft_plan_Z2D) == HIPFFT_SUCCESS); _has_cufft_plans = 0; } void WavepacketsOnSingleDevice::setup_omega_wavepackets() { insist(omega_wavepackets.size() == 0); omega_wavepackets.resize(n_omegas, 0); for(int i = 0; i < n_omegas; i++) { omega_wavepackets[i] = new OmegaWavepacket(i+omega_start, potential_dev, cublas_handle, cufft_plan_D2Z, cufft_plan_Z2D, computation_stream, device_work_dev); insist(omega_wavepackets[i]); } } void WavepacketsOnSingleDevice::setup_constant_memory_on_device() { std::cout << " Setup constant memory on device: " << current_device_index() << std::endl; EvolutionUtils::copy_radial_coordinate_to_device(r1_dev, MatlabData::r1()); EvolutionUtils::copy_radial_coordinate_to_device(r2_dev, MatlabData::r2()); copy_numerical_gradient_coefficients_to_device(); copy_reaction_probabity_energies_to_device(); } void WavepacketsOnSingleDevice::setup_device_work_dev_and_copy_streams_events() { setup_device(); if(device_work_dev) return; const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_theta = MatlabData::theta()->n; const int &l_max = MatlabData::wavepacket_parameters()->l_max; long size = 0; if(left) size += n1*n2*(l_max+1); if(right) size += n1*n2*(l_max+1); size = ::max(size, (n1/2+1)*2L*n2*n_theta); std::cout << " Setup device work on device: " << current_device_index() << " " << size << " " << size*sizeof(double)/1024.0/1024.0 << std::endl; checkCudaErrors(hipMalloc(&device_work_dev, size*sizeof(double))); insist(device_work_dev); long current = 0; if(left || right) _CUDA_STREAM_CREATE_(data_copy_stream); if(left && !omega_wavepacket_from_left_device) { omega_wavepacket_from_left_device = device_work_dev + current; current += n1*n2*(l_max+1); std::cout << " Setup wavepacket from left on device: " << current_device_index() << " " << omega_wavepacket_from_left_device << std::endl; _CUDA_EVENT_CREATE_(computation_event_for_left); _CUDA_EVENT_CREATE_(copy_to_left_event); } if(right && !omega_wavepacket_from_right_device) { omega_wavepacket_from_right_device = device_work_dev + current; current += n1*n2*(l_max+1); std::cout << " Setup wavepacket from right on device: " << current_device_index() << " " << omega_wavepacket_from_right_device << std::endl; _CUDA_EVENT_CREATE_(computation_event_for_right); _CUDA_EVENT_CREATE_(copy_to_right_event); } } void WavepacketsOnSingleDevice::setup_computation_stream_and_event() { 
if(computation_stream) return; std::cout << " Setup computation stream on device: " << current_device_index() << std::endl; _CUDA_STREAM_CREATE_(computation_stream); } void WavepacketsOnSingleDevice::destroy_streams_and_events() { if(cudaUtils::n_devices() == 1) return; std::cout << " Destroy streams and events on device: " << device_index() << std::endl; _CUDA_STREAM_DESTROY_(computation_stream); _CUDA_STREAM_DESTROY_(data_copy_stream); _CUDA_EVENT_DESTROY_(computation_event_for_left); _CUDA_EVENT_DESTROY_(computation_event_for_right); _CUDA_EVENT_DESTROY_(copy_to_left_event); _CUDA_EVENT_DESTROY_(copy_to_right_event); } void WavepacketsOnSingleDevice::setup_neighbours(const WavepacketsOnSingleDevice *left_, const WavepacketsOnSingleDevice *right_) { setup_device(); left = left_; right = right_; std::cout << " Neighbours on device: " << current_device_index() << ", pointers: " << this << " " << left << " " << right << std::endl; } void WavepacketsOnSingleDevice:: forward_legendre_transform_and_copy_data_to_neighbour_devices(const int part) { insist(part == _RealPart_ || part == _ImagPart_); setup_device(); const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int n_Legs = MatlabData::wavepacket_parameters()->l_max + 1; insist(computation_stream); insist(hipblasSetStream(cublas_handle, *computation_stream) == HIPBLAS_STATUS_SUCCESS); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->setup_weighted_psi_dev(part); OmegaWavepacket *wp_for_left = 0; OmegaWavepacket *wp_for_right = 0; int wp_start = 0; int wp_end = n_omegas; if(n_omegas == 1) { if(left || right) omega_wavepackets[0]->forward_legendre_transform(); if(left) { insist(computation_event_for_left); checkCudaErrors(hipEventRecord(*computation_event_for_left, *computation_stream)); wp_for_left = omega_wavepackets[0]; wp_start = 1; } if(right) { insist(computation_event_for_right); checkCudaErrors(hipEventRecord(*computation_event_for_right, *computation_stream)); wp_for_right = omega_wavepackets[0]; wp_end = n_omegas - 1; } } else { if(left) { insist(computation_event_for_left); wp_for_left = omega_wavepackets[0]; wp_for_left->forward_legendre_transform(); checkCudaErrors(hipEventRecord(*computation_event_for_left, *computation_stream)); wp_start = 1; } if(right) { insist(computation_event_for_right); wp_for_right = omega_wavepackets[n_omegas-1]; wp_for_right->forward_legendre_transform(); checkCudaErrors(hipEventRecord(*computation_event_for_right, *computation_stream)); wp_end = n_omegas - 1; } } int sent_to_left = left ? 0 : 1; int sent_to_right = right ? 
0 : 1; while(true) { if(left && !sent_to_left) { if(left->ready_to_receive_data()) { checkCudaErrors(hipStreamWaitEvent(*data_copy_stream, *computation_event_for_left, 0)); checkCudaErrors(hipMemcpyPeerAsync(left->omega_wavepacket_from_right_device, left->device_index(), wp_for_left->legendre_psi_dev_(), device_index(), n1*n2*n_Legs*sizeof(double), *data_copy_stream)); checkCudaErrors(hipEventRecord(*copy_to_left_event, *data_copy_stream)); sent_to_left = 1; } } if(right && !sent_to_right) { if(right->ready_to_receive_data()) { checkCudaErrors(hipStreamWaitEvent(*data_copy_stream, *computation_event_for_right, 0)); checkCudaErrors(hipMemcpyPeerAsync(right->omega_wavepacket_from_left_device, right->device_index(), wp_for_right->legendre_psi_dev_(), device_index(), n1*n2*n_Legs*sizeof(double), *data_copy_stream)); checkCudaErrors(hipEventRecord(*copy_to_right_event, *data_copy_stream)); sent_to_right = 1; } } if(sent_to_left && sent_to_right) break; } for(int i = wp_start; i < wp_end; i++) omega_wavepackets[i]->forward_legendre_transform(); } void WavepacketsOnSingleDevice::calculate_T_asym_add_to_T_angle_legendre_psi_dev() { setup_device(); const OmegaWavepacket *wp = 0; for(int i = 0; i < n_omegas; i++) { if(i > 0) { wp = omega_wavepackets[i-1]; omega_wavepackets[i]->calculate_T_asym_add_to_T_angle_legendre_psi_dev(wp->legendre_psi_dev_(), wp->omega_()); } if(i < n_omegas-1) { wp = omega_wavepackets[i+1]; omega_wavepackets[i]->calculate_T_asym_add_to_T_angle_legendre_psi_dev(wp->legendre_psi_dev_(), wp->omega_()); } } #pragma omp barrier if(right) { insist(right->copy_to_left_event); checkCudaErrors(hipStreamWaitEvent(*computation_stream, *right->copy_to_left_event, 0)); wp = omega_wavepackets[n_omegas-1]; wp->calculate_T_asym_add_to_T_angle_legendre_psi_dev(omega_wavepacket_from_right_device, wp->omega_()+1); } if(left) { insist(left->copy_to_right_event); checkCudaErrors(hipStreamWaitEvent(*computation_stream, *left->copy_to_right_event, 0)); wp = omega_wavepackets[0]; wp->calculate_T_asym_add_to_T_angle_legendre_psi_dev(omega_wavepacket_from_left_device, wp->omega_()-1); } } void WavepacketsOnSingleDevice::calculate_H_weighted_psi_dev(const int part) { forward_legendre_transform_and_copy_data_to_neighbour_devices(part); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->calculate_T_bend_T_sym_add_to_T_angle_legendre_psi_dev(); calculate_T_asym_add_to_T_angle_legendre_psi_dev(); checkCudaErrors(hipDeviceSynchronize()); insist(hipblasSetStream(cublas_handle, NULL) == HIPBLAS_STATUS_SUCCESS); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->calculate_H_weighted_psi_dev(); } void WavepacketsOnSingleDevice::propagate_with_symplectic_integrator(const int i_step) { setup_device(); calculate_H_weighted_psi_dev(_RealPart_); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->propagate_with_symplectic_integrator(i_step); checkCudaErrors(hipDeviceSynchronize()); #pragma omp barrier calculate_H_weighted_psi_dev(_ImagPart_); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->propagate_with_symplectic_integrator(i_step); } void WavepacketsOnSingleDevice::print() { setup_device(); _module = 0.0; _total_energy = 0.0; for(int i = 0; i < n_omegas; i++) { _module += omega_wavepackets[i]->wavepacket_module(); _total_energy += omega_wavepackets[i]->energy(); std::cout << " " << omega_wavepackets[i]->omega_() << " " << omega_wavepackets[i]->wavepacket_module() << " " << omega_wavepackets[i]->energy() << std::endl; } } void 
WavepacketsOnSingleDevice::copy_weighted_psi_from_device_to_host() { setup_device(); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->copy_weighted_psi_from_device_to_host(); } void WavepacketsOnSingleDevice::dump_wavepackets() const { setup_device(); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->dump_wavepacket(); } int WavepacketsOnSingleDevice::copy_to_left_event_query() const { if(copy_to_left_event) return hipEventQuery(*copy_to_left_event) == hipSuccess ? 1 : 0; else return 1; } int WavepacketsOnSingleDevice::copy_to_right_event_query() const { if(copy_to_right_event) return hipEventQuery(*copy_to_right_event) == hipSuccess ? 1 : 0; else return 1; } int WavepacketsOnSingleDevice::ready_to_receive_data() const { int left_ok = 1; int right_ok = 1; if(left) left_ok = left->copy_to_right_event_query(); if(right) right_ok = right->copy_to_left_event_query(); return left_ok && right_ok ? 1 : 0; } void WavepacketsOnSingleDevice::copy_numerical_gradient_coefficients_to_device() const { if(!MatlabData::options()->calculate_reaction_probabilities) return; std::cout << " Copy numerical gradient coefficients on device: " << current_device_index() << std::endl; const int &n_points = MatlabData::crp_parameters()->n_gradient_points; Num1ststGradient::copy_gradient_coefficients_to_device(n_points); } void WavepacketsOnSingleDevice::test() { setup_device(); const int &n_points = MatlabData::crp_parameters()->n_gradient_points; std::cout << " Print " << n_points << " gradient coefficients on device: " << current_device_index() << std::endl; hipLaunchKernelGGL(( _print_gradient_coeffients_), dim3(1),dim3(1), 0, 0, n_points/2); const int &n_energies = MatlabData::crp_parameters()->n_energies; hipLaunchKernelGGL(( _print_energies_), dim3(1),dim3(1), 0, 0, n_energies); checkCudaErrors(hipDeviceSynchronize()); } void WavepacketsOnSingleDevice::copy_reaction_probabity_energies_to_device() const { if(!MatlabData::options()->calculate_reaction_probabilities) return; std::cout << " Copy reaction probability enegies to device: " << current_device_index() << std::endl; const int &n_energies = MatlabData::crp_parameters()->n_energies; const RVec &energies = MatlabData::crp_parameters()->energies; size_t size = 0; checkCudaErrors(hipGetSymbolSize(&size, energies_dev)); insist(size > n_energies*sizeof(double)); checkCudaErrors(hipMemcpyToSymbolAsync(energies_dev, energies, n_energies*sizeof(double), 0, hipMemcpyHostToDevice)); } void WavepacketsOnSingleDevice::calculate_reaction_probabilities(const int calculate) { if(!MatlabData::options()->calculate_reaction_probabilities) return; setup_device(); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->calculate_reaction_probabilities(calculate); if(calculate) { const int &n_energies = MatlabData::crp_parameters()->n_energies; reaction_probabilities.resize(n_energies, 0); reaction_probabilities.zeros(); for(int i = 0; i < n_omegas; i++) { const RVec crp(n_energies, const_cast<double *>(omega_wavepackets[i]->reaction_probabilities())); reaction_probabilities += crp; } } }
d06f214fa58947c99d153ae94ea46fb1a5a2d8d2.cu
#include <iostream> #include <helper_cuda.h> #include "wavepacketson1device.h" #include "cudaUtils.h" #include "matlabUtils.h" #include "matlabData.h" #include "evolutionUtils.h" #include "evolutionAux.cu" /*** * https://github.com/mohamso/icpads14 * https://raw.githubusercontent.com/mohamso/icpads14/master/4/omp/src/Async.c * DOI: 10.1109/PADSW.2014.7097919 ***/ __constant__ EvolutionUtils::RadialCoordinate r1_dev; __constant__ EvolutionUtils::RadialCoordinate r2_dev; __constant__ double energies_dev[_EnergiesMaxSize_]; WavepacketsOnSingleDevice:: WavepacketsOnSingleDevice(const int device_index_, const int omega_start_, const int n_omegas_) : _device_index(device_index_), omega_start(omega_start_), n_omegas(n_omegas_), potential_dev(0), device_work_dev(0), omega_wavepacket_from_left_device(0), omega_wavepacket_from_right_device(0), _has_created_cublas_handle(0), _has_cufft_plans(0), computation_stream(0), computation_event_for_left(0), computation_event_for_right(0), copy_to_left_event(0), copy_to_right_event(0), data_copy_stream(0), left(0), right(0) { insist(_device_index >= 0); setup_data_on_device(); } int WavepacketsOnSingleDevice::current_device_index() const { int dev_index = -1; checkCudaErrors(cudaGetDevice(&dev_index)); return dev_index; } void WavepacketsOnSingleDevice::setup_device() const { if(current_device_index() != device_index()) checkCudaErrors(cudaSetDevice(device_index())); } void WavepacketsOnSingleDevice::setup_data_on_device() { setup_device(); std::cout << " Setup data on device: " << device_index() << std::endl; setup_constant_memory_on_device(); setup_computation_stream_and_event(); setup_cublas_handle(); setup_cufft_plans(); setup_potential_on_device(); setup_omega_wavepackets(); } void WavepacketsOnSingleDevice::destroy_data_on_device() { setup_device(); std::cout << " Destroy data on device: " << device_index() << std::endl; for(int i = 0; i < omega_wavepackets.size(); i++) if(omega_wavepackets[i]) { delete omega_wavepackets[i]; omega_wavepackets[i] = 0; } omega_wavepackets.resize(0); _CUDA_FREE_(potential_dev); _CUDA_FREE_(device_work_dev); destroy_cublas_handle(); destroy_cufft_plans(); destroy_streams_and_events(); reaction_probabilities.resize(0); left = 0; right = 0; } void WavepacketsOnSingleDevice::setup_potential_on_device() { if(potential_dev) return; std::cout << " Allocate and copy potential on device: " << current_device_index() << std::endl; const double *potential = MatlabData::potential(); insist(potential); const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_theta = MatlabData::theta()->n; checkCudaErrors(cudaMalloc(&potential_dev, n1*n2*n_theta*sizeof(double))); insist(potential_dev); checkCudaErrors(cudaMemcpyAsync(potential_dev, potential, n1*n2*n_theta*sizeof(double), cudaMemcpyHostToDevice)); } void WavepacketsOnSingleDevice::setup_cublas_handle() { if(_has_created_cublas_handle) return; std::cout << " Setup cuBLAS handle on device: " << current_device_index() << std::endl; insist(cublasCreate(&cublas_handle) == CUBLAS_STATUS_SUCCESS); _has_created_cublas_handle = 1; } void WavepacketsOnSingleDevice::destroy_cublas_handle() { if(!_has_created_cublas_handle) return; std::cout << " Destroy cuBLAS handle on device: " << current_device_index() << std::endl; insist(cublasDestroy(cublas_handle) == CUBLAS_STATUS_SUCCESS); _has_created_cublas_handle = 0; } void WavepacketsOnSingleDevice::setup_cufft_plans() { if(_has_cufft_plans) return; std::cout << " Setup cuFFT handles on device: " << current_device_index() 
<< std::endl; const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_theta = MatlabData::theta()->n; /* wavepacket psi is from Matlab in column-major format, * while cuFFT is using row-major format, * so to switch dimensions, after D2Z FFT, the output data is { n2, n1/2+1 }, * it is still in column-major format */ const int dims [] = { n2, n1 }; insist(cufftPlanMany(&cufft_plan_D2Z, 2, const_cast<int *>(dims), NULL, 1, n1*n2, NULL, 1, n1*n2, CUFFT_D2Z, n_theta) == CUFFT_SUCCESS); cudaUtils::cufft_work_size(cufft_plan_D2Z, "D2Z"); insist(cufftPlanMany(&cufft_plan_Z2D, 2, const_cast<int *>(dims), NULL, 1, n1*n2, NULL, 1, n1*n2, CUFFT_Z2D, n_theta) == CUFFT_SUCCESS); cudaUtils::cufft_work_size(cufft_plan_Z2D, "Z2D"); _has_cufft_plans = 1; } void WavepacketsOnSingleDevice::destroy_cufft_plans() { if(!_has_cufft_plans) return; std::cout << " Destroy cuFFT handles on device: " << current_device_index() << std::endl; insist(cufftDestroy(cufft_plan_D2Z) == CUFFT_SUCCESS); insist(cufftDestroy(cufft_plan_Z2D) == CUFFT_SUCCESS); _has_cufft_plans = 0; } void WavepacketsOnSingleDevice::setup_omega_wavepackets() { insist(omega_wavepackets.size() == 0); omega_wavepackets.resize(n_omegas, 0); for(int i = 0; i < n_omegas; i++) { omega_wavepackets[i] = new OmegaWavepacket(i+omega_start, potential_dev, cublas_handle, cufft_plan_D2Z, cufft_plan_Z2D, computation_stream, device_work_dev); insist(omega_wavepackets[i]); } } void WavepacketsOnSingleDevice::setup_constant_memory_on_device() { std::cout << " Setup constant memory on device: " << current_device_index() << std::endl; EvolutionUtils::copy_radial_coordinate_to_device(r1_dev, MatlabData::r1()); EvolutionUtils::copy_radial_coordinate_to_device(r2_dev, MatlabData::r2()); copy_numerical_gradient_coefficients_to_device(); copy_reaction_probabity_energies_to_device(); } void WavepacketsOnSingleDevice::setup_device_work_dev_and_copy_streams_events() { setup_device(); if(device_work_dev) return; const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_theta = MatlabData::theta()->n; const int &l_max = MatlabData::wavepacket_parameters()->l_max; long size = 0; if(left) size += n1*n2*(l_max+1); if(right) size += n1*n2*(l_max+1); size = std::max(size, (n1/2+1)*2L*n2*n_theta); std::cout << " Setup device work on device: " << current_device_index() << " " << size << " " << size*sizeof(double)/1024.0/1024.0 << std::endl; checkCudaErrors(cudaMalloc(&device_work_dev, size*sizeof(double))); insist(device_work_dev); long current = 0; if(left || right) _CUDA_STREAM_CREATE_(data_copy_stream); if(left && !omega_wavepacket_from_left_device) { omega_wavepacket_from_left_device = device_work_dev + current; current += n1*n2*(l_max+1); std::cout << " Setup wavepacket from left on device: " << current_device_index() << " " << omega_wavepacket_from_left_device << std::endl; _CUDA_EVENT_CREATE_(computation_event_for_left); _CUDA_EVENT_CREATE_(copy_to_left_event); } if(right && !omega_wavepacket_from_right_device) { omega_wavepacket_from_right_device = device_work_dev + current; current += n1*n2*(l_max+1); std::cout << " Setup wavepacket from right on device: " << current_device_index() << " " << omega_wavepacket_from_right_device << std::endl; _CUDA_EVENT_CREATE_(computation_event_for_right); _CUDA_EVENT_CREATE_(copy_to_right_event); } } void WavepacketsOnSingleDevice::setup_computation_stream_and_event() { if(computation_stream) return; std::cout << " Setup computation stream on device: " << current_device_index() << 
std::endl; _CUDA_STREAM_CREATE_(computation_stream); } void WavepacketsOnSingleDevice::destroy_streams_and_events() { if(cudaUtils::n_devices() == 1) return; std::cout << " Destroy streams and events on device: " << device_index() << std::endl; _CUDA_STREAM_DESTROY_(computation_stream); _CUDA_STREAM_DESTROY_(data_copy_stream); _CUDA_EVENT_DESTROY_(computation_event_for_left); _CUDA_EVENT_DESTROY_(computation_event_for_right); _CUDA_EVENT_DESTROY_(copy_to_left_event); _CUDA_EVENT_DESTROY_(copy_to_right_event); } void WavepacketsOnSingleDevice::setup_neighbours(const WavepacketsOnSingleDevice *left_, const WavepacketsOnSingleDevice *right_) { setup_device(); left = left_; right = right_; std::cout << " Neighbours on device: " << current_device_index() << ", pointers: " << this << " " << left << " " << right << std::endl; } void WavepacketsOnSingleDevice:: forward_legendre_transform_and_copy_data_to_neighbour_devices(const int part) { insist(part == _RealPart_ || part == _ImagPart_); setup_device(); const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int n_Legs = MatlabData::wavepacket_parameters()->l_max + 1; insist(computation_stream); insist(cublasSetStream(cublas_handle, *computation_stream) == CUBLAS_STATUS_SUCCESS); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->setup_weighted_psi_dev(part); OmegaWavepacket *wp_for_left = 0; OmegaWavepacket *wp_for_right = 0; int wp_start = 0; int wp_end = n_omegas; if(n_omegas == 1) { if(left || right) omega_wavepackets[0]->forward_legendre_transform(); if(left) { insist(computation_event_for_left); checkCudaErrors(cudaEventRecord(*computation_event_for_left, *computation_stream)); wp_for_left = omega_wavepackets[0]; wp_start = 1; } if(right) { insist(computation_event_for_right); checkCudaErrors(cudaEventRecord(*computation_event_for_right, *computation_stream)); wp_for_right = omega_wavepackets[0]; wp_end = n_omegas - 1; } } else { if(left) { insist(computation_event_for_left); wp_for_left = omega_wavepackets[0]; wp_for_left->forward_legendre_transform(); checkCudaErrors(cudaEventRecord(*computation_event_for_left, *computation_stream)); wp_start = 1; } if(right) { insist(computation_event_for_right); wp_for_right = omega_wavepackets[n_omegas-1]; wp_for_right->forward_legendre_transform(); checkCudaErrors(cudaEventRecord(*computation_event_for_right, *computation_stream)); wp_end = n_omegas - 1; } } int sent_to_left = left ? 0 : 1; int sent_to_right = right ? 
0 : 1; while(true) { if(left && !sent_to_left) { if(left->ready_to_receive_data()) { checkCudaErrors(cudaStreamWaitEvent(*data_copy_stream, *computation_event_for_left, 0)); checkCudaErrors(cudaMemcpyPeerAsync(left->omega_wavepacket_from_right_device, left->device_index(), wp_for_left->legendre_psi_dev_(), device_index(), n1*n2*n_Legs*sizeof(double), *data_copy_stream)); checkCudaErrors(cudaEventRecord(*copy_to_left_event, *data_copy_stream)); sent_to_left = 1; } } if(right && !sent_to_right) { if(right->ready_to_receive_data()) { checkCudaErrors(cudaStreamWaitEvent(*data_copy_stream, *computation_event_for_right, 0)); checkCudaErrors(cudaMemcpyPeerAsync(right->omega_wavepacket_from_left_device, right->device_index(), wp_for_right->legendre_psi_dev_(), device_index(), n1*n2*n_Legs*sizeof(double), *data_copy_stream)); checkCudaErrors(cudaEventRecord(*copy_to_right_event, *data_copy_stream)); sent_to_right = 1; } } if(sent_to_left && sent_to_right) break; } for(int i = wp_start; i < wp_end; i++) omega_wavepackets[i]->forward_legendre_transform(); } void WavepacketsOnSingleDevice::calculate_T_asym_add_to_T_angle_legendre_psi_dev() { setup_device(); const OmegaWavepacket *wp = 0; for(int i = 0; i < n_omegas; i++) { if(i > 0) { wp = omega_wavepackets[i-1]; omega_wavepackets[i]->calculate_T_asym_add_to_T_angle_legendre_psi_dev(wp->legendre_psi_dev_(), wp->omega_()); } if(i < n_omegas-1) { wp = omega_wavepackets[i+1]; omega_wavepackets[i]->calculate_T_asym_add_to_T_angle_legendre_psi_dev(wp->legendre_psi_dev_(), wp->omega_()); } } #pragma omp barrier if(right) { insist(right->copy_to_left_event); checkCudaErrors(cudaStreamWaitEvent(*computation_stream, *right->copy_to_left_event, 0)); wp = omega_wavepackets[n_omegas-1]; wp->calculate_T_asym_add_to_T_angle_legendre_psi_dev(omega_wavepacket_from_right_device, wp->omega_()+1); } if(left) { insist(left->copy_to_right_event); checkCudaErrors(cudaStreamWaitEvent(*computation_stream, *left->copy_to_right_event, 0)); wp = omega_wavepackets[0]; wp->calculate_T_asym_add_to_T_angle_legendre_psi_dev(omega_wavepacket_from_left_device, wp->omega_()-1); } } void WavepacketsOnSingleDevice::calculate_H_weighted_psi_dev(const int part) { forward_legendre_transform_and_copy_data_to_neighbour_devices(part); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->calculate_T_bend_T_sym_add_to_T_angle_legendre_psi_dev(); calculate_T_asym_add_to_T_angle_legendre_psi_dev(); checkCudaErrors(cudaDeviceSynchronize()); insist(cublasSetStream(cublas_handle, NULL) == CUBLAS_STATUS_SUCCESS); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->calculate_H_weighted_psi_dev(); } void WavepacketsOnSingleDevice::propagate_with_symplectic_integrator(const int i_step) { setup_device(); calculate_H_weighted_psi_dev(_RealPart_); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->propagate_with_symplectic_integrator(i_step); checkCudaErrors(cudaDeviceSynchronize()); #pragma omp barrier calculate_H_weighted_psi_dev(_ImagPart_); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->propagate_with_symplectic_integrator(i_step); } void WavepacketsOnSingleDevice::print() { setup_device(); _module = 0.0; _total_energy = 0.0; for(int i = 0; i < n_omegas; i++) { _module += omega_wavepackets[i]->wavepacket_module(); _total_energy += omega_wavepackets[i]->energy(); std::cout << " " << omega_wavepackets[i]->omega_() << " " << omega_wavepackets[i]->wavepacket_module() << " " << omega_wavepackets[i]->energy() << std::endl; } } void 
WavepacketsOnSingleDevice::copy_weighted_psi_from_device_to_host() { setup_device(); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->copy_weighted_psi_from_device_to_host(); } void WavepacketsOnSingleDevice::dump_wavepackets() const { setup_device(); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->dump_wavepacket(); } int WavepacketsOnSingleDevice::copy_to_left_event_query() const { if(copy_to_left_event) return cudaEventQuery(*copy_to_left_event) == cudaSuccess ? 1 : 0; else return 1; } int WavepacketsOnSingleDevice::copy_to_right_event_query() const { if(copy_to_right_event) return cudaEventQuery(*copy_to_right_event) == cudaSuccess ? 1 : 0; else return 1; } int WavepacketsOnSingleDevice::ready_to_receive_data() const { int left_ok = 1; int right_ok = 1; if(left) left_ok = left->copy_to_right_event_query(); if(right) right_ok = right->copy_to_left_event_query(); return left_ok && right_ok ? 1 : 0; } void WavepacketsOnSingleDevice::copy_numerical_gradient_coefficients_to_device() const { if(!MatlabData::options()->calculate_reaction_probabilities) return; std::cout << " Copy numerical gradient coefficients on device: " << current_device_index() << std::endl; const int &n_points = MatlabData::crp_parameters()->n_gradient_points; Num1ststGradient::copy_gradient_coefficients_to_device(n_points); } void WavepacketsOnSingleDevice::test() { setup_device(); const int &n_points = MatlabData::crp_parameters()->n_gradient_points; std::cout << " Print " << n_points << " gradient coefficients on device: " << current_device_index() << std::endl; _print_gradient_coeffients_<<<1,1>>>(n_points/2); const int &n_energies = MatlabData::crp_parameters()->n_energies; _print_energies_<<<1,1>>>(n_energies); checkCudaErrors(cudaDeviceSynchronize()); } void WavepacketsOnSingleDevice::copy_reaction_probabity_energies_to_device() const { if(!MatlabData::options()->calculate_reaction_probabilities) return; std::cout << " Copy reaction probability enegies to device: " << current_device_index() << std::endl; const int &n_energies = MatlabData::crp_parameters()->n_energies; const RVec &energies = MatlabData::crp_parameters()->energies; size_t size = 0; checkCudaErrors(cudaGetSymbolSize(&size, energies_dev)); insist(size > n_energies*sizeof(double)); checkCudaErrors(cudaMemcpyToSymbolAsync(energies_dev, energies, n_energies*sizeof(double), 0, cudaMemcpyHostToDevice)); } void WavepacketsOnSingleDevice::calculate_reaction_probabilities(const int calculate) { if(!MatlabData::options()->calculate_reaction_probabilities) return; setup_device(); for(int i = 0; i < n_omegas; i++) omega_wavepackets[i]->calculate_reaction_probabilities(calculate); if(calculate) { const int &n_energies = MatlabData::crp_parameters()->n_energies; reaction_probabilities.resize(n_energies, 0); reaction_probabilities.zeros(); for(int i = 0; i < n_omegas; i++) { const RVec crp(n_energies, const_cast<double *>(omega_wavepackets[i]->reaction_probabilities())); reaction_probabilities += crp; } } }
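The d06f214… pair shows that hipify also renames the library APIs, not just the runtime: cuBLAS becomes hipBLAS and cuFFT becomes hipFFT. A minimal sketch of those renames under stated assumptions -- the 8x8 plan dimensions and batch of 1 are made up for illustration and plain error checks stand in for the file's insist macro:

#include <cublas_v2.h>                                  // hip: hipblas.h
#include <cufft.h>                                      // hip: hipfft.h

int main() {
  cublasHandle_t blas;                                  // hip: hipblasHandle_t
  if (cublasCreate(&blas) != CUBLAS_STATUS_SUCCESS)     // hip: hipblasCreate / HIPBLAS_STATUS_SUCCESS
    return 1;
  cufftHandle plan;                                     // hip: hipfftHandle
  int dims[] = { 8, 8 };
  if (cufftPlanMany(&plan, 2, dims, NULL, 1, 64, NULL, 1, 64,
                    CUFFT_D2Z, 1) != CUFFT_SUCCESS)     // hip: hipfftPlanMany / HIPFFT_D2Z / HIPFFT_SUCCESS
    return 1;
  cufftDestroy(plan);                                   // hip: hipfftDestroy
  cublasDestroy(blas);                                  // hip: hipblasDestroy
  return 0;
}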
7af2022b5bc4633d6cc79854cc13717477673f13.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************** * DESCRIPTION: * Serial Concurrent Wave Equation - C Version * This program implements the concurrent wave equation *********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #define MAXPOINTS 1000000 #define MAXSTEPS 1000000 #define MINPOINTS 20 #define PI 3.14159265 void check_param(void); void init_line(void); void update (void); void printfinal (void); int nsteps, /* number of time steps */ tpoints, /* total points along string */ rcode; /* generic return code */ float *hostVal; float *devOldVal, *devNewVal; /********************************************************************** * Checks input values from parameters *********************************************************************/ void check_param(void) { char tchar[20]; /* check number of points, number of iterations */ while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) { printf("Enter number of points along vibrating string [%d-%d]: " ,MINPOINTS, MAXPOINTS); scanf("%s", tchar); tpoints = atoi(tchar); if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS); } while ((nsteps < 1) || (nsteps > MAXSTEPS)) { printf("Enter number of time steps [1-%d]: ", MAXSTEPS); scanf("%s", tchar); nsteps = atoi(tchar); if ((nsteps < 1) || (nsteps > MAXSTEPS)) printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS); } printf("Using points = %d, steps = %d\n", tpoints, nsteps); } /********************************************************************** * Initialize points on line *********************************************************************/ __global__ void init_line(int __tpoints, float* __oldval, float* __newval) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; /* Calculate initial values based on sine curve */ for (int i = index; i < __tpoints; i+=stride) { float x = (float)i / (__tpoints-1); __oldval[i] = __newval[i] = __sinf(2.0 * PI * x); } } /********************************************************************** * Calculate new values using wave equation *********************************************************************/ __device__ float do_math(float __newval, float __oldval) { float dtime, c, dx, tau, sqtau; dtime = 0.3; c = 1.0; dx = 1.0; tau = (c * dtime / dx); sqtau = tau * tau; return (2.0 * __newval) - __oldval + (sqtau * (-2.0)*__newval); } /********************************************************************** * Update all values along line a specified number of times *********************************************************************/ __global__ void update(int __tpoints, int __nsteps, float* __oldval, float* __newval) { float __localval; int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; /* Update values for each time step */ for (int i = 0; i< __nsteps; i++) { /* Update points along line for this time step */ for (int j = index; j < __tpoints; j+=stride) { /* global endpoints */ if ((j == 0) || (j == (__tpoints-1))) __newval[j] = 0.0; else __localval = do_math(__newval[j], __oldval[j]); __oldval[j] = __newval[j]; __newval[j] = __localval; } } } /********************************************************************** * Print final results *********************************************************************/ void 
printfinal() { int i; for (i = 0; i < tpoints; i++) { printf("%6.4f ", hostVal[i]); if ((i+1)%10 == 0) printf("\n"); } } /********************************************************************** * Main program *********************************************************************/ int main(int argc, char *argv[]) { sscanf(argv[1],"%d",&tpoints); sscanf(argv[2],"%d",&nsteps); int blockSize = 256; int numBlocks = (tpoints + blockSize + 1) / blockSize; hipMalloc(&devOldVal, (MAXPOINTS+2) * sizeof(float)); hipMalloc(&devNewVal, (MAXPOINTS+2) * sizeof(float)); check_param(); printf("Initializing points on the line...\n"); hipLaunchKernelGGL(( init_line), dim3(numBlocks), dim3(blockSize), 0, 0, tpoints, devOldVal, devNewVal); printf("Updating all points for all time steps...\n"); hipLaunchKernelGGL(( update), dim3(numBlocks), dim3(blockSize), 0, 0, tpoints, nsteps, devOldVal, devNewVal); hostVal = (float*) malloc(sizeof(float) * tpoints); hipDeviceSynchronize(); hipMemcpy(hostVal, devNewVal, sizeof(float) * tpoints, hipMemcpyDeviceToHost); printf("Printing final results...\n"); printfinal(); printf("\nDone.\n\n"); return 0; }
7af2022b5bc4633d6cc79854cc13717477673f13.cu
/********************************************************************** * DESCRIPTION: * Serial Concurrent Wave Equation - C Version * This program implements the concurrent wave equation *********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #define MAXPOINTS 1000000 #define MAXSTEPS 1000000 #define MINPOINTS 20 #define PI 3.14159265 void check_param(void); void init_line(void); void update (void); void printfinal (void); int nsteps, /* number of time steps */ tpoints, /* total points along string */ rcode; /* generic return code */ float *hostVal; float *devOldVal, *devNewVal; /********************************************************************** * Checks input values from parameters *********************************************************************/ void check_param(void) { char tchar[20]; /* check number of points, number of iterations */ while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) { printf("Enter number of points along vibrating string [%d-%d]: " ,MINPOINTS, MAXPOINTS); scanf("%s", tchar); tpoints = atoi(tchar); if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS); } while ((nsteps < 1) || (nsteps > MAXSTEPS)) { printf("Enter number of time steps [1-%d]: ", MAXSTEPS); scanf("%s", tchar); nsteps = atoi(tchar); if ((nsteps < 1) || (nsteps > MAXSTEPS)) printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS); } printf("Using points = %d, steps = %d\n", tpoints, nsteps); } /********************************************************************** * Initialize points on line *********************************************************************/ __global__ void init_line(int __tpoints, float* __oldval, float* __newval) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; /* Calculate initial values based on sine curve */ for (int i = index; i < __tpoints; i+=stride) { float x = (float)i / (__tpoints-1); __oldval[i] = __newval[i] = __sinf(2.0 * PI * x); } } /********************************************************************** * Calculate new values using wave equation *********************************************************************/ __device__ float do_math(float __newval, float __oldval) { float dtime, c, dx, tau, sqtau; dtime = 0.3; c = 1.0; dx = 1.0; tau = (c * dtime / dx); sqtau = tau * tau; return (2.0 * __newval) - __oldval + (sqtau * (-2.0)*__newval); } /********************************************************************** * Update all values along line a specified number of times *********************************************************************/ __global__ void update(int __tpoints, int __nsteps, float* __oldval, float* __newval) { float __localval; int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; /* Update values for each time step */ for (int i = 0; i< __nsteps; i++) { /* Update points along line for this time step */ for (int j = index; j < __tpoints; j+=stride) { /* global endpoints */ if ((j == 0) || (j == (__tpoints-1))) __newval[j] = 0.0; else __localval = do_math(__newval[j], __oldval[j]); __oldval[j] = __newval[j]; __newval[j] = __localval; } } } /********************************************************************** * Print final results *********************************************************************/ void printfinal() { int i; for (i = 0; i < tpoints; i++) { printf("%6.4f ", hostVal[i]); if 
((i+1)%10 == 0) printf("\n"); } } /********************************************************************** * Main program *********************************************************************/ int main(int argc, char *argv[]) { sscanf(argv[1],"%d",&tpoints); sscanf(argv[2],"%d",&nsteps); int blockSize = 256; int numBlocks = (tpoints + blockSize + 1) / blockSize; cudaMalloc(&devOldVal, (MAXPOINTS+2) * sizeof(float)); cudaMalloc(&devNewVal, (MAXPOINTS+2) * sizeof(float)); check_param(); printf("Initializing points on the line...\n"); init_line<<<numBlocks, blockSize>>>(tpoints, devOldVal, devNewVal); printf("Updating all points for all time steps...\n"); update<<<numBlocks, blockSize>>>(tpoints, nsteps, devOldVal, devNewVal); hostVal = (float*) malloc(sizeof(float) * tpoints); cudaDeviceSynchronize(); cudaMemcpy(hostVal, devNewVal, sizeof(float) * tpoints, cudaMemcpyDeviceToHost); printf("Printing final results...\n"); printfinal(); printf("\nDone.\n\n"); return 0; }
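The 7af2022… pair above is the clearest illustration of the launch-syntax rewrite: kernel<<<grid, block>>>(args) in the .cu file becomes hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), 0, 0, args) in the .hip file, where the extra "0, 0" are the shared-memory size and the stream. A minimal sketch with a stand-in fill kernel; the kernel name and sizes are illustrative only:

#include <cuda_runtime.h>
#include <cstdio>

__global__ void fill(float *out, int n, float value) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = value;
}

int main() {
  const int n = 1024, blockSize = 256;
  const int numBlocks = (n + blockSize - 1) / blockSize;
  float *dev = NULL;
  cudaMalloc(&dev, n * sizeof(float));            // hip: hipMalloc
  // hip: hipLaunchKernelGGL(fill, dim3(numBlocks), dim3(blockSize), 0, 0, dev, n, 1.0f);
  fill<<<numBlocks, blockSize>>>(dev, n, 1.0f);
  cudaDeviceSynchronize();                        // hip: hipDeviceSynchronize
  cudaFree(dev);                                  // hip: hipFree
  printf("done\n");
  return 0;
}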
a941c25e95aed2f9bce7687424252457af872454.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> namespace { template <typename scalar_t> __global__ void sub_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> x1, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> x2, torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> y) { const int n2 = x2.size(2); const int n1 = x1.size(2); const int c = x1.size(1); const int inc12 = blockIdx.x * blockDim.x + threadIdx.x; const int ic = inc12 / (n1*n2); const int in12 = inc12 % (n1*n2); const int in1 = in12 / n2; const int in2 = in12 % n2; if (inc12 < n1 * n2 * c ){ y[blockIdx.y][ic][in1][in2] = x1[blockIdx.y][ic][in1] - x2[blockIdx.y][ic][in2]; } } template <typename scalar_t> __global__ void sub_cuda_backward_kernel( torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> dx1, torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> dx2, const torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> dy) { const int n2 = dx2.size(2); const int n1 = dx1.size(2); const int c = dx1.size(1); const int inc12 = blockIdx.x * blockDim.x + threadIdx.x; const int ic = inc12 / (n1*n2); const int in12 = inc12 % (n1*n2); const int in1 = in12 / n2; const int in2 = in12 % n2; if (inc12 < n1 * n2 * c ){ dx1[blockIdx.y][ic][in1] += dy[blockIdx.y][ic][in1][in2]; dx2[blockIdx.y][ic][in2] -= dy[blockIdx.y][ic][in1][in2]; } } } // namespace torch::Tensor sub_cuda_forward( torch::Tensor x1, torch::Tensor x2) { const auto batch_size = x1.size(0); const auto channel_size = x1.size(1); const auto n1 = x1.size(2); const auto n2 = x2.size(2); auto y = torch::zeros({batch_size, channel_size, n1, n2}); const int threads = 1024; const dim3 blocks((n1 * n2 * channel_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(x1.type(), "sub_forward_cuda", ([&] { hipLaunchKernelGGL(( sub_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, x1.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), x2.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), y.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>()); })); return y; } std::vector<torch::Tensor> sub_cuda_backward( torch::Tensor dy) { const auto batch_size = dy.size(0); const auto channel_size = dy.size(1); const auto n1 = dy.size(2); const auto n2 = dy.size(3); auto dx1 = torch::zeros({batch_size, channel_size, n1}); auto dx2 = torch::zeros({batch_size, channel_size, n2}); const int threads = 1024; const dim3 blocks((n1 * n2 * channel_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(dy.type(), "sub_forward_cuda", ([&] { hipLaunchKernelGGL(( sub_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, dx1.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), dx2.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), dy.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>()); })); return {dx1, dx2}; }
a941c25e95aed2f9bce7687424252457af872454.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> namespace { template <typename scalar_t> __global__ void sub_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> x1, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> x2, torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> y) { const int n2 = x2.size(2); const int n1 = x1.size(2); const int c = x1.size(1); const int inc12 = blockIdx.x * blockDim.x + threadIdx.x; const int ic = inc12 / (n1*n2); const int in12 = inc12 % (n1*n2); const int in1 = in12 / n2; const int in2 = in12 % n2; if (inc12 < n1 * n2 * c ){ y[blockIdx.y][ic][in1][in2] = x1[blockIdx.y][ic][in1] - x2[blockIdx.y][ic][in2]; } } template <typename scalar_t> __global__ void sub_cuda_backward_kernel( torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> dx1, torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> dx2, const torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> dy) { const int n2 = dx2.size(2); const int n1 = dx1.size(2); const int c = dx1.size(1); const int inc12 = blockIdx.x * blockDim.x + threadIdx.x; const int ic = inc12 / (n1*n2); const int in12 = inc12 % (n1*n2); const int in1 = in12 / n2; const int in2 = in12 % n2; if (inc12 < n1 * n2 * c ){ dx1[blockIdx.y][ic][in1] += dy[blockIdx.y][ic][in1][in2]; dx2[blockIdx.y][ic][in2] -= dy[blockIdx.y][ic][in1][in2]; } } } // namespace torch::Tensor sub_cuda_forward( torch::Tensor x1, torch::Tensor x2) { const auto batch_size = x1.size(0); const auto channel_size = x1.size(1); const auto n1 = x1.size(2); const auto n2 = x2.size(2); auto y = torch::zeros({batch_size, channel_size, n1, n2}); const int threads = 1024; const dim3 blocks((n1 * n2 * channel_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(x1.type(), "sub_forward_cuda", ([&] { sub_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( x1.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), x2.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), y.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>()); })); return y; } std::vector<torch::Tensor> sub_cuda_backward( torch::Tensor dy) { const auto batch_size = dy.size(0); const auto channel_size = dy.size(1); const auto n1 = dy.size(2); const auto n2 = dy.size(3); auto dx1 = torch::zeros({batch_size, channel_size, n1}); auto dx2 = torch::zeros({batch_size, channel_size, n2}); const int threads = 1024; const dim3 blocks((n1 * n2 * channel_size + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(dy.type(), "sub_forward_cuda", ([&] { sub_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( dx1.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), dx2.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), dy.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>()); })); return {dx1, dx2}; }
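In the a941c25… pair the kernels themselves are untouched; hipify only collapses the two CUDA headers into "hip/hip_runtime.h" and rewrites the launches inside AT_DISPATCH_FLOATING_TYPES. A compile-only sketch of the same pattern, reduced from sub_cuda_forward to a hypothetical copy_kernel/copy_cuda (both names are assumptions, not part of the extension):

#include <torch/extension.h>
#include <cuda.h>            // hip: these two headers both become "hip/hip_runtime.h"
#include <cuda_runtime.h>

template <typename scalar_t>
__global__ void copy_kernel(const scalar_t *in, scalar_t *out, int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in[i];
}

torch::Tensor copy_cuda(torch::Tensor x) {
  auto y = torch::zeros_like(x);
  const int n = static_cast<int>(x.numel());
  const int threads = 1024;
  const int blocks = (n + threads - 1) / threads;
  AT_DISPATCH_FLOATING_TYPES(x.type(), "copy_cuda", ([&] {
    // hip: hipLaunchKernelGGL((copy_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, ...)
    copy_kernel<scalar_t><<<blocks, threads>>>(
        x.data_ptr<scalar_t>(), y.data_ptr<scalar_t>(), n);
  }));
  return y;
}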
a2110cd4dadcfe692e2b7ed8f1a894d2f3f520ef.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define BLOCKSIZE 8 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene *hst_scene = NULL; static glm::vec3 *dev_image = NULL; //Device variables static Camera *dev_camera = NULL; static Geom *dev_scene_geom = NULL; static Geom *dev_scene_lights = NULL; static Material *dev_scene_material = NULL; static RayState *dev_ray_array = NULL; void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMalloc(&dev_camera, sizeof(Camera)); hipMemcpy(dev_camera, &cam, sizeof(Camera), hipMemcpyHostToDevice); checkCUDAError("Problem with camera memcpy"); hipMalloc(&dev_scene_geom, hst_scene->geoms.size() * sizeof(Geom)); hipMemcpy(dev_scene_geom, hst_scene->geoms.data(), hst_scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); checkCUDAError("Problem with scene geometry memcpy"); hipMalloc(&dev_scene_lights, hst_scene->lights.size() * sizeof(Geom)); hipMemcpy(dev_scene_lights, hst_scene->lights.data(), hst_scene->lights.size() * sizeof(Geom), hipMemcpyHostToDevice); checkCUDAError("Problem with scene lights memcpy"); hipMalloc(&dev_scene_material, hst_scene->materials.size() * sizeof(Material)); hipMemcpy(dev_scene_material, hst_scene->materials.data(), hst_scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); checkCUDAError("Problem with scene material memcpy"); hipMalloc(&dev_ray_array, pixelcount * sizeof(RayState)); checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null 
hipFree(dev_camera); hipFree(dev_scene_geom); hipFree(dev_scene_material); hipFree(dev_ray_array); checkCUDAError("pathtraceFree"); } /** * Example function to generate static and test the CUDA-GL interop. * Delete this once you're done looking at it! */ __global__ void generateNoiseDeleteMe(Camera cam, int iter, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0); thrust::uniform_real_distribution<float> u01(0, 1); // CHECKITOUT: Note that on every iteration, noise gets added onto // the image (not replaced). As a result, the image smooths out over // time, since the output image is the contents of this array divided // by the number of iterations. // // Your renderer will do the same thing, and, over time, it will become // smoother. image[index] += glm::vec3(u01(rng)); } } /** * Generate gittered ray within pixel from camera to the scene */ __global__ void generateFirstLevelRays(Camera* cam, RayState* rays) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam->resolution.x && y < cam->resolution.y) { int index = x + (y * cam->resolution.x); //Set the ray start position which is the camera position rays[index].ray.origin = cam->position; //Set the ray direction // 1. Get the pixel position in the world coordinates glm::vec3 pixelWorld = cam->mPosition + (cam->hVector * ((2.0f * (x*1.0f/(cam->resolution.x - 1.0f)) ) - 1) ) + (cam->vVector * (1 - (2.0f * (y*1.0f/(cam->resolution.y - 1.0f))) ) ); // 2. Get the normalized ray direction from the ray origin (camera position) to the pixel world coordinates rays[index].ray.direction = glm::normalize(pixelWorld - cam->position); //Set the color carried by the ray to white rays[index].color = glm::vec3(1.0f, 1.0f, 1.0f); //Set the ray as alive rays[index].isTerminated = false; } } /** * */ __global__ void pathIteration(int iter, int depth, RayState *rays, Camera *cam, Geom *geom, Geom *lights, Material *mat, int geomCount, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam->resolution.x && y < cam->resolution.y) { int index = x + (y * cam->resolution.x); if(!rays[index].isTerminated) { int intersectionT = -1; int materialIndex = 0; int geomIndex = 0; glm::vec3 intersectionPoint, intersectionNormal; for(int i = 0; i < geomCount; ++i) { glm::vec3 currentIntersectionPoint, currentNormal; bool outside = false; int t = -1; if(geom[i].type == GeomType::SPHERE) { t = sphereIntersectionTest(geom[i], rays[index].ray, currentIntersectionPoint, currentNormal, outside); } else if(geom[i].type == GeomType::CUBE) { t = boxIntersectionTest(geom[i], rays[index].ray, currentIntersectionPoint, currentNormal, outside); } if(t > 0 && (t < intersectionT || intersectionT < 0)) { materialIndex = geom[i].materialid; geomIndex = i; intersectionT = t; intersectionPoint = currentIntersectionPoint; intersectionNormal = currentNormal; } } if(intersectionT > 0) { if(mat[materialIndex].hasRefractive) { //Get the refracted ray rays[index].ray.direction = glm::normalize(glm::refract(rays[index].ray.direction, intersectionNormal, 1.0f/mat[materialIndex].indexOfRefraction)); rays[index].ray.origin = intersectionPoint + (1e-3f * rays[index].ray.direction); //Intersect again bool outside = true; if(geom[geomIndex].type 
== GeomType::SPHERE) { intersectionT = sphereIntersectionTest(geom[geomIndex], rays[index].ray, intersectionPoint, intersectionNormal, outside); } else if(geom[geomIndex].type == GeomType::CUBE) { intersectionT = boxIntersectionTest(geom[geomIndex], rays[index].ray, intersectionPoint, intersectionNormal, outside); } //Get the outgoing refracted ray rays[index].ray.direction = glm::normalize(glm::refract(rays[index].ray.direction, intersectionNormal, mat[materialIndex].indexOfRefraction)); rays[index].ray.origin = intersectionPoint + (1e-3f * rays[index].ray.direction); //Multiply by the diffuse color rays[index].color *= mat[materialIndex].color; } else { thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, depth); scatterRay(rays[index].ray, rays[index].color, intersectionPoint, intersectionNormal, mat[materialIndex], lights, cam->position, rng); //Check if the geometry hit is a light source, set it as dead if(mat[materialIndex].emittance > 0) { rays[index].isTerminated = true; } } } else { //The ray didn't intersect with anything, set it as dead rays[index].color = glm::vec3(0.0f); rays[index].isTerminated = true; } } else { image[index] += rays[index].color/(1.0f*depth); } } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const dim3 blockSize2d(BLOCKSIZE, BLOCKSIZE); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray is a (ray, color) pair, where color starts as the // multiplicative identity, white = (1, 1, 1). // * For debugging, you can output your ray directions as colors. // * For each depth: // * Compute one new (ray, color) pair along each path (using scatterRay). // Note that many rays will terminate by hitting a light or hitting // nothing at all. You'll have to decide how to represent your path rays // and how you'll mark terminated rays. // * Color is attenuated (multiplied) by reflections off of any object // surface. // * You can debug your ray-scene intersections by displaying various // values as colors, e.g., the first surface normal, the first bounced // ray direction, the first unlit material color, etc. // * Add all of the terminated rays' results into the appropriate pixels. // * Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * Finally, handle all of the paths that still haven't terminated. // (Easy way is to make them black or background-colored.) //Generate all first level rays and save them hipLaunchKernelGGL(( generateFirstLevelRays), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, dev_camera, dev_ray_array); //Create a for loop that iterates over the desired depth //For each loop iteration // * determine the number of threads and thus blocks needed // * call the pathtrace kernel for each ray // * do stream compaction to get rid of all the terminated rays and get the remaining number of rays! 
for(int i = 0; i < traceDepth; ++i) { hipLaunchKernelGGL(( pathIteration), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, iter, i, dev_ray_array, dev_camera, dev_scene_geom, dev_scene_lights, dev_scene_material, hst_scene->geoms.size(), dev_image); checkCUDAError("path iteration"); } /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image); checkCUDAError("send to PBO"); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); checkCUDAError("memcpy image data"); }
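The recap comments in the path tracer above leave stream compaction as a TODO ("You may use either your implementation or thrust::remove_if"). A minimal sketch of that step, assuming the RayState layout used in this file; the predicate and helper names are illustrative, not part of the original source, and terminated rays are assumed to have already contributed their color to the image:

#include <thrust/remove.h>
#include <thrust/execution_policy.h>

// Predicate for thrust::remove_if: true for rays that should be compacted away.
struct IsRayTerminated {
    __host__ __device__ bool operator()(const RayState &r) const { return r.isTerminated; }
};

// Compacts dev_rays in place and returns the number of rays still alive;
// later bounces would then use a 1D launch over this count, as the recap suggests.
int compactRays(RayState *dev_rays, int numRays) {
    RayState *newEnd = thrust::remove_if(thrust::device, dev_rays, dev_rays + numRays, IsRayTerminated());
    return static_cast<int>(newEnd - dev_rays);
}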
a2110cd4dadcfe692e2b7ed8f1a894d2f3f520ef.cu
#include <cstdio> #include <cuda.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define BLOCKSIZE 8 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene *hst_scene = NULL; static glm::vec3 *dev_image = NULL; //Device variables static Camera *dev_camera = NULL; static Geom *dev_scene_geom = NULL; static Geom *dev_scene_lights = NULL; static Material *dev_scene_material = NULL; static RayState *dev_ray_array = NULL; void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMalloc(&dev_camera, sizeof(Camera)); cudaMemcpy(dev_camera, &cam, sizeof(Camera), cudaMemcpyHostToDevice); checkCUDAError("Problem with camera memcpy"); cudaMalloc(&dev_scene_geom, hst_scene->geoms.size() * sizeof(Geom)); cudaMemcpy(dev_scene_geom, hst_scene->geoms.data(), hst_scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); checkCUDAError("Problem with scene geometry memcpy"); cudaMalloc(&dev_scene_lights, hst_scene->lights.size() * sizeof(Geom)); cudaMemcpy(dev_scene_lights, hst_scene->lights.data(), hst_scene->lights.size() * sizeof(Geom), cudaMemcpyHostToDevice); checkCUDAError("Problem with scene lights memcpy"); cudaMalloc(&dev_scene_material, hst_scene->materials.size() * sizeof(Material)); cudaMemcpy(dev_scene_material, hst_scene->materials.data(), hst_scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); checkCUDAError("Problem with scene material memcpy"); cudaMalloc(&dev_ray_array, pixelcount * sizeof(RayState)); checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_camera); cudaFree(dev_scene_geom); 
cudaFree(dev_scene_material); cudaFree(dev_ray_array); checkCUDAError("pathtraceFree"); } /** * Example function to generate static and test the CUDA-GL interop. * Delete this once you're done looking at it! */ __global__ void generateNoiseDeleteMe(Camera cam, int iter, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0); thrust::uniform_real_distribution<float> u01(0, 1); // CHECKITOUT: Note that on every iteration, noise gets added onto // the image (not replaced). As a result, the image smooths out over // time, since the output image is the contents of this array divided // by the number of iterations. // // Your renderer will do the same thing, and, over time, it will become // smoother. image[index] += glm::vec3(u01(rng)); } } /** * Generate gittered ray within pixel from camera to the scene */ __global__ void generateFirstLevelRays(Camera* cam, RayState* rays) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam->resolution.x && y < cam->resolution.y) { int index = x + (y * cam->resolution.x); //Set the ray start position which is the camera position rays[index].ray.origin = cam->position; //Set the ray direction // 1. Get the pixel position in the world coordinates glm::vec3 pixelWorld = cam->mPosition + (cam->hVector * ((2.0f * (x*1.0f/(cam->resolution.x - 1.0f)) ) - 1) ) + (cam->vVector * (1 - (2.0f * (y*1.0f/(cam->resolution.y - 1.0f))) ) ); // 2. Get the normalized ray direction from the ray origin (camera position) to the pixel world coordinates rays[index].ray.direction = glm::normalize(pixelWorld - cam->position); //Set the color carried by the ray to white rays[index].color = glm::vec3(1.0f, 1.0f, 1.0f); //Set the ray as alive rays[index].isTerminated = false; } } /** * */ __global__ void pathIteration(int iter, int depth, RayState *rays, Camera *cam, Geom *geom, Geom *lights, Material *mat, int geomCount, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam->resolution.x && y < cam->resolution.y) { int index = x + (y * cam->resolution.x); if(!rays[index].isTerminated) { int intersectionT = -1; int materialIndex = 0; int geomIndex = 0; glm::vec3 intersectionPoint, intersectionNormal; for(int i = 0; i < geomCount; ++i) { glm::vec3 currentIntersectionPoint, currentNormal; bool outside = false; int t = -1; if(geom[i].type == GeomType::SPHERE) { t = sphereIntersectionTest(geom[i], rays[index].ray, currentIntersectionPoint, currentNormal, outside); } else if(geom[i].type == GeomType::CUBE) { t = boxIntersectionTest(geom[i], rays[index].ray, currentIntersectionPoint, currentNormal, outside); } if(t > 0 && (t < intersectionT || intersectionT < 0)) { materialIndex = geom[i].materialid; geomIndex = i; intersectionT = t; intersectionPoint = currentIntersectionPoint; intersectionNormal = currentNormal; } } if(intersectionT > 0) { if(mat[materialIndex].hasRefractive) { //Get the refracted ray rays[index].ray.direction = glm::normalize(glm::refract(rays[index].ray.direction, intersectionNormal, 1.0f/mat[materialIndex].indexOfRefraction)); rays[index].ray.origin = intersectionPoint + (1e-3f * rays[index].ray.direction); //Intersect again bool outside = true; if(geom[geomIndex].type == GeomType::SPHERE) { intersectionT = 
sphereIntersectionTest(geom[geomIndex], rays[index].ray, intersectionPoint, intersectionNormal, outside); } else if(geom[geomIndex].type == GeomType::CUBE) { intersectionT = boxIntersectionTest(geom[geomIndex], rays[index].ray, intersectionPoint, intersectionNormal, outside); } //Get the outgoing refracted ray rays[index].ray.direction = glm::normalize(glm::refract(rays[index].ray.direction, intersectionNormal, mat[materialIndex].indexOfRefraction)); rays[index].ray.origin = intersectionPoint + (1e-3f * rays[index].ray.direction); //Multiply by the diffuse color rays[index].color *= mat[materialIndex].color; } else { thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, depth); scatterRay(rays[index].ray, rays[index].color, intersectionPoint, intersectionNormal, mat[materialIndex], lights, cam->position, rng); //Check if the geometry hit is a light source, set it as dead if(mat[materialIndex].emittance > 0) { rays[index].isTerminated = true; } } } else { //The ray didn't intersect with anything, set it as dead rays[index].color = glm::vec3(0.0f); rays[index].isTerminated = true; } } else { image[index] += rays[index].color/(1.0f*depth); } } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const dim3 blockSize2d(BLOCKSIZE, BLOCKSIZE); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray is a (ray, color) pair, where color starts as the // multiplicative identity, white = (1, 1, 1). // * For debugging, you can output your ray directions as colors. // * For each depth: // * Compute one new (ray, color) pair along each path (using scatterRay). // Note that many rays will terminate by hitting a light or hitting // nothing at all. You'll have to decide how to represent your path rays // and how you'll mark terminated rays. // * Color is attenuated (multiplied) by reflections off of any object // surface. // * You can debug your ray-scene intersections by displaying various // values as colors, e.g., the first surface normal, the first bounced // ray direction, the first unlit material color, etc. // * Add all of the terminated rays' results into the appropriate pixels. // * Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * Finally, handle all of the paths that still haven't terminated. // (Easy way is to make them black or background-colored.) //Generate all first level rays and save them generateFirstLevelRays<<<blocksPerGrid2d, blockSize2d>>>(dev_camera, dev_ray_array); //Create a for loop that iterates over the desired depth //For each loop iteration // * determine the number of threads and thus blocks needed // * call the pathtrace kernel for each ray // * do stream compaction to get rid of all the terminated rays and get the remaining number of rays! 
for(int i = 0; i < traceDepth; ++i) { pathIteration<<<blocksPerGrid2d, blockSize2d>>>(iter, i, dev_ray_array, dev_camera, dev_scene_geom, dev_scene_lights, dev_scene_material, hst_scene->geoms.size(), dev_image); checkCUDAError("path iteration"); } /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image); checkCUDAError("send to PBO"); // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("memcpy image data"); }
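generateFirstLevelRays is documented as producing jittered rays but samples a fixed point per pixel. A sketch of a jittered variant built from the same pieces this file already uses; the kernel name and the extra iter parameter are assumptions, not original code:

#include <thrust/random.h>

// Hypothetical jittered version of generateFirstLevelRays; Camera, RayState and
// makeSeededRandomEngine are the ones defined in the file above.
__global__ void generateJitteredFirstLevelRays(Camera *cam, RayState *rays, int iter) {
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x < cam->resolution.x && y < cam->resolution.y) {
        int index = x + (y * cam->resolution.x);
        thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
        thrust::uniform_real_distribution<float> u01(0, 1);
        float jx = x + u01(rng) - 0.5f;   // sub-pixel offset in [-0.5, 0.5)
        float jy = y + u01(rng) - 0.5f;
        glm::vec3 pixelWorld = cam->mPosition
            + (cam->hVector * ((2.0f * (jx / (cam->resolution.x - 1.0f))) - 1))
            + (cam->vVector * (1 - (2.0f * (jy / (cam->resolution.y - 1.0f)))));
        rays[index].ray.origin = cam->position;
        rays[index].ray.direction = glm::normalize(pixelWorld - cam->position);
        rays[index].color = glm::vec3(1.0f, 1.0f, 1.0f);
        rays[index].isTerminated = false;
    }
}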
8dae9ac680e6e2b3f1f386cdf01c18426d62f47c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Pi - CUDA unified memory version * Author: Felipe Gutierrez, SBEL, July 2015 */ #include <stdio.h> /* fprintf() */ #include <iostream> #include <float.h> /* DBL_EPSILON() */ #include <math.h> /* sqrt() */ #include <thrust/reduce.h> #include <thrust/system/hip/execution_policy.h> #include <thrust/system/omp/execution_policy.h> #include <thrust/system/cpp/execution_policy.h> #if OPENMP_ENABLED #include <omp.h> #endif #define nthreads 512 #if CUDA_ENABLED #define NUMBLOCKS(n) ceil(n/nthreads) #define KERNEL(n) <<<NUMBLOCKS(n), nthreads>>> #else #define KERNEL(n) #endif #if CUDA_ENABLED __global__ #endif void calculateAreas(const int numRects, const double width, double *areas) { #if CUDA_ENABLED int threadId = threadIdx.x + (blockIdx.x * blockDim.x); if(threadId >= numRects) { return; } #elif OPENMP_ENABLED #pragma omp parallel for #endif #if !CUDA_ENABLED for(int threadId = 0;threadId < numRects;threadId++) #endif { double x = threadId * width; double heightSq = 1 - (x*x); double height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq)); areas[threadId] = (width * height); // x = threadId * width; // heightSq = 1 - (x*x); // height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq)); // areas[threadId] = (width * height); // x = threadId * width; // heightSq = 1 - (x*x); // height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq)); // areas[threadId] = (width * height); // double extraOp = threadId; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = pow(extraOp,2)/23.2; // extraOp = pow(extraOp,0.5)/23.2; // extraOp = pow(extraOp,3)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = pow(extraOp,2)/23.2; // extraOp = pow(extraOp,0.5)/23.2; // extraOp = pow(extraOp,3)/23.2; } } void calculateArea(const int numRects, double *area) { hipError_t err; dim3 blockDims(32,32); /* Allocate areas in unified memory */ double *unifiedAreas; err = hipMallocManaged(&unifiedAreas, numRects * sizeof(double)); /* Check for unified memory error*/ if(err != hipSuccess) { fprintf(stderr, "hipMallocManaged failed: %s\n", hipGetErrorString(err)); } calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), unifiedAreas); #if CUDA_ENABLED /* If cuda is enabled we want to do the reduce in GPU */ hipDeviceSynchronize(); // Synchronize the valued calculated in the kernel. (*area) = thrust::reduce(thrust::hip::par, unifiedAreas, unifiedAreas + numRects); #elif OPENMP_ENABLED /* If cuda is not enabled but openmp is we want to do the reduce in the cpu with openmp */ (*area) = thrust::reduce(thrust::omp::par, unifiedAreas, unifiedAreas + numRects); #else /* If neither is enabled we do it serially*/ // (*area) = thrust::reduce(thrust::cpp::par, unifiedAreas, unifiedAreas + numRects); for (int i = 0; i < numRects; i++) { (*area) += unifiedAreas[i]; } #endif hipFree(unifiedAreas); }
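One caveat about the launch configuration above: NUMBLOCKS(n) divides two ints before calling ceil, so any partial final block is silently dropped and the tail of the areas array is never written unless n is a multiple of nthreads. A sketch of an integer-safe variant (an observation about the file, not a change to it; nthreads is the macro defined above):

// Rounds up in integer arithmetic, so a partial final block still gets launched.
#define NUMBLOCKS_SAFE(n) (((n) + nthreads - 1) / nthreads)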
8dae9ac680e6e2b3f1f386cdf01c18426d62f47c.cu
/* Pi - CUDA unified memory version * Author: Felipe Gutierrez, SBEL, July 2015 */ #include <stdio.h> /* fprintf() */ #include <iostream> #include <float.h> /* DBL_EPSILON() */ #include <math.h> /* sqrt() */ #include <thrust/reduce.h> #include <thrust/system/cuda/execution_policy.h> #include <thrust/system/omp/execution_policy.h> #include <thrust/system/cpp/execution_policy.h> #if OPENMP_ENABLED #include <omp.h> #endif #define nthreads 512 #if CUDA_ENABLED #define NUMBLOCKS(n) ceil(n/nthreads) #define KERNEL(n) <<<NUMBLOCKS(n), nthreads>>> #else #define KERNEL(n) #endif #if CUDA_ENABLED __global__ #endif void calculateAreas(const int numRects, const double width, double *areas) { #if CUDA_ENABLED int threadId = threadIdx.x + (blockIdx.x * blockDim.x); if(threadId >= numRects) { return; } #elif OPENMP_ENABLED #pragma omp parallel for #endif #if !CUDA_ENABLED for(int threadId = 0;threadId < numRects;threadId++) #endif { double x = threadId * width; double heightSq = 1 - (x*x); double height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq)); areas[threadId] = (width * height); // x = threadId * width; // heightSq = 1 - (x*x); // height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq)); // areas[threadId] = (width * height); // x = threadId * width; // heightSq = 1 - (x*x); // height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq)); // areas[threadId] = (width * height); // double extraOp = threadId; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = pow(extraOp,2)/23.2; // extraOp = pow(extraOp,0.5)/23.2; // extraOp = pow(extraOp,3)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = extraOp*2.876/5.2; // extraOp = sqrt(extraOp)/23.2; // extraOp = pow(extraOp,2)/23.2; // extraOp = pow(extraOp,0.5)/23.2; // extraOp = pow(extraOp,3)/23.2; } } void calculateArea(const int numRects, double *area) { cudaError_t err; dim3 blockDims(32,32); /* Allocate areas in unified memory */ double *unifiedAreas; err = cudaMallocManaged(&unifiedAreas, numRects * sizeof(double)); /* Check for unified memory error*/ if(err != cudaSuccess) { fprintf(stderr, "cudaMallocManaged failed: %s\n", cudaGetErrorString(err)); } calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), unifiedAreas); #if CUDA_ENABLED /* If cuda is enabled we want to do the reduce in GPU */ cudaDeviceSynchronize(); // Synchronize the valued calculated in the kernel. (*area) = thrust::reduce(thrust::cuda::par, unifiedAreas, unifiedAreas + numRects); #elif OPENMP_ENABLED /* If cuda is not enabled but openmp is we want to do the reduce in the cpu with openmp */ (*area) = thrust::reduce(thrust::omp::par, unifiedAreas, unifiedAreas + numRects); #else /* If neither is enabled we do it serially*/ // (*area) = thrust::reduce(thrust::cpp::par, unifiedAreas, unifiedAreas + numRects); for (int i = 0; i < numRects; i++) { (*area) += unifiedAreas[i]; } #endif cudaFree(unifiedAreas); }
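calculateArea accumulates a left-endpoint Riemann sum of sqrt(1 - x^2) over [0, 1], i.e. the area of a quarter unit circle, so pi is recovered as 4 * area. A hypothetical driver, not part of the original file; numRects is chosen as a multiple of nthreads so the ceil-based launch macro above covers every rectangle:

#include <cstdio>

// Forward declaration of the host entry point defined in the file above.
void calculateArea(const int numRects, double *area);

int main() {
    double area = 0.0;                 // the serial fallback accumulates into this, so start at zero
    const int numRects = 1 << 20;      // 1048576 rectangles = 2048 * 512
    calculateArea(numRects, &area);
    printf("pi is approximately %.8f\n", 4.0 * area);
    return 0;
}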
f27d2738fa08206ae19047a42e767eff8787e36c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> // Macro for checking errors in CUDA API calls #define cudaErrorCheck(call) \ do{ \ hipError_t cuErr = call; \ if(hipSuccess != cuErr){ \ printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErr));\ exit(0); \ } \ }while(0) // Size of array #define N 1048576 // Kernel __global__ void add_vectors(int *a, int *b, int *c) { int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < N) c[id] = a[id] + b[id]; } // Main program int main() { // Number of bytes to allocate for N integers size_t bytes = N*sizeof(int); // Allocate memory for arrays A, B, and C on host int *A = (int*)malloc(bytes); int *B = (int*)malloc(bytes); int *C = (int*)malloc(bytes); // Allocate memory for arrays d_A, d_B, and d_C on device int *d_A, *d_B, *d_C; cudaErrorCheck( hipMalloc(&d_A, bytes) ); cudaErrorCheck( hipMalloc(&d_B, bytes) ); cudaErrorCheck( hipMalloc(&d_C, bytes) ); // Fill host arrays A and B for(int i=0; i<N; i++) { A[i] = 1; B[i] = 2; } // Copy data from host arrays A and B to device arrays d_A and d_B cudaErrorCheck( hipMemcpy(d_A, A, bytes, hipMemcpyHostToDevice) ); cudaErrorCheck( hipMemcpy(d_B, B, bytes, hipMemcpyHostToDevice) ); // Set execution configuration parameters // thr_per_blk: number of CUDA threads per grid block // blk_in_grid: number of blocks in grid int thr_per_blk = 256; int blk_in_grid = ceil( float(N) / thr_per_blk ); // Launch kernel hipLaunchKernelGGL(( add_vectors), dim3(blk_in_grid), dim3(thr_per_blk) , 0, 0, d_A, d_B, d_C); // Check for errors in kernel launch (e.g. invalid execution configuration paramters) hipError_t cuErrSync = hipGetLastError(); // Check for errors on the GPU after control is returned to CPU hipError_t cuErrAsync = hipDeviceSynchronize(); if (cuErrSync != hipSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErrSync)); exit(0); } if (cuErrAsync != hipSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErrAsync)); exit(0); } // Copy data from device array d_C to host array C cudaErrorCheck( hipMemcpy(C, d_C, bytes, hipMemcpyDeviceToHost) ); // Verify results for(int i=0; i<N; i++) { if(C[i] != 3) { printf("Error: value of C[%d] = %d instead of 3\n", i, C[i]); exit(-1); } } // Free CPU memory free(A); free(B); free(C); // Free GPU memory cudaErrorCheck( hipFree(d_A) ); cudaErrorCheck( hipFree(d_B) ); cudaErrorCheck( hipFree(d_C) ); printf("\n---------------------------\n"); printf("__SUCCESS__\n"); printf("---------------------------\n"); printf("N = %d\n", N); printf("Threads Per Block = %d\n", thr_per_blk); printf("Blocks In Grid = %d\n", blk_in_grid); printf("---------------------------\n\n"); return 0; }
f27d2738fa08206ae19047a42e767eff8787e36c.cu
#include <stdio.h> // Macro for checking errors in CUDA API calls #define cudaErrorCheck(call) \ do{ \ cudaError_t cuErr = call; \ if(cudaSuccess != cuErr){ \ printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));\ exit(0); \ } \ }while(0) // Size of array #define N 1048576 // Kernel __global__ void add_vectors(int *a, int *b, int *c) { int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < N) c[id] = a[id] + b[id]; } // Main program int main() { // Number of bytes to allocate for N integers size_t bytes = N*sizeof(int); // Allocate memory for arrays A, B, and C on host int *A = (int*)malloc(bytes); int *B = (int*)malloc(bytes); int *C = (int*)malloc(bytes); // Allocate memory for arrays d_A, d_B, and d_C on device int *d_A, *d_B, *d_C; cudaErrorCheck( cudaMalloc(&d_A, bytes) ); cudaErrorCheck( cudaMalloc(&d_B, bytes) ); cudaErrorCheck( cudaMalloc(&d_C, bytes) ); // Fill host arrays A and B for(int i=0; i<N; i++) { A[i] = 1; B[i] = 2; } // Copy data from host arrays A and B to device arrays d_A and d_B cudaErrorCheck( cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice) ); cudaErrorCheck( cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice) ); // Set execution configuration parameters // thr_per_blk: number of CUDA threads per grid block // blk_in_grid: number of blocks in grid int thr_per_blk = 256; int blk_in_grid = ceil( float(N) / thr_per_blk ); // Launch kernel add_vectors<<< blk_in_grid, thr_per_blk >>>(d_A, d_B, d_C); // Check for errors in kernel launch (e.g. invalid execution configuration paramters) cudaError_t cuErrSync = cudaGetLastError(); // Check for errors on the GPU after control is returned to CPU cudaError_t cuErrAsync = cudaDeviceSynchronize(); if (cuErrSync != cudaSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErrSync)); exit(0); } if (cuErrAsync != cudaSuccess) { printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErrAsync)); exit(0); } // Copy data from device array d_C to host array C cudaErrorCheck( cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost) ); // Verify results for(int i=0; i<N; i++) { if(C[i] != 3) { printf("Error: value of C[%d] = %d instead of 3\n", i, C[i]); exit(-1); } } // Free CPU memory free(A); free(B); free(C); // Free GPU memory cudaErrorCheck( cudaFree(d_A) ); cudaErrorCheck( cudaFree(d_B) ); cudaErrorCheck( cudaFree(d_C) ); printf("\n---------------------------\n"); printf("__SUCCESS__\n"); printf("---------------------------\n"); printf("N = %d\n", N); printf("Threads Per Block = %d\n", thr_per_blk); printf("Blocks In Grid = %d\n", blk_in_grid); printf("---------------------------\n\n"); return 0; }
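The grid size above is computed by converting N to float and calling ceil; the usual integer idiom gives the same block count without the float round trip, which matters once N approaches the range where floats can no longer represent every integer. A small alternative sketch (not a change to the file):

// Integer ceiling division: one extra block whenever n is not a multiple of block.
static inline int blocksFor(int n, int block) { return (n + block - 1) / block; }
// e.g. int blk_in_grid = blocksFor(N, thr_per_blk);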
78be1e0e62e3ef5e5419372cddc8d8b92dd7ee67.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/fake_dequantize_op.h" namespace paddle { namespace operators { template <typename T> __global__ void KeDequantize(const T* in, const T* scale, T max_range, int num, T* out) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < num) { out[idx] = in[idx] * scale[0] / max_range; } } template <typename T> struct DequantizeFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& dev_ctx, const framework::Tensor* in, const framework::Tensor* scale, T max_range, framework::Tensor* out) { const T* in_data = in->data<T>(); const T* scale_factor = scale->data<T>(); T* out_data = out->mutable_data<T>(dev_ctx.GetPlace()); int num = in->numel(); int block = 512; int grid = (num + block - 1) / block; hipLaunchKernelGGL(( KeDequantize<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), in_data, scale_factor, max_range, num, out_data); } }; template <typename T> __global__ void DequantizeOneScaleQuantAxis0(const T* in, const T* scale, T max_range, int num, int channel, T* out) { int tid = threadIdx.x; int channel_size = num / channel; const T* in_c = in + blockIdx.x * channel_size; T* out_c = out + blockIdx.x * channel_size; for (int i = tid; i < channel_size; i += blockDim.x) { out_c[i] = in_c[i] * scale[blockIdx.x] / max_range; } } template <typename T> __global__ void DequantizeOneScaleQuantAxis1(const T* in, const T* scale, T max_range, const int num, const int cin, const int cout, T* out) { int cout_wh_size = num / cin; int wh_size = cout_wh_size / cout; T s = scale[blockIdx.x]; const T* in_current = in + threadIdx.x * cout_wh_size + blockIdx.x * wh_size; T* out_current = out + threadIdx.x * cout_wh_size + blockIdx.x * wh_size; for (int i = 0; i < wh_size; i++) { out_current[i] = in_current[i] * s / max_range; } } template <typename T> __global__ void DequantizeTwoScale(const T* in, const T* scale_one, const T* scale_two, T max_range, int num, int batch_size, int channel, T* out) { int tid = threadIdx.x; int channel_size = num / (batch_size * channel); int scale_index = blockIdx.x % channel; const T* in_c = in + blockIdx.x * channel_size; T* out_c = out + blockIdx.x * channel_size; for (int i = tid; i < channel_size; i += blockDim.x) { out_c[i] = in_c[i] * scale_one[scale_index] * scale_two[0] / max_range; } } template <typename T> struct ChannelDequantizeFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& dev_ctx, const framework::Tensor* in, const framework::Tensor** scales, const int scale_num, T max_range, const int quant_axis, framework::Tensor* out) { auto in_dims = in->dims(); const T* in_data = in->data<T>(); T* out_data = out->mutable_data<T>(dev_ctx.GetPlace()); if (scale_num == 1) { int num = in->numel(); const T* scale_factor = scales[0]->data<T>(); if (quant_axis == 0) { int grid = in_dims[0]; int block = 
1024; hipLaunchKernelGGL(( DequantizeOneScaleQuantAxis0<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), in_data, scale_factor, max_range, num, in_dims[0], out_data); } else if (quant_axis == 1) { // Dequantize weight of Cin * Cout * W * H int grid = in_dims[1]; int block = in_dims[0]; hipLaunchKernelGGL(( DequantizeOneScaleQuantAxis1<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), in_data, scale_factor, max_range, num, in_dims[0], in_dims[1], out_data); } } else if (scale_num == 2) { // Not need to consider quant_axis int num = in->numel(); int batch_size = in->dims()[0]; int channel = in->dims()[1]; const T* scale_one = scales[0]->data<T>(); const T* scale_two = scales[1]->data<T>(); int block = 1024; int grid = batch_size * channel; hipLaunchKernelGGL(( DequantizeTwoScale<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), in_data, scale_one, scale_two, max_range, num, batch_size, channel, out_data); } } }; template struct DequantizeFunctor<platform::CUDADeviceContext, float>; template struct DequantizeFunctor<platform::CUDADeviceContext, double>; template struct ChannelDequantizeFunctor<platform::CUDADeviceContext, float>; template struct ChannelDequantizeFunctor<platform::CUDADeviceContext, double>; } // namespace operators } // namespace paddle namespace ops = paddle::operators; using CUDA = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(fake_dequantize_max_abs, ops::FakeDequantizeMaxAbsKernel<CUDA, float>, ops::FakeDequantizeMaxAbsKernel<CUDA, double>); REGISTER_OP_CUDA_KERNEL( fake_channel_wise_dequantize_max_abs, ops::FakeChannelWiseDequantizeMaxAbsKernel<CUDA, float>, ops::FakeChannelWiseDequantizeMaxAbsKernel<CUDA, double>);
78be1e0e62e3ef5e5419372cddc8d8b92dd7ee67.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/fake_dequantize_op.h" namespace paddle { namespace operators { template <typename T> __global__ void KeDequantize(const T* in, const T* scale, T max_range, int num, T* out) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < num) { out[idx] = in[idx] * scale[0] / max_range; } } template <typename T> struct DequantizeFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& dev_ctx, const framework::Tensor* in, const framework::Tensor* scale, T max_range, framework::Tensor* out) { const T* in_data = in->data<T>(); const T* scale_factor = scale->data<T>(); T* out_data = out->mutable_data<T>(dev_ctx.GetPlace()); int num = in->numel(); int block = 512; int grid = (num + block - 1) / block; KeDequantize<T><<<grid, block, 0, dev_ctx.stream()>>>( in_data, scale_factor, max_range, num, out_data); } }; template <typename T> __global__ void DequantizeOneScaleQuantAxis0(const T* in, const T* scale, T max_range, int num, int channel, T* out) { int tid = threadIdx.x; int channel_size = num / channel; const T* in_c = in + blockIdx.x * channel_size; T* out_c = out + blockIdx.x * channel_size; for (int i = tid; i < channel_size; i += blockDim.x) { out_c[i] = in_c[i] * scale[blockIdx.x] / max_range; } } template <typename T> __global__ void DequantizeOneScaleQuantAxis1(const T* in, const T* scale, T max_range, const int num, const int cin, const int cout, T* out) { int cout_wh_size = num / cin; int wh_size = cout_wh_size / cout; T s = scale[blockIdx.x]; const T* in_current = in + threadIdx.x * cout_wh_size + blockIdx.x * wh_size; T* out_current = out + threadIdx.x * cout_wh_size + blockIdx.x * wh_size; for (int i = 0; i < wh_size; i++) { out_current[i] = in_current[i] * s / max_range; } } template <typename T> __global__ void DequantizeTwoScale(const T* in, const T* scale_one, const T* scale_two, T max_range, int num, int batch_size, int channel, T* out) { int tid = threadIdx.x; int channel_size = num / (batch_size * channel); int scale_index = blockIdx.x % channel; const T* in_c = in + blockIdx.x * channel_size; T* out_c = out + blockIdx.x * channel_size; for (int i = tid; i < channel_size; i += blockDim.x) { out_c[i] = in_c[i] * scale_one[scale_index] * scale_two[0] / max_range; } } template <typename T> struct ChannelDequantizeFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& dev_ctx, const framework::Tensor* in, const framework::Tensor** scales, const int scale_num, T max_range, const int quant_axis, framework::Tensor* out) { auto in_dims = in->dims(); const T* in_data = in->data<T>(); T* out_data = out->mutable_data<T>(dev_ctx.GetPlace()); if (scale_num == 1) { int num = in->numel(); const T* scale_factor = scales[0]->data<T>(); if (quant_axis == 0) { int grid = in_dims[0]; int block = 1024; DequantizeOneScaleQuantAxis0<T><<<grid, block, 0, dev_ctx.stream()>>>( in_data, scale_factor, max_range, num, 
in_dims[0], out_data); } else if (quant_axis == 1) { // Dequantize weight of Cin * Cout * W * H int grid = in_dims[1]; int block = in_dims[0]; DequantizeOneScaleQuantAxis1<T><<<grid, block, 0, dev_ctx.stream()>>>( in_data, scale_factor, max_range, num, in_dims[0], in_dims[1], out_data); } } else if (scale_num == 2) { // Not need to consider quant_axis int num = in->numel(); int batch_size = in->dims()[0]; int channel = in->dims()[1]; const T* scale_one = scales[0]->data<T>(); const T* scale_two = scales[1]->data<T>(); int block = 1024; int grid = batch_size * channel; DequantizeTwoScale<T><<<grid, block, 0, dev_ctx.stream()>>>( in_data, scale_one, scale_two, max_range, num, batch_size, channel, out_data); } } }; template struct DequantizeFunctor<platform::CUDADeviceContext, float>; template struct DequantizeFunctor<platform::CUDADeviceContext, double>; template struct ChannelDequantizeFunctor<platform::CUDADeviceContext, float>; template struct ChannelDequantizeFunctor<platform::CUDADeviceContext, double>; } // namespace operators } // namespace paddle namespace ops = paddle::operators; using CUDA = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(fake_dequantize_max_abs, ops::FakeDequantizeMaxAbsKernel<CUDA, float>, ops::FakeDequantizeMaxAbsKernel<CUDA, double>); REGISTER_OP_CUDA_KERNEL( fake_channel_wise_dequantize_max_abs, ops::FakeChannelWiseDequantizeMaxAbsKernel<CUDA, float>, ops::FakeChannelWiseDequantizeMaxAbsKernel<CUDA, double>);
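For the quant_axis == 0 path, the kernel above computes out = in * scale[c] / max_range for each output channel c, with the tensor laid out as [channel][channel_size]. A plain CPU reference of the same formula, useful for checking results; the function name is illustrative and it is not part of the operator:

// CPU reference for DequantizeOneScaleQuantAxis0 with T = float.
void dequantize_axis0_cpu(const float *in, const float *scale, float max_range,
                          int num, int channel, float *out) {
  int channel_size = num / channel;
  for (int c = 0; c < channel; ++c) {
    for (int i = 0; i < channel_size; ++i) {
      out[c * channel_size + i] = in[c * channel_size + i] * scale[c] / max_range;
    }
  }
}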
601835dfa0f57cb5945a4268982b9f774387b4d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu_3d_hip.cuh" #include "device_3d.cuh" #include "setup.cuh" #include "bin_io.hpp" #include "sys_file.hpp" #include "Logging.hpp" #include "random_hip.cuh" #include <hiprand/hiprand.h> // #include <hipcub/hipcub.hpp> #include <cmath> #include <stdio.h> #include <iostream> #include <sstream> gpu_3d::gpu_3d(int L_, double T_, double h_){ L=L_; N=L*L*L; T=T_; h=h_; //init random gen hiprandCreateGenerator(&gen2, HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL64); //allocate memory on divice hipMalloc(&gen_d,N*sizeof(hiprandStatePhilox4_32_10_t)); gpuErrchk(hipMalloc(&s1_D, N*sizeof(spin_t))); gpuErrchk(hipMalloc(&s2_D, N*sizeof(spin_t))); gpuErrchk(hipMalloc(&J_x_D, 2*N*sizeof(int2))); gpuErrchk(hipMalloc(&J_y_D, 2*N*sizeof(int2))); gpuErrchk(hipMalloc(&J_z_D, 2*N*sizeof(int2))); gpuErrchk(hipMalloc(&boltz_D, 14*sizeof(float))); gpuErrchk(hipMalloc(&M_buf_D, 2*64*ceil(N/256.+1)*sizeof(float))); gpuErrchk(hipMalloc(&EJ_buf_D, 2*64*ceil(N/256.+1)*sizeof(float))); // hipFree(buffer_D); hipLaunchKernelGGL(( setup_radome), dim3(ceil(N/256.)),dim3(256), 0, 0, gen_d,1234ULL,N); hiprandGenerateLongLong(gen2,s1_D,N); hiprandGenerateLongLong(gen2,s2_D,N); //set boltzmanfactor float boltz_H[14]; //boltzmanfaktor Host #ifndef SPLIT_H for (size_t i = 0; i < 7; i++) { int j=(i-3)*2; boltz_H[i] = ::min(1., exp(-2*(1/T)*(j-h))); boltz_H[7+i] = ::min(1., exp(-2*(1/T)*(j+h))); } #else for (int i = 0; i < 3; ++i) { int j=2+2*i; boltz_H[i] = exp(-2.*j*(1./T)); } boltz_H[3]=exp(-2*h/T); #endif //loade and binde texure memory hipMemcpy(boltz_D, boltz_H, 14*sizeof(float), hipMemcpyHostToDevice); //setup strems for (int i = 0; i < 4; ++i) { hipStreamCreate(&stream[i]); } } void gpu_3d::gpu_free(){ for (int i = 0; i < 4; ++i) { hipStreamDestroy(stream[i]); } hipFree(boltz_D); hipFree(s1_D); hipFree(s2_D); hipFree(J_x_D); hipFree(J_y_D); hipFree(J_z_D); hipFree(M_buf_D); hipFree(EJ_buf_D); hiprandDestroyGenerator(gen2); } gpu_3d::~gpu_3d(){ } void gpu_3d::sweep(){ LastError(); dim3 block(8,8,8); dim3 grid(ceil(L/8.),ceil(L/8.),ceil(L/8.)); hipLaunchKernelGGL(( metrpolis_3d), dim3(grid),dim3(block),0,stream[1], s1_D,s2_D,gen_d,boltz_D,L,0); hipLaunchKernelGGL(( metrpolis_3d), dim3(grid),dim3(block),0,stream[1], s2_D,s1_D,gen_d,boltz_D,L,0); LastError(); } void gpu_3d::set_T(double T_){ T=T_; float boltz_H[14]; //boltzmanfaktor Host #ifndef SPLIT_H for (size_t i = 0; i < 7; i++) { int j=(i-3)*2; boltz_H[i] = ::min(1., exp(-2*(1/T)*(j-h))); boltz_H[7+i] = ::min(1., exp(-2*(1/T)*(j+h))); } #else for (int i = 0; i < 3; ++i) { int j=2+2*i; boltz_H[i] = exp(-2.*j*(1./T)); } boltz_H[3]=exp(-2*h/T); #endif gpuErrchk(hipDeviceSynchronize()); hipMemcpy(boltz_D, boltz_H, 14*sizeof(float), hipMemcpyHostToDevice); // hipBindTexture(0, get_3d_boltz(), boltz_D, 4*sizeof(float)); } void gpu_3d::set_h(double h_){ h=h_; float boltz_H[14]; //boltzmanfaktor Host #ifndef SPLIT_H for (size_t i = 0; i < 7; i++) { int j=(i-3)*2; boltz[i] = ::min(1., exp(-2*(1/T)*(j-h))); boltz[7+i] = ::min(1., exp(-2*(1/T)*(j+h))); } #else for (int i = 0; i < 3; ++i) { int j=2+2*i; boltz_H[i] = exp(-2.*j*(1./T)); } boltz_H[3]=exp(-2*h/T); #endif gpuErrchk(hipDeviceSynchronize()); hipMemcpy(boltz_D, boltz_H, 14*sizeof(float), hipMemcpyHostToDevice); LastError(); } vector<float> gpu_3d::measure(){ int c=(int)ceil(N/(256.*2)); float *M_H=new float[2*64*c]; float *EJ_H=new float[2*64*c]; LastError(); dim3 grid(ceil(N/(2*256.)),1,1); dim3 block(256,1,1); dim3 block_s(8,8,8); dim3 
grid_s(ceil(L/8.),ceil(L/8.),ceil(L/8.)); hipLaunchKernelGGL(( checkerbord_switch_3d), dim3(grid_s),dim3(block_s),0,stream[1], s1_D,s2_D,L,L*L); hipStreamSynchronize(stream[1]); hipLaunchKernelGGL(( measure_EJ_M_3d), dim3(grid),dim3(block),0,stream[1], s1_D,&EJ_buf_D[0],&M_buf_D[0],N,L); hipLaunchKernelGGL(( measure_EJ_M_3d), dim3(grid),dim3(block),0,stream[2], s2_D,&EJ_buf_D[64*c],&M_buf_D[64*c],N,L); hipStreamSynchronize(stream[1]); hipStreamSynchronize(stream[2]); hipLaunchKernelGGL(( checkerbord_switch_3d), dim3(grid_s),dim3(block_s),0,stream[1], s1_D,s2_D,L,L*L); hipMemcpyAsync(&M_H[0],&M_buf_D[0],2*c*64*sizeof(float),hipMemcpyDeviceToHost,stream[2]); hipMemcpyAsync(&EJ_H[0],&EJ_buf_D[0],2*c*64*sizeof(float),hipMemcpyDeviceToHost,stream[2]); LastError(); hipStreamSynchronize(stream[2]); vector<float> result; result.assign(2*64, 0); for (int i = 0; i < 2*64; ++i) { result[i]=0; for (int j = 0; j < c;++j) { result[i]+=EJ_H[i*c+j]/*/N*/+h*M_H[i*c+j]/*/N*/; } } delete[] M_H; delete[] EJ_H; return result; } void gpu_3d::set_seed(long long seed_){ LastError(); hipLaunchKernelGGL(( setup_radome), dim3(ceil(N/256.)),dim3(256), 0, 0, gen_d,seed_,N); hiprandSetPseudoRandomGeneratorSeed(gen2, seed_); } void gpu_3d::load_J(string fname){ spin_t *Jx = new spin_t[2*N]; spin_t *Jy = new spin_t[2*N]; spin_t *Jz = new spin_t[2*N]; int error=load_J_3d(Jx,Jy,Jz,N,fname); if(error<0){ LOG(LOG_WARNING)<<"fehler beim laden von J in file \""<<fname<<"\" mit fehler "<<error<<endl; return; } gpuErrchk(hipDeviceSynchronize()); hipMemcpy(J_x_D, Jx, 2*N*sizeof(spin_t), hipMemcpyHostToDevice); hipMemcpy(J_y_D, Jy, 2*N*sizeof(spin_t), hipMemcpyHostToDevice); hipMemcpy(J_z_D, Jz, 2*N*sizeof(spin_t), hipMemcpyHostToDevice); hipBindTexture(0, get_3d_J_xi(), J_x_D,2*N*sizeof(int2)); hipBindTexture(0, get_3d_J_yi(), J_y_D,2*N*sizeof(int2)); hipBindTexture(0, get_3d_J_zi(), J_z_D,2*N*sizeof(int2)); LastError(); delete[] Jx; delete[] Jy; delete[] Jz; } void gpu_3d::save_J(string fname){ spin_t *Jx = new spin_t[2*N]; spin_t *Jy = new spin_t[2*N]; spin_t *Jz = new spin_t[2*N]; gpuErrchk(hipDeviceSynchronize()); hipMemcpy(Jx, J_x_D, 2*N*sizeof(spin_t),hipMemcpyDeviceToHost); hipMemcpy(Jy, J_y_D, 2*N*sizeof(spin_t),hipMemcpyDeviceToHost); hipMemcpy(Jz, J_z_D, 2*N*sizeof(spin_t),hipMemcpyDeviceToHost); int error=save_J_3d(Jx,Jy,Jz,N,fname); if(error<0){ LOG(LOG_WARNING)<<"fehler beim laden von J in file \""<<fname<<"\" mit fehler "<<error<<endl; } delete[] Jx; delete[] Jy; delete[] Jz; } void gpu_3d::init_J(){ LastError(); unsigned int *buffer_D; //Buffer divice gpuErrchk(hipMalloc(&buffer_D, 3*2*N*sizeof(unsigned int))); //ranomly initlize J gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( generate_kernel), dim3(ceil(N/256.)),dim3(256), 0, 0, gen_d,buffer_D,N, 3*2); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( J_order_3d), dim3(ceil(N/256.)),dim3(256), 0, 0, J_x_D, J_y_D, J_z_D, buffer_D, L, N); LastError(); gpuErrchk(hipDeviceSynchronize()); hipFree(buffer_D); LastError(); hipBindTexture(0, get_3d_J_xi(), J_x_D,2*N*sizeof(int2)); LastError(); hipBindTexture(0, get_3d_J_yi(), J_y_D,2*N*sizeof(int2)); LastError(); hipBindTexture(0, get_3d_J_zi(), J_z_D,2*N*sizeof(int2)); LastError(); } void gpu_3d::init_rand(){ hiprandGenerateLongLong(gen2,s1_D,N); hiprandGenerateLongLong(gen2,s2_D,N); gpuErrchk(hipDeviceSynchronize()); LastError(); } void gpu_3d::save_sys(string prefix){ spin_t s[N]; gpuErrchk(hipDeviceSynchronize()); hipMemcpy(s,s1_D,N*sizeof(spin_t),hipMemcpyDeviceToHost); for (int i = 0; i < 64; ++i) { 
// image setup stringstream convert; convert<<prefix<<i<<".pbm"; ofstream file(convert.str().c_str()); file<<"P1"<<endl; file<<L<<" "<<L*L<<endl; // print image for (int j = 0; j < L*L; ++j) { for (int k = 0; k < L; ++k) { file<<((s[j*L+k]&((spin_t)1<<i))==0?"0 ":"1 "); } file<<endl; } } } long gpu_3d::get_N(){ return N; } void gpu_3d::swap(gpu_sys *sys, std::unique_ptr<spin_t[]> mask) { gpu_3d *sys_3d = dynamic_cast<gpu_3d *>(sys); if (sys_3d != NULL) { swap(sys_3d, move(mask)); } else { LOG(LOG_ERROR) << "conversion error"; } } void gpu_3d::swap(gpu_3d *sys, std::unique_ptr<spin_t[]> mask) { dim3 block_s(8,8,8); dim3 grid_s(ceil(L/8.),ceil(L/8.),ceil(L/8.)); hipLaunchKernelGGL(( checkerbord_switch_3d), dim3(grid_s),dim3(block_s),0,stream[1], s1_D,s2_D,L,L*L); hipLaunchKernelGGL(( checkerbord_switch_3d), dim3(grid_s),dim3(block_s),0,stream[1], sys->s1_D,sys->s2_D,sys->L,sys->L*sys->L); hipLaunchKernelGGL(( swap_3d), dim3(ceil(N/256.)),dim3(256),0,stream[1], s1_D,sys->s1_D,mask[0],N); hipLaunchKernelGGL(( swap_3d), dim3(ceil(N/256.)),dim3(256),0,stream[1], s2_D,sys->s2_D,mask[1],N); hipLaunchKernelGGL(( checkerbord_switch_3d), dim3(grid_s),dim3(block_s),0,stream[1], sys->s1_D,sys->s2_D,sys->L,sys->L*sys->L); hipLaunchKernelGGL(( checkerbord_switch_3d), dim3(grid_s),dim3(block_s),0,stream[1], s1_D,s2_D,L,L*L); } ostream & gpu_3d::save(ostream &stream){ binary_write(stream, L, 1); binary_write(stream, T, 1); binary_write(stream, h, 1); spin_t *s=new spin_t[N]; hipMemcpy(s,s1_D,N*sizeof(spin_t),hipMemcpyDeviceToHost); binary_write(stream, s[0], N); hipMemcpy(s,s2_D,N*sizeof(spin_t),hipMemcpyDeviceToHost); binary_write(stream, s[0], N); delete[] s; spin_t *J=new spin_t[2*N]; hipMemcpy(J,J_x_D,2*N*sizeof(int2),hipMemcpyDeviceToHost); binary_write(stream, J[0], 2 * N);//x hipMemcpy(J,J_y_D,2*N*sizeof(int2),hipMemcpyDeviceToHost); binary_write(stream, J[0], 2 * N);//y hipMemcpy(J,J_z_D,2*N*sizeof(int2),hipMemcpyDeviceToHost); binary_write(stream, J[0], 2 * N);//z delete[] J; // RNG state hiprandStatePhilox4_32_10_t *gen_n_save=new hiprandStatePhilox4_32_10_t[N]; hipMemcpy(gen_n_save,gen_d,N*sizeof(hiprandStatePhilox4_32_10_t),hipMemcpyDeviceToHost); binary_write(stream, gen_n_save[0], N); delete[] gen_n_save; return stream; } istream & gpu_3d::load(istream &stream){ binary_read(stream, L, 1); N = L * L * L; binary_read(stream, T, 1); set_T(T); binary_read(stream, h, 1); set_h(h); hipDeviceSynchronize(); // memory hipFree(gen_d); hipFree(s1_D); hipFree(s2_D); hipFree(J_x_D); hipFree(J_y_D); hipFree(J_z_D); hipFree(M_buf_D); hipFree(EJ_buf_D); hipMalloc(&gen_d,N*sizeof(hiprandStatePhilox4_32_10_t)); hipMalloc(&s1_D, N*sizeof(spin_t)); hipMalloc(&s2_D, N*sizeof(spin_t)); hipMalloc(&J_x_D, 2*N*sizeof(int2)); hipMalloc(&J_y_D, 2*N*sizeof(int2)); hipMalloc(&J_z_D, 2*N*sizeof(int2)); hipMalloc(&M_buf_D, 2*64*ceil(N/256.+1)*sizeof(float)); hipMalloc(&EJ_buf_D, 2*64*ceil(N/256.+1)*sizeof(float)); // spins spin_t *s=new spin_t[N]; binary_read(stream, s[0], N); hipMemcpy(s1_D,s,N*sizeof(spin_t),hipMemcpyHostToDevice); binary_read(stream, s[0], N); hipMemcpy(s2_D,s,N*sizeof(spin_t),hipMemcpyHostToDevice); delete[] s; LastError(); // copling spin_t *J=new spin_t[2*N]; binary_read(stream, J[0], 2 * N);//x hipMemcpy(J_x_D,J,2*N*sizeof(int2),hipMemcpyHostToDevice); binary_read(stream, J[0], 2 * N);//y hipMemcpy(J_y_D,J,2*N*sizeof(int2),hipMemcpyHostToDevice); binary_read(stream, J[0], 2 * N);//y hipMemcpy(J_z_D,J,2*N*sizeof(int2),hipMemcpyHostToDevice); delete[] J; LastError(); hipBindTexture(0, get_3d_J_xi(), 
J_x_D,2*N*sizeof(int2)); LastError(); hipBindTexture(0, get_3d_J_yi(), J_y_D,2*N*sizeof(int2)); LastError(); hipBindTexture(0, get_3d_J_zi(), J_z_D,2*N*sizeof(int2)); LastError(); // RNG state hiprandStatePhilox4_32_10_t *gen_n_save=new hiprandStatePhilox4_32_10_t[N]; binary_read(stream, gen_n_save[0], N); hipMemcpy(gen_d,gen_n_save,N*sizeof(hiprandStatePhilox4_32_10_t),hipMemcpyHostToDevice); delete[] gen_n_save; LastError(); return stream; }
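gpu_3d::save and gpu_3d::load serialize the lattice size, temperature, field, both spin checkerboards, the couplings, and the per-site RNG state, so a simulation can be checkpointed and resumed. A hypothetical pair of helpers using those methods (the helper names and binary-mode choice are assumptions; the stream format itself is whatever binary_write/binary_read produce above):

#include <fstream>
#include <string>

// Write a checkpoint of the system to fname.
void checkpoint(gpu_3d &sys, const std::string &fname) {
    std::ofstream out(fname.c_str(), std::ios::binary);
    sys.save(out);
}

// Restore a previously written checkpoint from fname.
void restore(gpu_3d &sys, const std::string &fname) {
    std::ifstream in(fname.c_str(), std::ios::binary);
    sys.load(in);
}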
601835dfa0f57cb5945a4268982b9f774387b4d2.cu
#include "gpu_3d.cuh" #include "device_3d.cuh" #include "setup.cuh" #include "bin_io.hpp" #include "sys_file.hpp" #include "Logging.hpp" #include "random.cuh" #include <curand.h> // #include <cub/cub.cuh> #include <cmath> #include <stdio.h> #include <iostream> #include <sstream> gpu_3d::gpu_3d(int L_, double T_, double h_){ L=L_; N=L*L*L; T=T_; h=h_; //init random gen curandCreateGenerator(&gen2, CURAND_RNG_QUASI_SCRAMBLED_SOBOL64); //allocate memory on divice cudaMalloc(&gen_d,N*sizeof(curandStatePhilox4_32_10_t)); gpuErrchk(cudaMalloc(&s1_D, N*sizeof(spin_t))); gpuErrchk(cudaMalloc(&s2_D, N*sizeof(spin_t))); gpuErrchk(cudaMalloc(&J_x_D, 2*N*sizeof(int2))); gpuErrchk(cudaMalloc(&J_y_D, 2*N*sizeof(int2))); gpuErrchk(cudaMalloc(&J_z_D, 2*N*sizeof(int2))); gpuErrchk(cudaMalloc(&boltz_D, 14*sizeof(float))); gpuErrchk(cudaMalloc(&M_buf_D, 2*64*ceil(N/256.+1)*sizeof(float))); gpuErrchk(cudaMalloc(&EJ_buf_D, 2*64*ceil(N/256.+1)*sizeof(float))); // cudaFree(buffer_D); setup_radome<<<ceil(N/256.),256>>>(gen_d,1234ULL,N); curandGenerateLongLong(gen2,s1_D,N); curandGenerateLongLong(gen2,s2_D,N); //set boltzmanfactor float boltz_H[14]; //boltzmanfaktor Host #ifndef SPLIT_H for (size_t i = 0; i < 7; i++) { int j=(i-3)*2; boltz_H[i] = std::min(1., exp(-2*(1/T)*(j-h))); boltz_H[7+i] = std::min(1., exp(-2*(1/T)*(j+h))); } #else for (int i = 0; i < 3; ++i) { int j=2+2*i; boltz_H[i] = exp(-2.*j*(1./T)); } boltz_H[3]=exp(-2*h/T); #endif //loade and binde texure memory cudaMemcpy(boltz_D, boltz_H, 14*sizeof(float), cudaMemcpyHostToDevice); //setup strems for (int i = 0; i < 4; ++i) { cudaStreamCreate(&stream[i]); } } void gpu_3d::gpu_free(){ for (int i = 0; i < 4; ++i) { cudaStreamDestroy(stream[i]); } cudaFree(boltz_D); cudaFree(s1_D); cudaFree(s2_D); cudaFree(J_x_D); cudaFree(J_y_D); cudaFree(J_z_D); cudaFree(M_buf_D); cudaFree(EJ_buf_D); curandDestroyGenerator(gen2); } gpu_3d::~gpu_3d(){ } void gpu_3d::sweep(){ LastError(); dim3 block(8,8,8); dim3 grid(ceil(L/8.),ceil(L/8.),ceil(L/8.)); metrpolis_3d<<<grid,block,0,stream[1]>>>(s1_D,s2_D,gen_d,boltz_D,L,0); metrpolis_3d<<<grid,block,0,stream[1]>>>(s2_D,s1_D,gen_d,boltz_D,L,0); LastError(); } void gpu_3d::set_T(double T_){ T=T_; float boltz_H[14]; //boltzmanfaktor Host #ifndef SPLIT_H for (size_t i = 0; i < 7; i++) { int j=(i-3)*2; boltz_H[i] = std::min(1., exp(-2*(1/T)*(j-h))); boltz_H[7+i] = std::min(1., exp(-2*(1/T)*(j+h))); } #else for (int i = 0; i < 3; ++i) { int j=2+2*i; boltz_H[i] = exp(-2.*j*(1./T)); } boltz_H[3]=exp(-2*h/T); #endif gpuErrchk(cudaDeviceSynchronize()); cudaMemcpy(boltz_D, boltz_H, 14*sizeof(float), cudaMemcpyHostToDevice); // cudaBindTexture(0, get_3d_boltz(), boltz_D, 4*sizeof(float)); } void gpu_3d::set_h(double h_){ h=h_; float boltz_H[14]; //boltzmanfaktor Host #ifndef SPLIT_H for (size_t i = 0; i < 7; i++) { int j=(i-3)*2; boltz[i] = std::min(1., exp(-2*(1/T)*(j-h))); boltz[7+i] = std::min(1., exp(-2*(1/T)*(j+h))); } #else for (int i = 0; i < 3; ++i) { int j=2+2*i; boltz_H[i] = exp(-2.*j*(1./T)); } boltz_H[3]=exp(-2*h/T); #endif gpuErrchk(cudaDeviceSynchronize()); cudaMemcpy(boltz_D, boltz_H, 14*sizeof(float), cudaMemcpyHostToDevice); LastError(); } vector<float> gpu_3d::measure(){ int c=(int)ceil(N/(256.*2)); float *M_H=new float[2*64*c]; float *EJ_H=new float[2*64*c]; LastError(); dim3 grid(ceil(N/(2*256.)),1,1); dim3 block(256,1,1); dim3 block_s(8,8,8); dim3 grid_s(ceil(L/8.),ceil(L/8.),ceil(L/8.)); checkerbord_switch_3d<<<grid_s,block_s,0,stream[1]>>>(s1_D,s2_D,L,L*L); cudaStreamSynchronize(stream[1]); 
measure_EJ_M_3d<<<grid,block,0,stream[1]>>>(s1_D,&EJ_buf_D[0],&M_buf_D[0],N,L); measure_EJ_M_3d<<<grid,block,0,stream[2]>>>(s2_D,&EJ_buf_D[64*c],&M_buf_D[64*c],N,L); cudaStreamSynchronize(stream[1]); cudaStreamSynchronize(stream[2]); checkerbord_switch_3d<<<grid_s,block_s,0,stream[1]>>>(s1_D,s2_D,L,L*L); cudaMemcpyAsync(&M_H[0],&M_buf_D[0],2*c*64*sizeof(float),cudaMemcpyDeviceToHost,stream[2]); cudaMemcpyAsync(&EJ_H[0],&EJ_buf_D[0],2*c*64*sizeof(float),cudaMemcpyDeviceToHost,stream[2]); LastError(); cudaStreamSynchronize(stream[2]); vector<float> result; result.assign(2*64, 0); for (int i = 0; i < 2*64; ++i) { result[i]=0; for (int j = 0; j < c;++j) { result[i]+=EJ_H[i*c+j]/*/N*/+h*M_H[i*c+j]/*/N*/; } } delete[] M_H; delete[] EJ_H; return result; } void gpu_3d::set_seed(long long seed_){ LastError(); setup_radome<<<ceil(N/256.),256>>>(gen_d,seed_,N); curandSetPseudoRandomGeneratorSeed(gen2, seed_); } void gpu_3d::load_J(string fname){ spin_t *Jx = new spin_t[2*N]; spin_t *Jy = new spin_t[2*N]; spin_t *Jz = new spin_t[2*N]; int error=load_J_3d(Jx,Jy,Jz,N,fname); if(error<0){ LOG(LOG_WARNING)<<"fehler beim laden von J in file \""<<fname<<"\" mit fehler "<<error<<endl; return; } gpuErrchk(cudaDeviceSynchronize()); cudaMemcpy(J_x_D, Jx, 2*N*sizeof(spin_t), cudaMemcpyHostToDevice); cudaMemcpy(J_y_D, Jy, 2*N*sizeof(spin_t), cudaMemcpyHostToDevice); cudaMemcpy(J_z_D, Jz, 2*N*sizeof(spin_t), cudaMemcpyHostToDevice); cudaBindTexture(0, get_3d_J_xi(), J_x_D,2*N*sizeof(int2)); cudaBindTexture(0, get_3d_J_yi(), J_y_D,2*N*sizeof(int2)); cudaBindTexture(0, get_3d_J_zi(), J_z_D,2*N*sizeof(int2)); LastError(); delete[] Jx; delete[] Jy; delete[] Jz; } void gpu_3d::save_J(string fname){ spin_t *Jx = new spin_t[2*N]; spin_t *Jy = new spin_t[2*N]; spin_t *Jz = new spin_t[2*N]; gpuErrchk(cudaDeviceSynchronize()); cudaMemcpy(Jx, J_x_D, 2*N*sizeof(spin_t),cudaMemcpyDeviceToHost); cudaMemcpy(Jy, J_y_D, 2*N*sizeof(spin_t),cudaMemcpyDeviceToHost); cudaMemcpy(Jz, J_z_D, 2*N*sizeof(spin_t),cudaMemcpyDeviceToHost); int error=save_J_3d(Jx,Jy,Jz,N,fname); if(error<0){ LOG(LOG_WARNING)<<"fehler beim laden von J in file \""<<fname<<"\" mit fehler "<<error<<endl; } delete[] Jx; delete[] Jy; delete[] Jz; } void gpu_3d::init_J(){ LastError(); unsigned int *buffer_D; //Buffer divice gpuErrchk(cudaMalloc(&buffer_D, 3*2*N*sizeof(unsigned int))); //ranomly initlize J gpuErrchk(cudaDeviceSynchronize()); generate_kernel<<<ceil(N/256.),256>>>(gen_d,buffer_D,N, 3*2); gpuErrchk(cudaDeviceSynchronize()); J_order_3d<<<ceil(N/256.),256>>>(J_x_D, J_y_D, J_z_D, buffer_D, L, N); LastError(); gpuErrchk(cudaDeviceSynchronize()); cudaFree(buffer_D); LastError(); cudaBindTexture(0, get_3d_J_xi(), J_x_D,2*N*sizeof(int2)); LastError(); cudaBindTexture(0, get_3d_J_yi(), J_y_D,2*N*sizeof(int2)); LastError(); cudaBindTexture(0, get_3d_J_zi(), J_z_D,2*N*sizeof(int2)); LastError(); } void gpu_3d::init_rand(){ curandGenerateLongLong(gen2,s1_D,N); curandGenerateLongLong(gen2,s2_D,N); gpuErrchk(cudaDeviceSynchronize()); LastError(); } void gpu_3d::save_sys(string prefix){ spin_t s[N]; gpuErrchk(cudaDeviceSynchronize()); cudaMemcpy(s,s1_D,N*sizeof(spin_t),cudaMemcpyDeviceToHost); for (int i = 0; i < 64; ++i) { // image setup stringstream convert; convert<<prefix<<i<<".pbm"; ofstream file(convert.str().c_str()); file<<"P1"<<endl; file<<L<<" "<<L*L<<endl; // print image for (int j = 0; j < L*L; ++j) { for (int k = 0; k < L; ++k) { file<<((s[j*L+k]&((spin_t)1<<i))==0?"0 ":"1 "); } file<<endl; } } } long gpu_3d::get_N(){ return N; } void gpu_3d::swap(gpu_sys 
*sys, std::unique_ptr<spin_t[]> mask) { gpu_3d *sys_3d = dynamic_cast<gpu_3d *>(sys); if (sys_3d != NULL) { swap(sys_3d, move(mask)); } else { LOG(LOG_ERROR) << "conversion error"; } } void gpu_3d::swap(gpu_3d *sys, std::unique_ptr<spin_t[]> mask) { dim3 block_s(8,8,8); dim3 grid_s(ceil(L/8.),ceil(L/8.),ceil(L/8.)); checkerbord_switch_3d<<<grid_s,block_s,0,stream[1]>>>(s1_D,s2_D,L,L*L); checkerbord_switch_3d<<<grid_s,block_s,0,stream[1]>>>(sys->s1_D,sys->s2_D,sys->L,sys->L*sys->L); swap_3d<<<ceil(N/256.),256,0,stream[1]>>>(s1_D,sys->s1_D,mask[0],N); swap_3d<<<ceil(N/256.),256,0,stream[1]>>>(s2_D,sys->s2_D,mask[1],N); checkerbord_switch_3d<<<grid_s,block_s,0,stream[1]>>>(sys->s1_D,sys->s2_D,sys->L,sys->L*sys->L); checkerbord_switch_3d<<<grid_s,block_s,0,stream[1]>>>(s1_D,s2_D,L,L*L); } ostream & gpu_3d::save(ostream &stream){ binary_write(stream, L, 1); binary_write(stream, T, 1); binary_write(stream, h, 1); spin_t *s=new spin_t[N]; cudaMemcpy(s,s1_D,N*sizeof(spin_t),cudaMemcpyDeviceToHost); binary_write(stream, s[0], N); cudaMemcpy(s,s2_D,N*sizeof(spin_t),cudaMemcpyDeviceToHost); binary_write(stream, s[0], N); delete[] s; spin_t *J=new spin_t[2*N]; cudaMemcpy(J,J_x_D,2*N*sizeof(int2),cudaMemcpyDeviceToHost); binary_write(stream, J[0], 2 * N);//x cudaMemcpy(J,J_y_D,2*N*sizeof(int2),cudaMemcpyDeviceToHost); binary_write(stream, J[0], 2 * N);//y cudaMemcpy(J,J_z_D,2*N*sizeof(int2),cudaMemcpyDeviceToHost); binary_write(stream, J[0], 2 * N);//z delete[] J; // RNG state curandStatePhilox4_32_10_t *gen_n_save=new curandStatePhilox4_32_10_t[N]; cudaMemcpy(gen_n_save,gen_d,N*sizeof(curandStatePhilox4_32_10_t),cudaMemcpyDeviceToHost); binary_write(stream, gen_n_save[0], N); delete[] gen_n_save; return stream; } istream & gpu_3d::load(istream &stream){ binary_read(stream, L, 1); N = L * L * L; binary_read(stream, T, 1); set_T(T); binary_read(stream, h, 1); set_h(h); cudaDeviceSynchronize(); // memory cudaFree(gen_d); cudaFree(s1_D); cudaFree(s2_D); cudaFree(J_x_D); cudaFree(J_y_D); cudaFree(J_z_D); cudaFree(M_buf_D); cudaFree(EJ_buf_D); cudaMalloc(&gen_d,N*sizeof(curandStatePhilox4_32_10_t)); cudaMalloc(&s1_D, N*sizeof(spin_t)); cudaMalloc(&s2_D, N*sizeof(spin_t)); cudaMalloc(&J_x_D, 2*N*sizeof(int2)); cudaMalloc(&J_y_D, 2*N*sizeof(int2)); cudaMalloc(&J_z_D, 2*N*sizeof(int2)); cudaMalloc(&M_buf_D, 2*64*ceil(N/256.+1)*sizeof(float)); cudaMalloc(&EJ_buf_D, 2*64*ceil(N/256.+1)*sizeof(float)); // spins spin_t *s=new spin_t[N]; binary_read(stream, s[0], N); cudaMemcpy(s1_D,s,N*sizeof(spin_t),cudaMemcpyHostToDevice); binary_read(stream, s[0], N); cudaMemcpy(s2_D,s,N*sizeof(spin_t),cudaMemcpyHostToDevice); delete[] s; LastError(); // copling spin_t *J=new spin_t[2*N]; binary_read(stream, J[0], 2 * N);//x cudaMemcpy(J_x_D,J,2*N*sizeof(int2),cudaMemcpyHostToDevice); binary_read(stream, J[0], 2 * N);//y cudaMemcpy(J_y_D,J,2*N*sizeof(int2),cudaMemcpyHostToDevice); binary_read(stream, J[0], 2 * N);//y cudaMemcpy(J_z_D,J,2*N*sizeof(int2),cudaMemcpyHostToDevice); delete[] J; LastError(); cudaBindTexture(0, get_3d_J_xi(), J_x_D,2*N*sizeof(int2)); LastError(); cudaBindTexture(0, get_3d_J_yi(), J_y_D,2*N*sizeof(int2)); LastError(); cudaBindTexture(0, get_3d_J_zi(), J_z_D,2*N*sizeof(int2)); LastError(); // RNG state curandStatePhilox4_32_10_t *gen_n_save=new curandStatePhilox4_32_10_t[N]; binary_read(stream, gen_n_save[0], N); cudaMemcpy(gen_d,gen_n_save,N*sizeof(curandStatePhilox4_32_10_t),cudaMemcpyHostToDevice); delete[] gen_n_save; LastError(); return stream; }
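The 14-entry table filled in the constructor and in set_T/set_h holds Metropolis acceptance probabilities min(1, exp(-2*(1/T)*(j -/+ h))) for the seven even local field sums j in {-6, ..., 6} that six +/-1 neighbour bonds can produce on a 3D lattice, with the second half of the table covering the opposite sign of the field term. A host-side sketch of just that table construction, mirroring the #ifndef SPLIT_H branch above; how metrpolis_3d indexes the table lives in device_3d.cuh and is not reproduced here:

#include <algorithm>
#include <cmath>

// Rebuild the acceptance table that gets uploaded to boltz_D.
void buildBoltzTable(double T, double h, float boltz[14]) {
    for (int i = 0; i < 7; ++i) {
        int j = (i - 3) * 2;                                      // j in {-6,-4,-2,0,2,4,6}
        boltz[i]     = std::min(1.0, exp(-2.0 * (1.0 / T) * (j - h)));
        boltz[7 + i] = std::min(1.0, exp(-2.0 * (1.0 / T) * (j + h)));
    }
}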
8267114306f5af3ae787a99ee1c7fb675cb44d8a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #include "softmax_layer.h" #include "hip/hip_runtime.h" #include "blas.h" __global__ void forward_softmax_layer_kernel( int n, int batch, float *input, float temp, float *output ) { int b = ( blockIdx.x + blockIdx.y*gridDim.x ) * blockDim.x + threadIdx.x; if( b >= batch ) return; int i; float sum = 0; float largest = -INFINITY; for( i = 0; i < n; ++i ) { int val = input[ i + b*n ]; largest = ( val>largest ) ? val : largest; } for( i = 0; i < n; ++i ) { sum += exp( input[ i + b*n ] / temp - largest / temp ); } sum = ( sum != 0 ) ? largest / temp + log( sum ) : largest - 100; for( i = 0; i < n; ++i ) { output[ i + b*n ] = exp( input[ i + b*n ] / temp - sum ); } } void pull_softmax_layer_output( const softmax_layer layer ) { cuda_pull_array( layer.output_gpu, layer.output, layer.inputs*layer.batch ); } /* void forward_softmax_layer_gpu( const softmax_layer layer, network_state state ) { int inputs = layer.inputs / layer.groups; int batch = layer.batch * layer.groups; forward_softmax_layer_kernel << <cuda_gridsize( batch ), BLOCK >> >( inputs, batch, state.input, layer.temperature, layer.output_gpu ); check_error( hipPeekAtLastError() ); } void backward_softmax_layer_gpu( const softmax_layer layer, network_state state ) { axpy_ongpu( layer.batch*layer.inputs, 1, layer.delta_gpu, 1, state.delta, 1 ); } */ /* This is if you want softmax w/o log-loss classification. You probably don't. int i,j,b; for(b = 0; b < layer.batch; ++b){ for(i = 0; i < layer.inputs; ++i){ for(j = 0; j < layer.inputs; ++j){ int d = (i==j); layer.jacobian[b*layer.inputs*layer.inputs + i*layer.inputs + j] = layer.output[b*layer.inputs + i] * (d - layer.output[b*layer.inputs + j]); } } } for(b = 0; b < layer.batch; ++b){ int M = layer.inputs; int N = 1; int K = layer.inputs; float *A = layer.jacobian + b*layer.inputs*layer.inputs; float *B = layer.delta + b*layer.inputs; float *C = delta + b*layer.inputs; gemm(0,0,M,N,K,1,A,K,B,N,0,C,N); } */
8267114306f5af3ae787a99ee1c7fb675cb44d8a.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #include "softmax_layer.h" #include "cuda.h" #include "blas.h" __global__ void forward_softmax_layer_kernel( int n, int batch, float *input, float temp, float *output ) { int b = ( blockIdx.x + blockIdx.y*gridDim.x ) * blockDim.x + threadIdx.x; if( b >= batch ) return; int i; float sum = 0; float largest = -INFINITY; for( i = 0; i < n; ++i ) { int val = input[ i + b*n ]; largest = ( val>largest ) ? val : largest; } for( i = 0; i < n; ++i ) { sum += exp( input[ i + b*n ] / temp - largest / temp ); } sum = ( sum != 0 ) ? largest / temp + log( sum ) : largest - 100; for( i = 0; i < n; ++i ) { output[ i + b*n ] = exp( input[ i + b*n ] / temp - sum ); } } void pull_softmax_layer_output( const softmax_layer layer ) { cuda_pull_array( layer.output_gpu, layer.output, layer.inputs*layer.batch ); } /* void forward_softmax_layer_gpu( const softmax_layer layer, network_state state ) { int inputs = layer.inputs / layer.groups; int batch = layer.batch * layer.groups; forward_softmax_layer_kernel << <cuda_gridsize( batch ), BLOCK >> >( inputs, batch, state.input, layer.temperature, layer.output_gpu ); check_error( cudaPeekAtLastError() ); } void backward_softmax_layer_gpu( const softmax_layer layer, network_state state ) { axpy_ongpu( layer.batch*layer.inputs, 1, layer.delta_gpu, 1, state.delta, 1 ); } */ /* This is if you want softmax w/o log-loss classification. You probably don't. int i,j,b; for(b = 0; b < layer.batch; ++b){ for(i = 0; i < layer.inputs; ++i){ for(j = 0; j < layer.inputs; ++j){ int d = (i==j); layer.jacobian[b*layer.inputs*layer.inputs + i*layer.inputs + j] = layer.output[b*layer.inputs + i] * (d - layer.output[b*layer.inputs + j]); } } } for(b = 0; b < layer.batch; ++b){ int M = layer.inputs; int N = 1; int K = layer.inputs; float *A = layer.jacobian + b*layer.inputs*layer.inputs; float *B = layer.delta + b*layer.inputs; float *C = delta + b*layer.inputs; gemm(0,0,M,N,K,1,A,K,B,N,0,C,N); } */
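Editor's note: both the hipified and the original CUDA copy of forward_softmax_layer_kernel above compute a temperature softmax with the log-sum-exp trick: find the per-row maximum, accumulate exp(x/T - max/T), and normalize through exp(x/T - logsumexp). The host-side reference below is an illustrative sketch, not darknet code; note that the kernel stores each candidate maximum in an int, which truncates the value, while this sketch keeps it in float.

#include <algorithm>
#include <cmath>

// Numerically stable softmax with temperature for one row of length n,
// following the kernel's largest/temp + log(sum) formulation.
void softmax_reference(const float *input, int n, float temp, float *output) {
    float largest = -INFINITY;
    for (int i = 0; i < n; ++i)
        largest = std::max(largest, input[i]);          // max for stability
    float sum = 0.f;
    for (int i = 0; i < n; ++i)
        sum += std::exp(input[i] / temp - largest / temp);
    // log-sum-exp; the (sum != 0) guard mirrors the kernel's fallback branch
    const float lse = (sum != 0.f) ? largest / temp + std::log(sum)
                                   : largest - 100.f;
    for (int i = 0; i < n; ++i)
        output[i] = std::exp(input[i] / temp - lse);
}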
bd59959ce580d5edc2227d264751c09916684379.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <sys/time.h> #include <blas_quda.h> #include <face_quda.h> #include <inline_ptx.h> namespace quda { namespace mobius { #undef GPU_STAGGERED_DIRAC #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_DOMAIN_WALL_DIRAC #include <mdw_dslash4_def.h> // Dslash4, intermediate operator for Mobius Mat_4 kernels #include <mdw_dslash4pre_def.h> // Dslash4pre, intermediate operator for Mobius Mat_4 kernels #include <mdw_dslash5_def.h> // Dslash5 Mobius Domain Wall kernels #include <mdw_dslash5inv_def.h> // Dslash5inv Mobius Domain Wall kernels #endif #ifndef DSLASH_SHARED_FLOATS_PER_THREAD #define DSLASH_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // declare the dslash events #include <dslash_events.cuh> using namespace mobius; #ifdef GPU_DOMAIN_WALL_DIRAC //Dslash class definition for Mobius Domain Wall Fermion template <typename sFloat, typename gFloat> class MDWFDslashPCCuda : public DslashCuda { private: const gFloat *gauge0, *gauge1; const double mferm, a; double *b5, *c5; const int DS_type; bool checkGrid(TuneParam &param) const { if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) { warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large", param.block.x, param.block.y, param.block.z, param.grid.x, param.grid.y, param.grid.z); return false; } else { return true; } } protected: bool advanceBlockDim(TuneParam &param) const { const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock; const int step[2] = { deviceProp.warpSize, 1 }; bool advance[2] = { false, false }; // first try to advance block.x param.block.x += step[0]; if (param.block.x > deviceProp.maxThreadsDim[0] || sharedBytesPerThread()*param.block.x*param.block.y > max_shared) { advance[0] = false; param.block.x = step[0]; // reset block.x } else { advance[0] = true; // successfully advanced block.x } if (!advance[0]) { // if failed to advance block.x, now try block.y param.block.y += step[1]; if (param.block.y > in->X(4) || sharedBytesPerThread()*param.block.x*param.block.y > max_shared) { advance[1] = false; param.block.y = step[1]; // reset block.x } else { advance[1] = true; // successfully advanced block.y } } if (advance[0] || advance[1]) { param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x, (in->X(4)+param.block.y-1) / param.block.y, 1); bool advance = true; if (!checkGrid(param)) advance = advanceBlockDim(param); return advance; } else { return false; } } unsigned int sharedBytesPerThread() const { return 0; } public: MDWFDslashPCCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1, const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const double mferm, const 
double a, const int dagger, const int DS_type) : DslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), mferm(mferm), a(a), DS_type(DS_type) { bindSpinorTex<sFloat>(in, out, x); } virtual ~MDWFDslashPCCuda() { unbindSpinorTex<sFloat>(in, out, x); } TuneKey tuneKey() const { TuneKey key = DslashCuda::tuneKey(); switch(DS_type){ case 0: strcat(key.aux,",Dslash4"); break; case 1: strcat(key.aux,",Dslash4pre"); break; case 2: strcat(key.aux,",Dslash5"); break; case 3: strcat(key.aux,",Dslash5inv"); break; } return key; } virtual void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x, (in->X(4)+param.block.y-1) / param.block.y, 1); bool ok = true; if (!checkGrid(param)) ok = advanceBlockDim(param); if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim"); } /** sets default values for when tuning is disabled */ virtual void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x, (in->X(4)+param.block.y-1) / param.block.y, 1); bool ok = true; if (!checkGrid(param)) ok = advanceBlockDim(param); if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim"); } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); switch(DS_type){ case 0: DSLASH(MDWFDslash4, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a); break; case 1: DSLASH(MDWFDslash4pre, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a); break; case 2: DSLASH(MDWFDslash5, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a); break; case 3: DSLASH(MDWFDslash5inv, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a); break; default: errorQuda("invalid Dslash type"); } } long long flops() const { // FIXME for multi-GPU long long Ls = in->X(4); long long vol4d = in->VolumeCB() / Ls; long long bulk = (Ls-2)*vol4d; long long wall = 2*vol4d; long long flops_Tmp; switch(DS_type){ case 0: flops_Tmp = (x ? 1368ll : 1320ll)*in->VolumeCB(); break; case 1: flops_Tmp = 72ll*in->VolumeCB() + 96ll*bulk + 120ll*wall; break; case 2: flops_Tmp = (x ? 96ll : 48ll)*in->VolumeCB() + 96ll*bulk + 120ll*wall; break; case 3: flops_Tmp = 144ll*in->VolumeCB()*Ls + 3ll*Ls*(Ls-1ll); break; default: errorQuda("invalid Dslash type"); } return flops_Tmp; } }; #endif // GPU_DOMAIN_WALL_DIRAC #include <dslash_policy.cuh> //----------------------------------------------------- // Modification for 4D preconditioned Mobius DWF operator // Additional Arg. is added to give a function name. 
// // pre-defined DS_type list // 0 = MDWF dslash4 // 1 = MDWF dslash4pre // 2 = MDWF dslash5 // 3 = MDWF dslash5inv //----------------------------------------------------- void MDWFDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const double &m_f, const double &k2, const int *commOverride, const int DS_type, TimeProfile &profile, const QudaDslashPolicy &dslashPolicy) { inSpinor = (cudaColorSpinorField*)in; // EVIL dslashParam.parity = parity; #ifdef GPU_DOMAIN_WALL_DIRAC //currently splitting in space-time is impelemented: int dirs = 4; int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code for(int i = 0;i < dirs; i++){ dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride()); dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride(); dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0 } void *gauge0, *gauge1; bindGaugeTex(gauge, parity, &gauge0, &gauge1); if (in->Precision() != gauge.Precision()) errorQuda("Mixing gauge and spinor precision not supported"); DslashCuda *dslash = 0; size_t regSize = sizeof(float); if (in->Precision() == QUDA_DOUBLE_PRECISION) { #if (__COMPUTE_CAPABILITY__ >= 130) dslash = new MDWFDslashPCCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type); regSize = sizeof(double); #else errorQuda("Double precision not supported on this GPU"); #endif } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new MDWFDslashPCCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new MDWFDslashPCCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type); } // the parameters passed to dslashCuda must be 4-d volume and 3-d // faces because Ls is added as the y-dimension in thread space int ghostFace[QUDA_MAX_DIM]; for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4); DslashPolicyImp* dslashImp = NULL; if (DS_type != 0) { dslashImp = DslashFactory::create(QUDA_DSLASH_NC); } else { #ifndef GPU_COMMS dslashImp = DslashFactory::create(dslashPolicy); #else dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH); #endif } (*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume()/in->X(4), ghostFace, profile); delete dslashImp; delete dslash; unbindGaugeTex(gauge); checkCudaError(); #else errorQuda("Domain wall dslash has not been built"); #endif } }
bd59959ce580d5edc2227d264751c09916684379.cu
#include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <sys/time.h> #include <blas_quda.h> #include <face_quda.h> #include <inline_ptx.h> namespace quda { namespace mobius { #undef GPU_STAGGERED_DIRAC #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_DOMAIN_WALL_DIRAC #include <mdw_dslash4_def.h> // Dslash4, intermediate operator for Mobius Mat_4 kernels #include <mdw_dslash4pre_def.h> // Dslash4pre, intermediate operator for Mobius Mat_4 kernels #include <mdw_dslash5_def.h> // Dslash5 Mobius Domain Wall kernels #include <mdw_dslash5inv_def.h> // Dslash5inv Mobius Domain Wall kernels #endif #ifndef DSLASH_SHARED_FLOATS_PER_THREAD #define DSLASH_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // declare the dslash events #include <dslash_events.cuh> using namespace mobius; #ifdef GPU_DOMAIN_WALL_DIRAC //Dslash class definition for Mobius Domain Wall Fermion template <typename sFloat, typename gFloat> class MDWFDslashPCCuda : public DslashCuda { private: const gFloat *gauge0, *gauge1; const double mferm, a; double *b5, *c5; const int DS_type; bool checkGrid(TuneParam &param) const { if (param.grid.x > deviceProp.maxGridSize[0] || param.grid.y > deviceProp.maxGridSize[1]) { warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large", param.block.x, param.block.y, param.block.z, param.grid.x, param.grid.y, param.grid.z); return false; } else { return true; } } protected: bool advanceBlockDim(TuneParam &param) const { const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock; const int step[2] = { deviceProp.warpSize, 1 }; bool advance[2] = { false, false }; // first try to advance block.x param.block.x += step[0]; if (param.block.x > deviceProp.maxThreadsDim[0] || sharedBytesPerThread()*param.block.x*param.block.y > max_shared) { advance[0] = false; param.block.x = step[0]; // reset block.x } else { advance[0] = true; // successfully advanced block.x } if (!advance[0]) { // if failed to advance block.x, now try block.y param.block.y += step[1]; if (param.block.y > in->X(4) || sharedBytesPerThread()*param.block.x*param.block.y > max_shared) { advance[1] = false; param.block.y = step[1]; // reset block.x } else { advance[1] = true; // successfully advanced block.y } } if (advance[0] || advance[1]) { param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x, (in->X(4)+param.block.y-1) / param.block.y, 1); bool advance = true; if (!checkGrid(param)) advance = advanceBlockDim(param); return advance; } else { return false; } } unsigned int sharedBytesPerThread() const { return 0; } public: MDWFDslashPCCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1, const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const double mferm, const double a, const int dagger, const int DS_type) : 
DslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), mferm(mferm), a(a), DS_type(DS_type) { bindSpinorTex<sFloat>(in, out, x); } virtual ~MDWFDslashPCCuda() { unbindSpinorTex<sFloat>(in, out, x); } TuneKey tuneKey() const { TuneKey key = DslashCuda::tuneKey(); switch(DS_type){ case 0: strcat(key.aux,",Dslash4"); break; case 1: strcat(key.aux,",Dslash4pre"); break; case 2: strcat(key.aux,",Dslash5"); break; case 3: strcat(key.aux,",Dslash5inv"); break; } return key; } virtual void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x, (in->X(4)+param.block.y-1) / param.block.y, 1); bool ok = true; if (!checkGrid(param)) ok = advanceBlockDim(param); if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim"); } /** sets default values for when tuning is disabled */ virtual void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x, (in->X(4)+param.block.y-1) / param.block.y, 1); bool ok = true; if (!checkGrid(param)) ok = advanceBlockDim(param); if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim"); } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); switch(DS_type){ case 0: DSLASH(MDWFDslash4, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a); break; case 1: DSLASH(MDWFDslash4pre, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a); break; case 2: DSLASH(MDWFDslash5, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a); break; case 3: DSLASH(MDWFDslash5inv, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam, (sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1, (sFloat*)in->V(), (float*)in->Norm(), mferm, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0), a); break; default: errorQuda("invalid Dslash type"); } } long long flops() const { // FIXME for multi-GPU long long Ls = in->X(4); long long vol4d = in->VolumeCB() / Ls; long long bulk = (Ls-2)*vol4d; long long wall = 2*vol4d; long long flops_Tmp; switch(DS_type){ case 0: flops_Tmp = (x ? 1368ll : 1320ll)*in->VolumeCB(); break; case 1: flops_Tmp = 72ll*in->VolumeCB() + 96ll*bulk + 120ll*wall; break; case 2: flops_Tmp = (x ? 96ll : 48ll)*in->VolumeCB() + 96ll*bulk + 120ll*wall; break; case 3: flops_Tmp = 144ll*in->VolumeCB()*Ls + 3ll*Ls*(Ls-1ll); break; default: errorQuda("invalid Dslash type"); } return flops_Tmp; } }; #endif // GPU_DOMAIN_WALL_DIRAC #include <dslash_policy.cuh> //----------------------------------------------------- // Modification for 4D preconditioned Mobius DWF operator // Additional Arg. is added to give a function name. 
// // pre-defined DS_type list // 0 = MDWF dslash4 // 1 = MDWF dslash4pre // 2 = MDWF dslash5 // 3 = MDWF dslash5inv //----------------------------------------------------- void MDWFDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const double &m_f, const double &k2, const int *commOverride, const int DS_type, TimeProfile &profile, const QudaDslashPolicy &dslashPolicy) { inSpinor = (cudaColorSpinorField*)in; // EVIL dslashParam.parity = parity; #ifdef GPU_DOMAIN_WALL_DIRAC //currently splitting in space-time is impelemented: int dirs = 4; int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code for(int i = 0;i < dirs; i++){ dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride()); dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride(); dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0 } void *gauge0, *gauge1; bindGaugeTex(gauge, parity, &gauge0, &gauge1); if (in->Precision() != gauge.Precision()) errorQuda("Mixing gauge and spinor precision not supported"); DslashCuda *dslash = 0; size_t regSize = sizeof(float); if (in->Precision() == QUDA_DOUBLE_PRECISION) { #if (__COMPUTE_CAPABILITY__ >= 130) dslash = new MDWFDslashPCCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type); regSize = sizeof(double); #else errorQuda("Double precision not supported on this GPU"); #endif } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new MDWFDslashPCCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new MDWFDslashPCCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1, gauge.Reconstruct(), in, x, m_f, k2, dagger, DS_type); } // the parameters passed to dslashCuda must be 4-d volume and 3-d // faces because Ls is added as the y-dimension in thread space int ghostFace[QUDA_MAX_DIM]; for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4); DslashPolicyImp* dslashImp = NULL; if (DS_type != 0) { dslashImp = DslashFactory::create(QUDA_DSLASH_NC); } else { #ifndef GPU_COMMS dslashImp = DslashFactory::create(dslashPolicy); #else dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH); #endif } (*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, in->Volume()/in->X(4), ghostFace, profile); delete dslashImp; delete dslash; unbindGaugeTex(gauge); checkCudaError(); #else errorQuda("Domain wall dslash has not been built"); #endif } }
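Editor's note: in both copies of the Mobius dslash file above, initTuneParam, defaultTuneParam, and advanceBlockDim derive the launch grid the same way: the 4-d checkerboard site count (dslashParam.threads) is ceil-divided by block.x and the fifth dimension Ls = in->X(4) by block.y, after which checkGrid rejects grids exceeding deviceProp.maxGridSize. A small stand-alone sketch of that calculation follows; the helper name is hypothetical and not part of the QUDA API.

#include <cuda_runtime.h>

// Ceil-divide the 4-d volume onto block.x and the Ls (fifth) dimension onto
// block.y, mirroring the tuner's param.grid assignment above.
dim3 mdwf_launch_grid(int threads, int Ls, dim3 block) {
    return dim3((threads + block.x - 1) / block.x,
                (Ls + block.y - 1) / block.y,
                1);
}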
91a98799b76151ced22b8c113e5d3a214e77c13f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/native/IndexingUtils.h> #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/ExpandUtils.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/TensorIterator.h> #include <ATen/AccumulateType.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/HIPUtils.h> #include <THH/THHDeviceUtils.cuh> #include <THH/THHGeneral.h> #include <THH/THHTensorSort.cuh> #include <ATen/hip/HIPContext.h> #include <THH/THHThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <THH/THHAtomics.cuh> #include <hipcub/hipcub.hpp> #include <c10/macros/Macros.h> namespace { template <typename scalar_t, int SZ> __global__ void indexing_backward_kernel( int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight, int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim) { //numel is total number of flattened indices, not expanded to dimensions that are not indexed. //stride is the cumulative size of the not-indexed last dimensions //stride_before is the stride of the dimension immediately preceding first indexed dimension //if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case //outer_dim is number of elements in the first unindexed dimensions using accscalar_t = at::acc_type<scalar_t, true>; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same destination index as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. // // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values processed by each thread (grain size) for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){ int64_t idx = blockIdx.x * blockDim.y + threadIdx.y; if (idx < numel && (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){ do { int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before; const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride; const accscalar_t scale = (accscalar_t)1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; while (start_feature < stride) { #pragma unroll for (int ii = 0; ii < SZ; ii++) { int64_t feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int64_t feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } start_feature += gridDim.y * blockDim.x * SZ; } idx++; } while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]); } } } } namespace at { namespace native { static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) { //we don't need to check range in backward - if there were out of bounds indices forward should already have errored out if (index.numel() != 0 && check_range) { auto max_idx 
= index.max().item<int64_t>(); auto min_idx = index.min().item<int64_t>(); if (max_idx >= dim_size) { TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size); } if (min_idx < -dim_size) { TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size); } } return index.remainder(dim_size); } static std::vector<int64_t> computeLinearStride(const Tensor & tensor) { // computes the stride as if tensor were contiguous auto sizes = tensor.sizes(); std::vector<int64_t> stride(tensor.dim()); stride[tensor.dim() - 1] = 1; std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>()); return stride; } static std::tuple<Tensor, int64_t, int64_t, int64_t> computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) { auto strides = computeLinearStride(src); const auto& backend = src.type().backend(); // Compute the linear index by multiplying the indexing tensors by the // stride and summing them. All the indexing tensors have the same shape at // this point. We also compute the number of dimensions before and after that // are not being index. Tensor linearIndex; int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0; for (auto i = decltype(src.dim()){0}; i < src.dim(); i++) { if (indices[i].defined()) { // Cast index to the longType matching src's backend // This allows us to support ie indexing a cuda tensor with a cpu tensor Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).toBackend(backend); if (linearIndex.defined()) { linearIndex += index; } else { linearIndex = index; if (i>0) { strideBefore = src.stride(i-1); // stride after undefined dimensions } } } else if (linearIndex.defined()) { emptyAfter++; nElemAfter *= src.size(i); } else { emptyBefore++; nElemBefore *= src.size(i); } } return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter); } static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) { checkIndexTensorTypes(orig); // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors auto indices = expandTensors(self, orig); // next broadcast all index tensors together indices = expand_outplace(indices); // add missing null Tensors so that it matches self.dim() while (indices.size() < (size_t)self.dim()) { indices.emplace_back(); } // if the non-null indices are not all adjacent, transpose self and indices // together so that they're adjacent at the front std::vector<int64_t> inversePerm; if (!hasContiguousSubspace(indices)) { std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices); } int64_t nElemBefore, strideBefore, nElemAfter; Tensor linearIndex; std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range); return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm); } namespace { void index_put_accum_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool unsafe) { if (indices.size() > (size_t)self.dim()) { TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")"); } auto value_ = value.contiguous(); Tensor linearIndex, expandedValue, src; int64_t nElemBefore, strideBefore, sliceSize; 
std::vector<int64_t> inversePerm; std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe); int64_t num_indices = linearIndex.numel(); if (num_indices > 0 && sliceSize > 0) { const bool permuted = !src.is_contiguous(); auto src_ = permuted ? src.contiguous() : src; linearIndex = linearIndex.reshape(-1); auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT); using device_ptr = thrust::device_ptr<int64_t>; const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); linearIndex.floor_divide_(sliceSize); { sorted_indices.copy_(linearIndex); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices const auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly // Sort; a stable sort is not required // NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>()); } TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel()); const int UNROLL = 4; const int indices_per_block = 4; dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block), std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))), ::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2])); dim3 block(C10_WARP_SIZE, indices_per_block); AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, value_.scalar_type(), "indexing_backward", [&] { hipLaunchKernelGGL(( indexing_backward_kernel<scalar_t, UNROLL>), dim3(grid), dim3(block), 0, stream, sorted_indices.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(), value_.data_ptr<scalar_t>(), src_.data_ptr<scalar_t>(), num_indices, sliceSize, strideBefore, nElemBefore); }); C10_HIP_KERNEL_LAUNCH_CHECK(); if (permuted) self.copy_(src_.permute(inversePerm)); } } REGISTER_CUDA_DISPATCH(index_put_accum_stub, &index_put_accum_kernel); } //anonymous // Check tensor dimensions for index operations, and return the slice size. 
static ptrdiff_t getSliceSize(const Tensor & dst, int dim, const Tensor & index, const Tensor & src) { int dstDims = dst.dim(); int srcDims = src.dim(); TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar"); ptrdiff_t dstSliceSize = 1; TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds"); for (int d = 0; d < dstDims; d++) { if (d != dim) { dstSliceSize *= dst.size(d); } } TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds"); TORCH_CHECK(index.numel() == src.size(dim), "length of src.size[dim] is not equal to length of indices"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (int d = 0; d < srcDims; d++) { if (d != dim) { srcSliceSize *= src.size(d); if (!mismatch && dst.size(d) != src.size(d)) mismatch = true; } } TORCH_CHECK(dstSliceSize == srcSliceSize, "Source/destination tensor have different slice sizes (%ld vs %ld)", dstSliceSize, srcSliceSize); if (mismatch) { TORCH_WARN_ONCE( "Warning: source/destination slices have same size but different " "shape for an index operation. This behavior is deprecated.\n"); } return dstSliceSize; } // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexAddLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstAddDim, int srcAddDim, IndexType innerSize, int64_t dstAddDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) { // Lua indices begin at 1 IndexType dstIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)]; CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcAddDim]; gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]); } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexAddSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstAddDim, int srcAddDim, IndexType totalSize, IndexType innerSize, int64_t dstAddDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType srcIndex, elementInSlice; if (IndexIsMajor) { srcIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; srcIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType dstIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)]; CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize); IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcAddDim]; gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]); } } // Compare the stride between adjacent slices (sliceStride) with strides in the // other dimensions (i.e., strides *inside* each slice). // // - Returns true if some dimension inside the slice has lower stride than // sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim // == 0 (that is, each slice is a row). // // In this case, we choose the CUDA kernel that processes the data in // "index-major order". For example, if thread count equals slice size, then // all threads process slice #0 in lockstep, and then slice #1, and so on. // // - Otherwise (i.e., sliceStride has the lowest value), this function returns // false. The simplest example is a 2-D contiguous tensor with sliceDim == 1 // (each slice is a column). // // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. template <typename scalar_t> bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 // and element #0 of slice #101). 
unsigned int sliceStride = info.strides[sliceDim]; for (int i = 0; i < info.dims; ++i) { if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) { return true; } } return false; } Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("index_add_cuda_"); dim = maybe_wrap_dim(dim, self.dim()); TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4}; checkAllSameGPU("index_add", {self_arg, index_arg, source_arg}); TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector"); TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "index_add_(): Expected dtype int32/int64 for index"); TORCH_CHECK(self.scalar_type() == source.scalar_type(), "index_add_(): self and source must have the same scalar type"); TORCH_CHECK(dim == 0 || dim < source.dim(), "index_add_(): Indexing dim ", dim, " is out of bounds of tensor"); TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)), "index_add_(): Number of indices should be equal to self.size(dim)"); at::assert_no_internal_overlap(self); at::assert_no_overlap(self, index); at::assert_no_overlap(self, source); // Scalars are treated as 1-d tensor Tensor self_ = (self.dim() == 0) ? self.view(1) : self; Tensor source_ = (source.dim() == 0) ? source.view(1) : source; TORCH_CHECK(self.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); TORCH_CHECK(source.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); TORCH_CHECK(index.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); at::assert_no_internal_overlap(self); at::assert_no_partial_overlap(self, index); at::assert_no_partial_overlap(self, source); // The `source` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of index we are choosing, which is the total size // of the tensor `index`. ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_); ptrdiff_t sourceTotalSize = source.numel(); int64_t selfAddDimSize = self_.size(dim); ptrdiff_t numIndex = index.numel(); if (sliceSize == 0) { return self; } const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); bool indContig = index.is_contiguous(); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ selfInfo, sourceInfo, indexInfo, \ selfAddDim, sourceAddDim, sliceSize, selfAddDimSize); \ C10_HIP_KERNEL_LAUNCH_CHECK(); #define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \ SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \ SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ selfInfo, sourceInfo, indexInfo, \ selfAddDim, sourceAddDim, sourceTotalSize, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndex, \ selfAddDimSize); \ C10_HIP_KERNEL_LAUNCH_CHECK(); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(sourceTotalSize, (ptrdiff_t)128)); if (cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(source) && cuda::detail::canUse32BitIndexMath(index)) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] { cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_); int selfAddDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfAddDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () { auto sourceInfo = cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_); int sourceAddDim = sourceInfo.collapseDims(dim); sourceInfo.reduceDim(sourceAddDim); auto indexInfo = cuda::detail::getTensorInfo<index_t, unsigned int>(index); indexInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // index to choose if (numIndex <= 16) { if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2); } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2); } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim); if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true); } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false); } } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true); } } }); }); } else { AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] { cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_); int selfAddDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfAddDim); cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo = cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_); int sourceAddDim = sourceInfo.collapseDims(dim); sourceInfo.reduceDim(sourceAddDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () { cuda::detail::TensorInfo<index_t, uint64_t> indexInfo = cuda::detail::getTensorInfo<index_t, uint64_t>(index); indexInfo.collapseDims(); LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true); }); }); } return self; #undef SMALL_INDEX #undef LARGE_INDEX } namespace { // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. 
// This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexSelectLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType innerSize, int64_t srcSelectDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) { IndexType srcIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)]; CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexSelectSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType totalSize, IndexType innerSize, int64_t srcSelectDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstIndex, elementInSlice; if (IndexIsMajor) { dstIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; dstIndex = linearIndex % innerSize; } IndexType srcIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)]; CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize); IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } namespace { // When using a 0-dim scalar tensor, we need the legacy (THC) semantics of // TensorInfo: Pretend that the scalar tensor is in fact a one-element vector. template <typename T, typename IndexType> cuda::detail::TensorInfo<T, IndexType> tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) { if (ti.dims == 0) { ti.dims = 1; ti.sizes[0] = 1; ti.strides[0] = 1; } return ti; } } template<typename scalar_t> void index_select_out_cuda_impl(Tensor& out, const Tensor& self, long dim, const Tensor& index) { ptrdiff_t numIndices = index.numel(); int selfDims = self.dim() == 0 ? 1 : self.dim(); const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); TORCH_CHECK(index.dim() <= 1, "Index is supposed to be an empty tensor or a vector"); TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds"); std::vector<int64_t> newSize = self.sizes().vec(); if (self.dim() > 0) { newSize[dim] = numIndices; } at::native::resize_(out, newSize, {}); ptrdiff_t outTotalSize = out.numel(); if (outTotalSize == 0) { return; } bool indContig = index.is_contiguous(); // The `self` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. int64_t selfSelectDimSize = self.dim() == 0 ? 
1 : self.size(dim); ptrdiff_t sliceSize = outTotalSize / numIndices; int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ outInfo, selfInfo, indicesInfo, \ outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \ selfSelectDimSize); \ C10_HIP_KERNEL_LAUNCH_CHECK(); #define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ outInfo, selfInfo, indicesInfo, \ outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \ static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \ selfSelectDimSize); \ C10_HIP_KERNEL_LAUNCH_CHECK(); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(outTotalSize, (ptrdiff_t)128)); if (cuda::detail::canUse32BitIndexMath(out) && cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(index)) { auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out)); int outSelectDim = outInfo.collapseDims(dim); outInfo.reduceDim(outSelectDim); auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self)); int selfSelectDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfSelectDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () { auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index)); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2); } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2); } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim); if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true); } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false); } } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true); } } }); } else { auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out)); int outSelectDim = outInfo.collapseDims(dim); outInfo.reduceDim(outSelectDim); auto selfInfo = 
tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self)); int selfSelectDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfSelectDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () { auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index)); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true); }); } #undef SMALL_INDEX #undef LARGE_INDEX } } // anonymous namespace Tensor& index_select_out_cuda(Tensor& out, const Tensor& self, int64_t dim, const Tensor& index) { static constexpr string_view DIM_WARNING = "Tensor too large or too many (> 25) dimensions"; TORCH_CHECK(at::cuda::check_device({out, self, index}), "Input, output and indices must be on the current device"); at::assert_no_internal_overlap(out); at::assert_no_overlap(out, self); at::assert_no_overlap(out, index); dim = at::maybe_wrap_dim(dim, self); TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING); TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, out.scalar_type(), "index_select_cuda", [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); }); return out; } Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) { Tensor out = at::empty({0}, self.options()); index_select_out_cuda(out, self, dim, index); return out; } template<typename T> struct NonZeroOp { __host__ __device__ __forceinline__ bool operator()(const T& a) const { return (a!=T(0)); } }; template<typename scalar_t> void nonzero_cuda_out_impl(const Tensor& self, Tensor& out){ Tensor self_ = self.contiguous(); int N = self_.numel(); const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // compute number of nonzero elements size_t temp_storage_bytes=0; auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); auto num_nonzeros = allocator.allocate(sizeof(int)); hipcub::TransformInputIterator<bool, NonZeroOp<scalar_t>, scalar_t*> itr(self_.data_ptr<scalar_t>(), NonZeroOp<scalar_t>()); hipcub::DeviceReduce::Sum(nullptr, temp_storage_bytes, itr, (int*)num_nonzeros.get(), N, stream); auto temp_storage = allocator.allocate(temp_storage_bytes); hipcub::DeviceReduce::Sum(temp_storage.get(), temp_storage_bytes, itr, (int*)num_nonzeros.get(), N, stream); int num_nonzeros_h; C10_HIP_CHECK(hipMemcpyAsync(&num_nonzeros_h, num_nonzeros.get(), sizeof(int), hipMemcpyDeviceToHost, stream)); //need to synchronize to make sure data is available on the host C10_HIP_CHECK(hipStreamSynchronize(stream)); //expected output size is num_nonzeros x ndim //we are producing output with size {num_nonzeros, ndim} and strides {num_nonzeros, 1} (that is, transposed ndim x num_nonzeros output) //we are able to directly use passed output with this size and strides, and we can also (per contract) //resize passed output with incorrect sizes anyway we want. //However, out with correct sizes and incorrect strides will have to be copied to from the intermediate we've produced. bool need_to_copy = out.dim() == 2 && out.sizes()[0] == num_nonzeros_h && out.sizes()[1] == self.dim() && !out.t().is_contiguous(); at::Tensor out_temp = need_to_copy ? 
at::native::empty_cuda({self.dim(), num_nonzeros_h}, optTypeMetaToScalarType(out.options().dtype_opt()), out.options().layout_opt(), out.options().device_opt(), out.options().pinned_memory_opt()) : out.resize_({self.dim(), num_nonzeros_h}); //Scalars are expected to produce output of size (1,0), so we can't write to it if (self.dim() > 0) { hipcub::CountingInputIterator<int64_t> counting_itr(0); temp_storage_bytes = 0; hipcub::DeviceSelect::Flagged(nullptr, temp_storage_bytes, counting_itr, itr, out_temp.data_ptr<int64_t>(), (int*)num_nonzeros.get(), N, stream); temp_storage = allocator.allocate(temp_storage_bytes); hipcub::DeviceSelect::Flagged(temp_storage.get(), temp_storage_bytes, counting_itr, itr, out_temp.data_ptr<int64_t>(), (int*)num_nonzeros.get(), N, stream); if (num_nonzeros_h > 0 && self.dim() > 1){ int64_t div = 1; auto thrust_allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); for (int dim = self.dim()-1; dim >= 0; dim--){ int64_t dim_size = self.sizes()[dim]; thrust::transform( thrust::hip::par(thrust_allocator).on(stream), thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()), thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()) + num_nonzeros_h, thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()) + num_nonzeros_h * dim, [=] C10_HOST_DEVICE (const int64_t val) {return (val/div) % dim_size;} ); div *= dim_size; } } } if (need_to_copy) { out.copy_(out_temp.t()); } else { //transpose out so it is correct size Tensor out_ = out_temp.t(); out.set_(out_); } } Tensor& nonzero_out_cuda(Tensor& out, const Tensor& self){ TORCH_CHECK(self.numel() < std::numeric_limits<int>::max(), "nonzero is not supported for tensors with more than INT_MAX elements, \ file a support request"); TORCH_CHECK(out.dtype() == at::kLong, "Expected object of scalar type ", at::kLong, " as out, but got ", out.dtype()); TORCH_CHECK(self.device() == out.device(), "expected self and out to be on the same device, but got out on ", out.device(), " and self on ", self.device()); AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::Half, self.scalar_type(), "nonzero_cuda", [&] {nonzero_cuda_out_impl<scalar_t>(self, out);}); return out; } Tensor nonzero_cuda(const Tensor& self){ Tensor out = at::native::empty_cuda({0}, kLong, self.options().layout_opt(), self.options().device_opt(), self.options().pinned_memory_opt()); return nonzero_out_cuda(out, self); } } // native } // at
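A note on the delinearization step in nonzero_cuda_out_impl above: each flattened nonzero position is converted back into per-dimension coordinates by repeated division and modulo, one thrust::transform per dimension, starting from the innermost size. Below is a minimal host-only sketch of that arithmetic; the 2x3x4 sizes and the flat position 17 are made-up values for illustration, not anything taken from the file.

#include <cstdio>

int main()
{
    const int sizes[3] = {2, 3, 4};      // a 2x3x4 tensor, row-major
    const int p = 17;                    // one flattened (linear) position
    int coord[3];
    int div = 1;
    // Same pattern as the thrust::transform loop above: innermost dim first,
    // coord[dim] = (p / div) % sizes[dim], then div *= sizes[dim].
    for (int dim = 2; dim >= 0; --dim) {
        coord[dim] = (p / div) % sizes[dim];
        div *= sizes[dim];
    }
    printf("flat %d -> (%d, %d, %d)\n", p, coord[0], coord[1], coord[2]);
    // 17 = 1*12 + 1*4 + 1, so the expected output is (1, 1, 1)
    return 0;
}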
91a98799b76151ced22b8c113e5d3a214e77c13f.cu
#include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/native/IndexingUtils.h> #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/ExpandUtils.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/TensorIterator.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/CUDAUtils.h> #include <THC/THCDeviceUtils.cuh> #include <THC/THCGeneral.h> #include <THC/THCTensorSort.cuh> #include <ATen/cuda/CUDAContext.h> #include <THC/THCThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <THC/THCAtomics.cuh> #include <cub/cub.cuh> #include <c10/macros/Macros.h> namespace { template <typename scalar_t, int SZ> __global__ void indexing_backward_kernel( int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight, int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim) { //numel is total number of flattened indices, not expanded to dimensions that are not indexed. //stride is the cumulative size of the not-indexed last dimensions //stride_before is the stride of the dimension immediately preceding first indexed dimension //if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case //outer_dim is number of elements in the first unindexed dimensions using accscalar_t = at::acc_type<scalar_t, true>; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same destination index as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. // // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values processed by each thread (grain size) for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){ int64_t idx = blockIdx.x * blockDim.y + threadIdx.y; if (idx < numel && (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){ do { int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before; const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride; const accscalar_t scale = (accscalar_t)1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; while (start_feature < stride) { #pragma unroll for (int ii = 0; ii < SZ; ii++) { int64_t feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int64_t feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } start_feature += gridDim.y * blockDim.x * SZ; } idx++; } while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]); } } } } namespace at { namespace native { static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) { //we don't need to check range in backward - if there were out of bounds indices forward should already have errored out if (index.numel() != 0 && check_range) { auto max_idx = index.max().item<int64_t>(); auto min_idx = index.min().item<int64_t>(); if (max_idx >= 
dim_size) { TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size); } if (min_idx < -dim_size) { TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size); } } return index.remainder(dim_size); } static std::vector<int64_t> computeLinearStride(const Tensor & tensor) { // computes the stride as if tensor were contiguous auto sizes = tensor.sizes(); std::vector<int64_t> stride(tensor.dim()); stride[tensor.dim() - 1] = 1; std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>()); return stride; } static std::tuple<Tensor, int64_t, int64_t, int64_t> computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) { auto strides = computeLinearStride(src); const auto& backend = src.type().backend(); // Compute the linear index by multiplying the indexing tensors by the // stride and summing them. All the indexing tensors have the same shape at // this point. We also compute the number of dimensions before and after that // are not being index. Tensor linearIndex; int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0; for (auto i = decltype(src.dim()){0}; i < src.dim(); i++) { if (indices[i].defined()) { // Cast index to the longType matching src's backend // This allows us to support ie indexing a cuda tensor with a cpu tensor Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).toBackend(backend); if (linearIndex.defined()) { linearIndex += index; } else { linearIndex = index; if (i>0) { strideBefore = src.stride(i-1); // stride after undefined dimensions } } } else if (linearIndex.defined()) { emptyAfter++; nElemAfter *= src.size(i); } else { emptyBefore++; nElemBefore *= src.size(i); } } return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter); } static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) { checkIndexTensorTypes(orig); // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors auto indices = expandTensors(self, orig); // next broadcast all index tensors together indices = expand_outplace(indices); // add missing null Tensors so that it matches self.dim() while (indices.size() < (size_t)self.dim()) { indices.emplace_back(); } // if the non-null indices are not all adjacent, transpose self and indices // together so that they're adjacent at the front std::vector<int64_t> inversePerm; if (!hasContiguousSubspace(indices)) { std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices); } int64_t nElemBefore, strideBefore, nElemAfter; Tensor linearIndex; std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range); return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm); } namespace { void index_put_accum_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool unsafe) { if (indices.size() > (size_t)self.dim()) { TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")"); } auto value_ = value.contiguous(); Tensor linearIndex, expandedValue, src; int64_t nElemBefore, strideBefore, sliceSize; std::vector<int64_t> inversePerm; std::tie(linearIndex, src, nElemBefore, strideBefore, 
sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe); int64_t num_indices = linearIndex.numel(); if (num_indices > 0 && sliceSize > 0) { const bool permuted = !src.is_contiguous(); auto src_ = permuted ? src.contiguous() : src; linearIndex = linearIndex.reshape(-1); auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT); using device_ptr = thrust::device_ptr<int64_t>; const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); linearIndex.floor_divide_(sliceSize); { sorted_indices.copy_(linearIndex); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices const auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly // Sort; a stable sort is not required // NB - not passing comparator causes thrust to use radix sort, and it hurts perf A LOT, at least for medium (few K) sized indices auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>()); } TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel()); const int UNROLL = 4; const int indices_per_block = 4; dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block), std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))), std::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2])); dim3 block(C10_WARP_SIZE, indices_per_block); AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, value_.scalar_type(), "indexing_backward", [&] { indexing_backward_kernel<scalar_t, UNROLL><<<grid, block, 0, stream>>>( sorted_indices.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(), value_.data_ptr<scalar_t>(), src_.data_ptr<scalar_t>(), num_indices, sliceSize, strideBefore, nElemBefore); }); C10_CUDA_KERNEL_LAUNCH_CHECK(); if (permuted) self.copy_(src_.permute(inversePerm)); } } REGISTER_CUDA_DISPATCH(index_put_accum_stub, &index_put_accum_kernel); } //anonymous // Check tensor dimensions for index operations, and return the slice size. 
static ptrdiff_t getSliceSize(const Tensor & dst, int dim, const Tensor & index, const Tensor & src) { int dstDims = dst.dim(); int srcDims = src.dim(); TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar"); ptrdiff_t dstSliceSize = 1; TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds"); for (int d = 0; d < dstDims; d++) { if (d != dim) { dstSliceSize *= dst.size(d); } } TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds"); TORCH_CHECK(index.numel() == src.size(dim), "length of src.size[dim] is not equal to length of indices"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (int d = 0; d < srcDims; d++) { if (d != dim) { srcSliceSize *= src.size(d); if (!mismatch && dst.size(d) != src.size(d)) mismatch = true; } } TORCH_CHECK(dstSliceSize == srcSliceSize, "Source/destination tensor have different slice sizes (%ld vs %ld)", dstSliceSize, srcSliceSize); if (mismatch) { TORCH_WARN_ONCE( "Warning: source/destination slices have same size but different " "shape for an index operation. This behavior is deprecated.\n"); } return dstSliceSize; } // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexAddLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstAddDim, int srcAddDim, IndexType innerSize, int64_t dstAddDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) { // Lua indices begin at 1 IndexType dstIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)]; CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcAddDim]; gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]); } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexAddSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstAddDim, int srcAddDim, IndexType totalSize, IndexType innerSize, int64_t dstAddDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType srcIndex, elementInSlice; if (IndexIsMajor) { srcIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; srcIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType dstIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)]; CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize); IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcAddDim]; gpuAtomicAdd(&dst.data[dstOffset], src.data[srcOffset]); } } // Compare the stride between adjacent slices (sliceStride) with strides in the // other dimensions (i.e., strides *inside* each slice). // // - Returns true if some dimension inside the slice has lower stride than // sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim // == 0 (that is, each slice is a row). // // In this case, we choose the CUDA kernel that processes the data in // "index-major order". For example, if thread count equals slice size, then // all threads process slice #0 in lockstep, and then slice #1, and so on. // // - Otherwise (i.e., sliceStride has the lowest value), this function returns // false. The simplest example is a 2-D contiguous tensor with sliceDim == 1 // (each slice is a column). // // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. template <typename scalar_t> bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 // and element #0 of slice #101). 
unsigned int sliceStride = info.strides[sliceDim]; for (int i = 0; i < info.dims; ++i) { if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) { return true; } } return false; } Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("index_add_cuda_"); dim = maybe_wrap_dim(dim, self.dim()); TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4}; checkAllSameGPU("index_add", {self_arg, index_arg, source_arg}); TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector"); TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "index_add_(): Expected dtype int32/int64 for index"); TORCH_CHECK(self.scalar_type() == source.scalar_type(), "index_add_(): self and source must have the same scalar type"); TORCH_CHECK(dim == 0 || dim < source.dim(), "index_add_(): Indexing dim ", dim, " is out of bounds of tensor"); TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)), "index_add_(): Number of indices should be equal to self.size(dim)"); at::assert_no_internal_overlap(self); at::assert_no_overlap(self, index); at::assert_no_overlap(self, source); // Scalars are treated as 1-d tensor Tensor self_ = (self.dim() == 0) ? self.view(1) : self; Tensor source_ = (source.dim() == 0) ? source.view(1) : source; TORCH_CHECK(self.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); TORCH_CHECK(source.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); TORCH_CHECK(index.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING); at::assert_no_internal_overlap(self); at::assert_no_partial_overlap(self, index); at::assert_no_partial_overlap(self, source); // The `source` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of index we are choosing, which is the total size // of the tensor `index`. ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_); ptrdiff_t sourceTotalSize = source.numel(); int64_t selfAddDimSize = self_.size(dim); ptrdiff_t numIndex = index.numel(); if (sliceSize == 0) { return self; } const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); bool indContig = index.is_contiguous(); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \ indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ selfInfo, sourceInfo, indexInfo, \ selfAddDim, sourceAddDim, sliceSize, selfAddDimSize); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); #define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \ SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \ SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ selfInfo, sourceInfo, indexInfo, \ selfAddDim, sourceAddDim, sourceTotalSize, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndex, \ selfAddDimSize); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(sourceTotalSize, (ptrdiff_t)128)); if (cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(source) && cuda::detail::canUse32BitIndexMath(index)) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] { cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_); int selfAddDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfAddDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () { auto sourceInfo = cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_); int sourceAddDim = sourceInfo.collapseDims(dim); sourceInfo.reduceDim(sourceAddDim); auto indexInfo = cuda::detail::getTensorInfo<index_t, unsigned int>(index); indexInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // index to choose if (numIndex <= 16) { if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2); } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2); } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim); if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true); } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false); } } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true); } } }); }); } else { AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] { cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_); int selfAddDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfAddDim); cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo = cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_); int sourceAddDim = sourceInfo.collapseDims(dim); sourceInfo.reduceDim(sourceAddDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () { cuda::detail::TensorInfo<index_t, uint64_t> indexInfo = cuda::detail::getTensorInfo<index_t, uint64_t>(index); indexInfo.collapseDims(); LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true); }); }); } return self; #undef SMALL_INDEX #undef LARGE_INDEX } namespace { // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. 
// This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexSelectLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType innerSize, int64_t srcSelectDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) { IndexType srcIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)]; CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexSelectSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType totalSize, IndexType innerSize, int64_t srcSelectDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstIndex, elementInSlice; if (IndexIsMajor) { dstIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; dstIndex = linearIndex % innerSize; } IndexType srcIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)]; CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize); IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } namespace { // When using a 0-dim scalar tensor, we need the legacy (THC) semantics of // TensorInfo: Pretend that the scalar tensor is in fact a one-element vector. template <typename T, typename IndexType> cuda::detail::TensorInfo<T, IndexType> tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) { if (ti.dims == 0) { ti.dims = 1; ti.sizes[0] = 1; ti.strides[0] = 1; } return ti; } } template<typename scalar_t> void index_select_out_cuda_impl(Tensor& out, const Tensor& self, long dim, const Tensor& index) { ptrdiff_t numIndices = index.numel(); int selfDims = self.dim() == 0 ? 1 : self.dim(); const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); TORCH_CHECK(index.dim() <= 1, "Index is supposed to be an empty tensor or a vector"); TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds"); std::vector<int64_t> newSize = self.sizes().vec(); if (self.dim() > 0) { newSize[dim] = numIndices; } at::native::resize_(out, newSize, {}); ptrdiff_t outTotalSize = out.numel(); if (outTotalSize == 0) { return; } bool indContig = index.is_contiguous(); // The `self` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. int64_t selfSelectDimSize = self.dim() == 0 ? 
1 : self.size(dim); ptrdiff_t sliceSize = outTotalSize / numIndices; int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ outInfo, selfInfo, indicesInfo, \ outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \ selfSelectDimSize); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); #define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ outInfo, selfInfo, indicesInfo, \ outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \ static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \ selfSelectDimSize); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(outTotalSize, (ptrdiff_t)128)); if (cuda::detail::canUse32BitIndexMath(out) && cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(index)) { auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out)); int outSelectDim = outInfo.collapseDims(dim); outInfo.reduceDim(outSelectDim); auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self)); int selfSelectDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfSelectDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () { auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index)); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2); } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2); } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim); if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true); } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false); } } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true); } } }); } else { auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out)); int outSelectDim = outInfo.collapseDims(dim); outInfo.reduceDim(outSelectDim); auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self)); int selfSelectDim = 
selfInfo.collapseDims(dim); selfInfo.reduceDim(selfSelectDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () { auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index)); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true); }); } #undef SMALL_INDEX #undef LARGE_INDEX } } // anonymous namespace Tensor& index_select_out_cuda(Tensor& out, const Tensor& self, int64_t dim, const Tensor& index) { static constexpr string_view DIM_WARNING = "Tensor too large or too many (> 25) dimensions"; TORCH_CHECK(at::cuda::check_device({out, self, index}), "Input, output and indices must be on the current device"); at::assert_no_internal_overlap(out); at::assert_no_overlap(out, self); at::assert_no_overlap(out, index); dim = at::maybe_wrap_dim(dim, self); TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING); TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, out.scalar_type(), "index_select_cuda", [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); }); return out; } Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) { Tensor out = at::empty({0}, self.options()); index_select_out_cuda(out, self, dim, index); return out; } template<typename T> struct NonZeroOp { __host__ __device__ __forceinline__ bool operator()(const T& a) const { return (a!=T(0)); } }; template<typename scalar_t> void nonzero_cuda_out_impl(const Tensor& self, Tensor& out){ Tensor self_ = self.contiguous(); int N = self_.numel(); const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // compute number of nonzero elements size_t temp_storage_bytes=0; auto& allocator = *c10::cuda::CUDACachingAllocator::get(); auto num_nonzeros = allocator.allocate(sizeof(int)); cub::TransformInputIterator<bool, NonZeroOp<scalar_t>, scalar_t*> itr(self_.data_ptr<scalar_t>(), NonZeroOp<scalar_t>()); cub::DeviceReduce::Sum(nullptr, temp_storage_bytes, itr, (int*)num_nonzeros.get(), N, stream); auto temp_storage = allocator.allocate(temp_storage_bytes); cub::DeviceReduce::Sum(temp_storage.get(), temp_storage_bytes, itr, (int*)num_nonzeros.get(), N, stream); int num_nonzeros_h; C10_CUDA_CHECK(cudaMemcpyAsync(&num_nonzeros_h, num_nonzeros.get(), sizeof(int), cudaMemcpyDeviceToHost, stream)); //need to synchronize to make sure data is available on the host C10_CUDA_CHECK(cudaStreamSynchronize(stream)); //expected output size is num_nonzeros x ndim //we are producing output with size {num_nonzeros, ndim} and strides {num_nonzeros, 1} (that is, transposed ndim x num_nonzeros output) //we are able to directly use passed output with this size and strides, and we can also (per contract) //resize passed output with incorrect sizes anyway we want. //However, out with correct sizes and incorrect strides will have to be copied to from the intermediate we've produced. bool need_to_copy = out.dim() == 2 && out.sizes()[0] == num_nonzeros_h && out.sizes()[1] == self.dim() && !out.t().is_contiguous(); at::Tensor out_temp = need_to_copy ? 
at::native::empty_cuda({self.dim(), num_nonzeros_h}, optTypeMetaToScalarType(out.options().dtype_opt()), out.options().layout_opt(), out.options().device_opt(), out.options().pinned_memory_opt()) : out.resize_({self.dim(), num_nonzeros_h}); //Scalars are expected to produce output of size (1,0), so we can't write to it if (self.dim() > 0) { cub::CountingInputIterator<int64_t> counting_itr(0); temp_storage_bytes = 0; cub::DeviceSelect::Flagged(nullptr, temp_storage_bytes, counting_itr, itr, out_temp.data_ptr<int64_t>(), (int*)num_nonzeros.get(), N, stream); temp_storage = allocator.allocate(temp_storage_bytes); cub::DeviceSelect::Flagged(temp_storage.get(), temp_storage_bytes, counting_itr, itr, out_temp.data_ptr<int64_t>(), (int*)num_nonzeros.get(), N, stream); if (num_nonzeros_h > 0 && self.dim() > 1){ int64_t div = 1; auto thrust_allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); for (int dim = self.dim()-1; dim >= 0; dim--){ int64_t dim_size = self.sizes()[dim]; thrust::transform( thrust::cuda::par(thrust_allocator).on(stream), thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()), thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()) + num_nonzeros_h, thrust::device_ptr<int64_t>(out_temp.data_ptr<int64_t>()) + num_nonzeros_h * dim, [=] C10_HOST_DEVICE (const int64_t val) {return (val/div) % dim_size;} ); div *= dim_size; } } } if (need_to_copy) { out.copy_(out_temp.t()); } else { //transpose out so it is correct size Tensor out_ = out_temp.t(); out.set_(out_); } } Tensor& nonzero_out_cuda(Tensor& out, const Tensor& self){ TORCH_CHECK(self.numel() < std::numeric_limits<int>::max(), "nonzero is not supported for tensors with more than INT_MAX elements, \ file a support request"); TORCH_CHECK(out.dtype() == at::kLong, "Expected object of scalar type ", at::kLong, " as out, but got ", out.dtype()); TORCH_CHECK(self.device() == out.device(), "expected self and out to be on the same device, but got out on ", out.device(), " and self on ", self.device()); AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::Half, self.scalar_type(), "nonzero_cuda", [&] {nonzero_cuda_out_impl<scalar_t>(self, out);}); return out; } Tensor nonzero_cuda(const Tensor& self){ Tensor out = at::native::empty_cuda({0}, kLong, self.options().layout_opt(), self.options().device_opt(), self.options().pinned_memory_opt()); return nonzero_out_cuda(out, self); } } // native } // at
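A side note on the large-index mapping that both versions of indexSelectLargeIndex above rely on: with IDX_IS_MAJOR set, one linear output id is split into (selected index, offset inside the slice), and the slice element is gathered from the source row named by the index tensor. The following is a stripped-down, self-contained CUDA sketch of that gather for a contiguous 2-D tensor selected along dim 0; the kernel name, the sizes, and the single-block launch are illustrative assumptions, not ATen's implementation.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void index_select_dim0(const float* src, const long* indices,
                                  float* dst, int numIndices, int rowSize)
{
    int total = numIndices * rowSize;
    // Grid-stride loop over all output elements, as in indexSelectLargeIndex.
    for (int linear = blockIdx.x * blockDim.x + threadIdx.x;
         linear < total;
         linear += gridDim.x * blockDim.x) {
        int dstRow  = linear / rowSize;   // which selected index (index-major)
        int col     = linear % rowSize;   // element inside the slice
        long srcRow = indices[dstRow];    // row to gather from the source
        dst[dstRow * rowSize + col] = src[srcRow * rowSize + col];
    }
}

int main()
{
    const int numRows = 4, rowSize = 3, numIndices = 2;
    float h_src[numRows * rowSize] = {0,1,2, 10,11,12, 20,21,22, 30,31,32};
    long  h_idx[numIndices] = {2, 0};
    float h_dst[numIndices * rowSize];

    float *d_src, *d_dst; long *d_idx;
    cudaMalloc((void**)&d_src, sizeof(h_src));
    cudaMalloc((void**)&d_idx, sizeof(h_idx));
    cudaMalloc((void**)&d_dst, sizeof(h_dst));
    cudaMemcpy(d_src, h_src, sizeof(h_src), cudaMemcpyHostToDevice);
    cudaMemcpy(d_idx, h_idx, sizeof(h_idx), cudaMemcpyHostToDevice);

    index_select_dim0<<<1, 128>>>(d_src, d_idx, d_dst, numIndices, rowSize);
    cudaMemcpy(h_dst, d_dst, sizeof(h_dst), cudaMemcpyDeviceToHost);

    for (int i = 0; i < numIndices * rowSize; ++i) printf("%g ", h_dst[i]);
    printf("\n");   // expected: 20 21 22 0 1 2
    cudaFree(d_src); cudaFree(d_idx); cudaFree(d_dst);
    return 0;
}

The real kernels additionally handle arbitrary strided layouts through TensorInfo/IndexToOffset, and the host code picks between the small-index and large-index variants based on numIndices and indexShouldBeMajor, as shown above.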
bfc06c42b79bd7a77bdf15626581079862c3830d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <cstdlib> #include <algorithm> #include <iterator> #include <iomanip> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <cub/warp/warp_scan.cuh> #include "../../util_device.cuh" #include "../../graph-utils/load_balanced_search.cuh" #define VERTEX_FRONTIER 100000 void load_balance_search(int num_edges, const std::vector<int> &scanned_edges, std::vector<int> &result) { int ai = 0, bi = 0; while(ai < num_edges || bi < scanned_edges.size()) { bool p; if(bi >= scanned_edges.size()) p = true; else if(ai >= num_edges) p = false; else p = ai < scanned_edges[bi]; // aKey < bKey is upper-bound condition if(p) result[ai++] = bi-1; //subtract 1 from the upper-bound else ++bi; } } int main() { std::vector<int> counts(VERTEX_FRONTIER), counts_scan(VERTEX_FRONTIER), sources, lbs; for(unsigned i=0; i<counts.size(); i++) { counts[i] = rand() % 100000; //0 through (k-1) work-items } /*std::cout << "Number of work items: " << std::endl; std::copy(counts.begin(),counts.end(),std::ostream_iterator<int>(std::cout," ")); std::cout << std::endl;*/ thrust::exclusive_scan(counts.begin(),counts.end(),counts_scan.begin()); /*std::cout << "Scanned work items: " << std::endl; std::copy(counts_scan.begin(),counts_scan.end(),std::ostream_iterator<int>(std::cout," ")); std::cout << std::endl;*/ int edges = counts_scan[counts_scan.size()-1]+counts[counts.size()-1]; std::cout << "Number of edges to traverse: " << edges << std::endl; sources.resize(edges); lbs.resize(edges); load_balance_search(edges,counts_scan,lbs); /*std::cout << "Edges to be traversed: " << std::endl; for(unsigned i=0; i<lbs.size(); i++) { std::cout << "(" << lbs[i] << "," << i - counts_scan[lbs[i]] << ")" << " "; } std::cout << std::endl;*/ std::cout << std::endl << "Repeating on the GPU: " << std::endl; thrust::device_vector<int> counts_d = counts; thrust::device_vector<int> counts_scan_d(VERTEX_FRONTIER,0); thrust::device_vector<int> result_d(edges); //Have to assume O(m) space here for a graph thrust::device_vector<int> edges_d(1,0); hipEvent_t start_event, end_event; start_clock(start_event,end_event); hipLaunchKernelGGL(( extract_edges_block), dim3(1),dim3(BLOCK_SIZE), 0, 0, VERTEX_FRONTIER,thrust::raw_pointer_cast(counts_d.data()),thrust::raw_pointer_cast(counts_scan_d.data()),thrust::raw_pointer_cast(result_d.data()),thrust::raw_pointer_cast(edges_d.data())); checkCudaErrors(hipPeekAtLastError()); float time = end_clock(start_event,end_event); std::cout << "Number of edges to traverse: " << edges_d[0] << std::endl; thrust::host_vector<int> result_h = result_d; thrust::host_vector<int> counts_scan_h = counts_scan_d; /*std::cout << "Edges to be traversed: " << std::endl; for(unsigned i=0; i<result_h.size(); i++) { std::cout << "(" << result_h[i] << "," << i - counts_scan_h[result_h[i]] << ")" << " "; } std::cout << std::endl;*/ std::cout << std::endl; if(!thrust::equal(counts_scan.begin(),counts_scan.end(),counts_scan_h.begin())) { std::cout << "Scan failed." << std::endl; //thrust::copy(counts_scan_h.begin(),counts_scan_h.end(),std::ostream_iterator<int>(std::cout," " )); //std::cout << std::endl; } else { std::cout << "Scan passed." << std::endl; } thrust::equal(lbs.begin(),lbs.end(),result_h.begin()) ? std::cout << "Test passed." 
: std::cout << "Test failed."; std::cout << std::endl; std::cout << "Time for Load-Balancing Search: " << std::setprecision(9) << time << " s" << std::endl; int64_t bytes = sizeof(int)*edges_d[0]; //The edges themselves are ints but there could be more than 2^31 of them for measurement/testing purposes double bandwidth = bytes/time; std::cout << "Memory Bandwidth: " << bandwidth/(1e9) << " GB/s" << std::endl; return 0; }
bfc06c42b79bd7a77bdf15626581079862c3830d.cu
#include <iostream> #include <vector> #include <cstdlib> #include <algorithm> #include <iterator> #include <iomanip> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <cub/warp/warp_scan.cuh> #include "../../util_device.cuh" #include "../../graph-utils/load_balanced_search.cuh" #define VERTEX_FRONTIER 100000 void load_balance_search(int num_edges, const std::vector<int> &scanned_edges, std::vector<int> &result) { int ai = 0, bi = 0; while(ai < num_edges || bi < scanned_edges.size()) { bool p; if(bi >= scanned_edges.size()) p = true; else if(ai >= num_edges) p = false; else p = ai < scanned_edges[bi]; // aKey < bKey is upper-bound condition if(p) result[ai++] = bi-1; //subtract 1 from the upper-bound else ++bi; } } int main() { std::vector<int> counts(VERTEX_FRONTIER), counts_scan(VERTEX_FRONTIER), sources, lbs; for(unsigned i=0; i<counts.size(); i++) { counts[i] = rand() % 100000; //0 through (k-1) work-items } /*std::cout << "Number of work items: " << std::endl; std::copy(counts.begin(),counts.end(),std::ostream_iterator<int>(std::cout," ")); std::cout << std::endl;*/ thrust::exclusive_scan(counts.begin(),counts.end(),counts_scan.begin()); /*std::cout << "Scanned work items: " << std::endl; std::copy(counts_scan.begin(),counts_scan.end(),std::ostream_iterator<int>(std::cout," ")); std::cout << std::endl;*/ int edges = counts_scan[counts_scan.size()-1]+counts[counts.size()-1]; std::cout << "Number of edges to traverse: " << edges << std::endl; sources.resize(edges); lbs.resize(edges); load_balance_search(edges,counts_scan,lbs); /*std::cout << "Edges to be traversed: " << std::endl; for(unsigned i=0; i<lbs.size(); i++) { std::cout << "(" << lbs[i] << "," << i - counts_scan[lbs[i]] << ")" << " "; } std::cout << std::endl;*/ std::cout << std::endl << "Repeating on the GPU: " << std::endl; thrust::device_vector<int> counts_d = counts; thrust::device_vector<int> counts_scan_d(VERTEX_FRONTIER,0); thrust::device_vector<int> result_d(edges); //Have to assume O(m) space here for a graph thrust::device_vector<int> edges_d(1,0); cudaEvent_t start_event, end_event; start_clock(start_event,end_event); extract_edges_block<<<1,BLOCK_SIZE>>>(VERTEX_FRONTIER,thrust::raw_pointer_cast(counts_d.data()),thrust::raw_pointer_cast(counts_scan_d.data()),thrust::raw_pointer_cast(result_d.data()),thrust::raw_pointer_cast(edges_d.data())); checkCudaErrors(cudaPeekAtLastError()); float time = end_clock(start_event,end_event); std::cout << "Number of edges to traverse: " << edges_d[0] << std::endl; thrust::host_vector<int> result_h = result_d; thrust::host_vector<int> counts_scan_h = counts_scan_d; /*std::cout << "Edges to be traversed: " << std::endl; for(unsigned i=0; i<result_h.size(); i++) { std::cout << "(" << result_h[i] << "," << i - counts_scan_h[result_h[i]] << ")" << " "; } std::cout << std::endl;*/ std::cout << std::endl; if(!thrust::equal(counts_scan.begin(),counts_scan.end(),counts_scan_h.begin())) { std::cout << "Scan failed." << std::endl; //thrust::copy(counts_scan_h.begin(),counts_scan_h.end(),std::ostream_iterator<int>(std::cout," " )); //std::cout << std::endl; } else { std::cout << "Scan passed." << std::endl; } thrust::equal(lbs.begin(),lbs.end(),result_h.begin()) ? std::cout << "Test passed." 
: std::cout << "Test failed."; std::cout << std::endl; std::cout << "Time for Load-Balancing Search: " << std::setprecision(9) << time << " s" << std::endl; int64_t bytes = sizeof(int)*edges_d[0]; //The edges themselves are ints but there could be more than 2^31 of them for measurement/testing purposes double bandwidth = bytes/time; std::cout << "Memory Bandwidth: " << bandwidth/(1e9) << " GB/s" << std::endl; return 0; }
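The serial load_balance_search in both files above is an upper-bound search over the exclusive scan of per-vertex counts: edge i is assigned to the last vertex v with counts_scan[v] <= i. The sketch below expresses the same mapping with thrust::upper_bound; the counts are made-up values, and the project's actual GPU path is extract_edges_block from load_balanced_search.cuh, which is not reproduced here.

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/binary_search.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <iostream>

int main()
{
    // counts[v] = number of edges owned by vertex v (made-up values)
    thrust::device_vector<int> counts(4);
    counts[0] = 2; counts[1] = 0; counts[2] = 3; counts[3] = 1;

    thrust::device_vector<int> scan(counts.size());
    thrust::exclusive_scan(counts.begin(), counts.end(), scan.begin()); // 0 2 2 5
    int num_edges = scan.back() + counts.back();                        // 6

    // Edge i belongs to the last vertex v with scan[v] <= i, i.e.
    // upper_bound(scan, i) - 1 -- the same rank the serial search produces.
    thrust::device_vector<int> source(num_edges);
    thrust::upper_bound(scan.begin(), scan.end(),
                        thrust::counting_iterator<int>(0),
                        thrust::counting_iterator<int>(num_edges),
                        source.begin());
    thrust::transform(source.begin(), source.end(), source.begin(),
                      thrust::placeholders::_1 - 1);

    for (int i = 0; i < num_edges; ++i)
        std::cout << "edge " << i << " -> vertex " << source[i] << "\n";
    // expected source vertices: 0 0 2 2 2 3 (vertex 1 owns no edges)
    return 0;
}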
327d72212a525384d16256bb41c41276fac1cd2c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../include/cuda/init.cuh" __global__ void init_prng(curandStateMRG32k3a * prngstate) { INDCHECK() #ifndef TEST hiprand_init(clock64() * sbi, 0, 0, &prngstate[sbi]); #else // fixed seeds for a deterministic simulation hiprand_init(sbi, 0, 0, &prngstate[sbi]); #endif } __global__ void init_ncount(neigh neigh) { INDCHECK() int nc = 0; for (int i = 0; i < 6; i++) nc += (neigh.index[sbi * 6 + i] != sbi); neigh.count[sbi] = nc; } __device__ int HOR(reactions reactions, int spi) { int max_hor = 0; bool is_bi_reaction = false; for (int ri = 0; ri < RC; ri++) { // if spi is not a reactant of the current reaction, continue // with the next one. if (reactions.r[GET_COEFF(spi, ri)] == 0) continue; // sum all the coeff. of the current reaction to compute its // order. int hor = 0; for (int j = 0; j < SPC; j++) { int c = reactions.r[GET_COEFF(j, ri)]; hor += c; // check if ri requires 2 molecules of spi if (j == spi && c == 2) { is_bi_reaction = true; // TODO: replace with branchless code } } max_hor = max(hor, max_hor); } if (is_bi_reaction) max_hor = 3; return max_hor; } __global__ void init_hors(int * hors, reactions reactions, int spc) { unsigned int sbi = blockIdx.x * blockDim.x + threadIdx.x; if (sbi != 0) return; for (int spi = 0; spi < spc; spi++) hors[spi] = HOR(reactions, spi); }
327d72212a525384d16256bb41c41276fac1cd2c.cu
#include "../include/cuda/init.cuh" __global__ void init_prng(curandStateMRG32k3a * prngstate) { INDCHECK() #ifndef TEST curand_init(clock64() * sbi, 0, 0, &prngstate[sbi]); #else // fixed seeds for a deterministic simulation curand_init(sbi, 0, 0, &prngstate[sbi]); #endif } __global__ void init_ncount(neigh neigh) { INDCHECK() int nc = 0; for (int i = 0; i < 6; i++) nc += (neigh.index[sbi * 6 + i] != sbi); neigh.count[sbi] = nc; } __device__ int HOR(reactions reactions, int spi) { int max_hor = 0; bool is_bi_reaction = false; for (int ri = 0; ri < RC; ri++) { // if spi is not a reactant of the current reaction, continue // with the next one. if (reactions.r[GET_COEFF(spi, ri)] == 0) continue; // sum all the coeff. of the current reaction to compute its // order. int hor = 0; for (int j = 0; j < SPC; j++) { int c = reactions.r[GET_COEFF(j, ri)]; hor += c; // check if ri requires 2 molecules of spi if (j == spi && c == 2) { is_bi_reaction = true; // TODO: replace with branchless code } } max_hor = max(hor, max_hor); } if (is_bi_reaction) max_hor = 3; return max_hor; } __global__ void init_hors(int * hors, reactions reactions, int spc) { unsigned int sbi = blockIdx.x * blockDim.x + threadIdx.x; if (sbi != 0) return; for (int spi = 0; spi < spc; spi++) hors[spi] = HOR(reactions, spi); }
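For reference, the rule implemented by HOR()/init_hors above: a species' highest order of reaction is the largest reaction order (sum of reactant coefficients) among the reactions that consume it, forced to 3 whenever some reaction consumes two molecules of that species. Below is a host-only sketch of the same rule with a plain row-major coefficient table standing in for the GET_COEFF/RC/SPC layout, which is defined elsewhere in the project and only assumed here.

#include <cstdio>
#include <algorithm>

int hor_of_species(const int* r, int num_reactions, int num_species, int spi)
{
    int max_hor = 0;
    bool bimolecular_in_spi = false;
    for (int ri = 0; ri < num_reactions; ++ri) {
        if (r[ri * num_species + spi] == 0)
            continue;                          // spi is not a reactant of this reaction
        int order = 0;
        for (int j = 0; j < num_species; ++j) {
            int c = r[ri * num_species + j];
            order += c;                        // reaction order = sum of reactant coefficients
            if (j == spi && c == 2)
                bimolecular_in_spi = true;     // this reaction consumes two spi molecules
        }
        max_hor = std::max(order, max_hor);
    }
    return bimolecular_in_spi ? 3 : max_hor;   // same forcing to 3 as in HOR() above
}

int main()
{
    // 3 species (A, B, C), 2 reactions: A + B -> ..., 2C -> ...
    const int r[2 * 3] = { 1, 1, 0,
                           0, 0, 2 };
    printf("HOR(A)=%d HOR(B)=%d HOR(C)=%d\n",
           hor_of_species(r, 2, 3, 0),
           hor_of_species(r, 2, 3, 1),
           hor_of_species(r, 2, 3, 2));        // expected: 2 2 3
    return 0;
}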
528ba0188be389ccc3f0c60a6e794348bb7e6474.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by lidan on 24/09/2020.
//

#include <stdio.h>
#include <stdlib.h>
#include <conio.h>

// blockDim is the GPU block's dimensions; threadIdx.x is this thread's position within its block
__global__ void what_is_my_id(unsigned int* const block ,unsigned int* const thread ,
                              unsigned int* const warp ,unsigned int* const calc_thread )
{
    const unsigned int thread_idx = (blockIdx.x*blockDim.x) + threadIdx.x ;
    block[thread_idx] = blockIdx.x ;
    thread[thread_idx] = threadIdx.x ;
    warp[thread_idx] = threadIdx.x / warpSize ;
    calc_thread[thread_idx] = thread_idx ;
}

#define ARRAY_SIZE 128
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int)*(ARRAY_SIZE))

unsigned int cpu_block[ARRAY_SIZE] ;
unsigned int cpu_thread[ARRAY_SIZE] ;
unsigned int cpu_wrap[ARRAY_SIZE] ;
unsigned int cpu_calc_thread[ARRAY_SIZE] ;

int main(void)
{
    const unsigned int num_block = 2;
    const unsigned int num_threads = 64 ;
    char ch ;

    unsigned int * gpu_block ;
    unsigned int * gpu_thread ;
    unsigned int * gpu_warp ;
    unsigned int * gpu_calc_thread ;
    unsigned int i ;

    hipMalloc((void**)&gpu_block,ARRAY_SIZE_IN_BYTES) ;
    hipMalloc((void**)&gpu_thread,ARRAY_SIZE_IN_BYTES) ;
    hipMalloc((void**)&gpu_warp,ARRAY_SIZE_IN_BYTES) ;
    hipMalloc((void**)&gpu_calc_thread,ARRAY_SIZE_IN_BYTES) ;

    hipLaunchKernelGGL(( what_is_my_id), dim3(num_block),dim3(num_threads), 0, 0, gpu_block,gpu_thread,gpu_warp,gpu_calc_thread) ;

    hipMemcpy(cpu_block,gpu_block,ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost) ;
    hipMemcpy(cpu_thread,gpu_thread,ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost) ;
    hipMemcpy(cpu_wrap,gpu_warp,ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost) ;
    hipMemcpy(cpu_calc_thread,gpu_calc_thread,ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost) ;

    hipFree(gpu_calc_thread) ;
    hipFree(gpu_thread) ;
    hipFree(gpu_block) ;
    hipFree(gpu_warp) ;

    for(i = 0;i< ARRAY_SIZE;i++)
    {
        printf("calculate thread: %3u - Block: %2u - Warp : %2u - Thread %3u \n",
               cpu_calc_thread[i],cpu_block[i],cpu_wrap[i],cpu_thread[i]) ;
    }

    ch = getch() ;
}
528ba0188be389ccc3f0c60a6e794348bb7e6474.cu
//
// Created by lidan on 24/09/2020.
//

#include <stdio.h>
#include <stdlib.h>
#include <conio.h>

// blockDim is the GPU block's dimensions; threadIdx.x is this thread's position within its block
__global__ void what_is_my_id(unsigned int* const block ,unsigned int* const thread ,
                              unsigned int* const warp ,unsigned int* const calc_thread )
{
    const unsigned int thread_idx = (blockIdx.x*blockDim.x) + threadIdx.x ;
    block[thread_idx] = blockIdx.x ;
    thread[thread_idx] = threadIdx.x ;
    warp[thread_idx] = threadIdx.x / warpSize ;
    calc_thread[thread_idx] = thread_idx ;
}

#define ARRAY_SIZE 128
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int)*(ARRAY_SIZE))

unsigned int cpu_block[ARRAY_SIZE] ;
unsigned int cpu_thread[ARRAY_SIZE] ;
unsigned int cpu_wrap[ARRAY_SIZE] ;
unsigned int cpu_calc_thread[ARRAY_SIZE] ;

int main(void)
{
    const unsigned int num_block = 2;
    const unsigned int num_threads = 64 ;
    char ch ;

    unsigned int * gpu_block ;
    unsigned int * gpu_thread ;
    unsigned int * gpu_warp ;
    unsigned int * gpu_calc_thread ;
    unsigned int i ;

    cudaMalloc((void**)&gpu_block,ARRAY_SIZE_IN_BYTES) ;
    cudaMalloc((void**)&gpu_thread,ARRAY_SIZE_IN_BYTES) ;
    cudaMalloc((void**)&gpu_warp,ARRAY_SIZE_IN_BYTES) ;
    cudaMalloc((void**)&gpu_calc_thread,ARRAY_SIZE_IN_BYTES) ;

    what_is_my_id<<<num_block,num_threads>>>(gpu_block,gpu_thread,gpu_warp,gpu_calc_thread) ;

    cudaMemcpy(cpu_block,gpu_block,ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost) ;
    cudaMemcpy(cpu_thread,gpu_thread,ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost) ;
    cudaMemcpy(cpu_wrap,gpu_warp,ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost) ;
    cudaMemcpy(cpu_calc_thread,gpu_calc_thread,ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost) ;

    cudaFree(gpu_calc_thread) ;
    cudaFree(gpu_thread) ;
    cudaFree(gpu_block) ;
    cudaFree(gpu_warp) ;

    for(i = 0;i< ARRAY_SIZE;i++)
    {
        printf("calculate thread: %3u - Block: %2u - Warp : %2u - Thread %3u \n",
               cpu_calc_thread[i],cpu_block[i],cpu_wrap[i],cpu_thread[i]) ;
    }

    ch = getch() ;
}
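A quick host-side cross-check of the id arithmetic in what_is_my_id for the 2-block x 64-thread launch above; a warp size of 32 is assumed here, whereas the kernel itself reads the device's warpSize built-in.

#include <cstdio>

int main()
{
    const unsigned num_blocks = 2, num_threads = 64, warp_size = 32;
    for (unsigned b = 0; b < num_blocks; ++b) {
        for (unsigned t = 0; t < num_threads; ++t) {
            unsigned global = b * num_threads + t; // blockIdx.x*blockDim.x + threadIdx.x
            unsigned warp   = t / warp_size;       // warp index within the block
            if (global == 0 || global == 63 || global == 64 || global == 127)
                printf("thread %3u: block %u, warp %u, threadIdx %2u\n",
                       global, b, warp, t);
        }
    }
    // expected: thread 0 -> block 0 warp 0, thread 63 -> block 0 warp 1,
    //           thread 64 -> block 1 warp 0, thread 127 -> block 1 warp 1
    return 0;
}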
03fb7851805b97016d3f195259345779f5b33336.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: askeys #include "FIREEnergyMinimizerGPU.cuh" #include "hoomd/TextureTools.h" #include "hoomd/VectorMath.h" #include <assert.h> #include <stdio.h> /*! \file FIREEnergyMinimizerGPU.cu \brief Defines GPU kernel code for one performing one FIRE energy minimization iteration on the GPU. Used by FIREEnergyMinimizerGPU. */ //! Shared memory used in reducing sums extern __shared__ Scalar fire_sdata[]; //! The kernel function to zeros velocities, called by gpu_fire_zero_v() /*! \param d_vel device array of particle velocities \param d_group_members Device array listing the indicies of the mebers of the group to zero \param group_size Number of members in the group */ extern "C" __global__ void gpu_fire_zero_v_kernel(Scalar4 *d_vel, unsigned int *d_group_members, unsigned int group_size) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; // read the particle's velocity (MEM TRANSFER: 32 bytes) Scalar4 vel = d_vel[idx]; // zero the velocity(FLOPS: ?) vel.x = Scalar(0.0); vel.y = Scalar(0.0); vel.z = Scalar(0.0); // write out the results (MEM_TRANSFER: 32 bytes) d_vel[idx] = vel; } } //! The kernel function to zero angular momenta, called by gpu_fire_zero_angmom() extern "C" __global__ void gpu_fire_zero_angmom_kernel(Scalar4 *d_angmom, unsigned int *d_group_members, unsigned int group_size) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; // write out the results (MEM_TRANSFER: 32 bytes) d_angmom[idx] = make_scalar4(0,0,0,0); } } /*! \param d_vel device array of particle velocities \param d_group_members Device array listing the indicies of the mebers of the group to integrate \param group_size Number of members in the group This function is just the driver for gpu_fire_zero_v_kernel(), see that function for details. */ hipError_t gpu_fire_zero_v(Scalar4 *d_vel, unsigned int *d_group_members, unsigned int group_size) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (group_size/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_fire_zero_v_kernel), dim3(grid), dim3(threads) , 0, 0, d_vel, d_group_members, group_size); return hipSuccess; } hipError_t gpu_fire_zero_angmom(Scalar4 *d_angmom, unsigned int *d_group_members, unsigned int group_size) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (group_size/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_fire_zero_angmom_kernel), dim3(grid), dim3(threads) , 0, 0, d_angmom, d_group_members, group_size); return hipSuccess; } //! Kernel function for reducing the potential energy to a partial sum /*! 
\param d_group_members Device array listing the indicies of the mebers of the group to sum \param group_size Number of members in the group \param d_net_force Pointer to the force array for all particles \param d_partial_sum_pe Placeholder for the partial sum */ extern "C" __global__ void gpu_fire_reduce_pe_partial_kernel(unsigned int *d_group_members, unsigned int group_size, Scalar4* d_net_force, Scalar* d_partial_sum_pe) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar pe = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; // read the particle's force and extract the pe from w component (MEM TRANSFER: 32 bytes) Scalar4 force = d_net_force[idx]; pe = force.w; // Uncoalesced Memory Read replace by Texture Read above. Scalars4* d_net_force still being passed to support this // defunct structure. //pe = d_net_force[idx].w; } fire_sdata[threadIdx.x] = pe; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) { d_partial_sum_pe[blockIdx.x] = fire_sdata[0]; } } //! Kernel function for reducing a partial sum to a full sum (one value) /*! \param d_sum Placeholder for the sum \param d_partial_sum Array containing the parial sum \param num_blocks Number of blocks to execute */ extern "C" __global__ void gpu_fire_reduce_partial_sum_kernel(Scalar *d_sum, Scalar* d_partial_sum, unsigned int num_blocks) { Scalar sum = Scalar(0.0); // sum up the values in the partial sum via a sliding window for (int start = 0; start < num_blocks; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < num_blocks) fire_sdata[threadIdx.x] = d_partial_sum[start + threadIdx.x]; else fire_sdata[threadIdx.x] = Scalar(0.0); __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // everybody sums up sum2K sum += fire_sdata[0]; } if (threadIdx.x == 0) *d_sum = sum; } /*! \param d_group_members Device array listing the indicies of the mebers of the group to integrate \param group_size Number of members in the group \param d_net_force Array containing the net forces \param d_sum_pe Placeholder for the sum of the PE \param d_partial_sum_pe Array containing the parial sum of the PE \param block_size The size of one block \param num_blocks Number of blocks to execute This is a driver for gpu_fire_reduce_pe_partial_kernel() and gpu_fire_reduce_partial_sum_kernel(), see them for details */ hipError_t gpu_fire_compute_sum_pe(unsigned int *d_group_members, unsigned int group_size, Scalar4* d_net_force, Scalar* d_sum_pe, Scalar* d_partial_sum_pe, unsigned int block_size, unsigned int num_blocks) { // setup the grid to run the kernel dim3 grid(num_blocks, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_fire_reduce_pe_partial_kernel), dim3(grid), dim3(threads), block_size*sizeof(Scalar) , 0, d_group_members, group_size, d_net_force, d_partial_sum_pe); hipLaunchKernelGGL(( gpu_fire_reduce_partial_sum_kernel), dim3(grid), dim3(threads), block_size*sizeof(Scalar) , 0, d_sum_pe, d_partial_sum_pe, num_blocks); return hipSuccess; } //! Kernel function to compute the partial sum over the P term in the FIRE algorithm /*! 
\param d_vel particle velocities and masses on the device \param d_accel particle accelerations on the device \param d_group_members Device array listing the indicies of the mebers of the group to integrate \param group_size Number of members in the group \param d_partial_sum_P Array to hold the partial sum */ extern "C" __global__ void gpu_fire_reduce_P_partial_kernel(const Scalar4 *d_vel, const Scalar3 *d_accel, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_P) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar P = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; Scalar3 a = d_accel[idx]; Scalar4 v = d_vel[idx]; P = a.x*v.x + a.y*v.y + a.z*v.z; } fire_sdata[threadIdx.x] = P; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_P[blockIdx.x] = fire_sdata[0]; } // Angular terms __global__ void gpu_fire_reduce_Pr_partial_kernel(const Scalar4 *d_angmom, const Scalar4 *d_orientation, const Scalar3 *d_inertia, const Scalar4 *d_net_torque, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_Pr) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar Pr = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; vec3<Scalar> t(d_net_torque[idx]); quat<Scalar> p(d_angmom[idx]); quat<Scalar> q(d_orientation[idx]); vec3<Scalar> I(d_inertia[idx]); // rotate torque into principal frame t = rotate(conj(q),t); // check for zero moment of inertia bool x_zero, y_zero, z_zero; x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON); // ignore torque component along an axis for which the moment of inertia zero if (x_zero) t.x = 0; if (y_zero) t.y = 0; if (z_zero) t.z = 0; // s is the pure imaginary quaternion with im. part equal to true angular velocity vec3<Scalar> s = (Scalar(1./2.) * conj(q) * p).v; // rotational power = torque * angvel Pr = dot(t,s); } fire_sdata[threadIdx.x] = Pr; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_Pr[blockIdx.x] = fire_sdata[0]; } // Norm of angular velocity vector __global__ void gpu_fire_reduce_wnorm_partial_kernel(const Scalar4 *d_angmom, const Scalar4 *d_orientation, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_w) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar w = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; quat<Scalar> p(d_angmom[idx]); quat<Scalar> q(d_orientation[idx]); vec3<Scalar> s = (Scalar(1./2.) * conj(q) * p).v; w = dot(s,s); } fire_sdata[threadIdx.x] = w; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_w[blockIdx.x] = fire_sdata[0]; } //! 
Kernel function to compute the partial sum over the vsq term in the FIRE algorithm /*! \param d_vel device array of particle velocities \param d_group_members Array listing members of the group \param group_size Number of members in the group \param d_partial_sum_vsq Array to hold the partial sum */ extern "C" __global__ void gpu_fire_reduce_vsq_partial_kernel(const Scalar4 *d_vel, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_vsq) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar vsq = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; Scalar4 v = d_vel[idx]; vsq = v.x*v.x + v.y*v.y + v.z*v.z; } fire_sdata[threadIdx.x] = vsq; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_vsq[blockIdx.x] = fire_sdata[0]; } //! Kernel function to compute the partial sum over the asq term in the FIRE algorithm /*! \param d_accel device array of particle accelerations \param d_group_members Array listing members of the group \param group_size Number of members in the group \param d_partial_sum_asq Array to hold the partial sum */ extern "C" __global__ void gpu_fire_reduce_asq_partial_kernel(const Scalar3 *d_accel, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_asq) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar asq = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; Scalar3 a = d_accel[idx]; asq = a.x*a.x + a.y*a.y + a.z*a.z; } fire_sdata[threadIdx.x] = asq; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_asq[blockIdx.x] = fire_sdata[0]; } __global__ void gpu_fire_reduce_tsq_partial_kernel(const Scalar4 *d_net_torque, const Scalar4 *d_orientation, const Scalar3 *d_inertia, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_tsq) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar tsq = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; vec3<Scalar> t(d_net_torque[idx]); quat<Scalar> q(d_orientation[idx]); vec3<Scalar> I(d_inertia[idx]); // rotate torque into principal frame t = rotate(conj(q),t); // check for zero moment of inertia bool x_zero, y_zero, z_zero; x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON); // ignore torque component along an axis for which the moment of inertia zero if (x_zero) t.x = 0; if (y_zero) t.y = 0; if (z_zero) t.z = 0; tsq = dot(t,t); } fire_sdata[threadIdx.x] = tsq; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_tsq[blockIdx.x] = fire_sdata[0]; } /*! 
\param N number of particles in system \param d_vel array of particle velocities \param d_accel array of particle accelerations \param d_group_members Device array listing the indicies of the mebers of the group to integrate \param group_size Number of members in the group \param d_sum_all Array to hold the sum over P, vsq, and asq \param d_partial_sum_P Array to hold the partial sum over P (a*v) \param d_partial_sum_vsq Array to hold the partial sum over vsq (v*v) \param d_partial_sum_asq Array to hold the partial sum over asq (a*a) \param block_size is the size of one block \param num_blocks is the number of blocks to execute \note Currently the sums are performed consecutively. The efficiency of this function could be improved by computing all three sums simultaneously This is a driver for gpu_fire_reduce_{X}_partial_kernel() (where X = P, vsq, asq) and gpu_fire_reduce_partial_sum_kernel(), see them for details */ hipError_t gpu_fire_compute_sum_all( const unsigned int N, const Scalar4 *d_vel, const Scalar3 *d_accel, unsigned int *d_group_members, unsigned int group_size, Scalar* d_sum_all, Scalar* d_partial_sum_P, Scalar* d_partial_sum_vsq, Scalar* d_partial_sum_asq, unsigned int block_size, unsigned int num_blocks) { // setup the grid to run the kernel dim3 grid(num_blocks, 1, 1); dim3 grid1(1, 1, 1); dim3 threads(block_size, 1, 1); dim3 threads1(256, 1, 1); // run the kernels hipLaunchKernelGGL(( gpu_fire_reduce_P_partial_kernel), dim3(grid), dim3(threads), block_size*sizeof(Scalar) , 0, d_vel, d_accel, d_group_members, group_size, d_partial_sum_P); hipLaunchKernelGGL(( gpu_fire_reduce_partial_sum_kernel), dim3(grid1), dim3(threads1), block_size*sizeof(Scalar) , 0, &d_sum_all[0], d_partial_sum_P, num_blocks); hipLaunchKernelGGL(( gpu_fire_reduce_vsq_partial_kernel), dim3(grid), dim3(threads), block_size*sizeof(Scalar) , 0, d_vel, d_group_members, group_size, d_partial_sum_vsq); hipLaunchKernelGGL(( gpu_fire_reduce_partial_sum_kernel), dim3(grid1), dim3(threads1), block_size*sizeof(Scalar) , 0, &d_sum_all[1], d_partial_sum_vsq, num_blocks); hipLaunchKernelGGL(( gpu_fire_reduce_asq_partial_kernel), dim3(grid), dim3(threads), block_size*sizeof(Scalar) , 0, d_accel, d_group_members, group_size, d_partial_sum_asq); hipLaunchKernelGGL(( gpu_fire_reduce_partial_sum_kernel), dim3(grid1), dim3(threads1), block_size*sizeof(Scalar) , 0, &d_sum_all[2], d_partial_sum_asq, num_blocks); return hipSuccess; } hipError_t gpu_fire_compute_sum_all_angular(const unsigned int N, const Scalar4 *d_orientation, const Scalar3 *d_inertia, const Scalar4 *d_angmom, const Scalar4 *d_net_torque, unsigned int *d_group_members, unsigned int group_size, Scalar* d_sum_all, Scalar* d_partial_sum_Pr, Scalar* d_partial_sum_wnorm, Scalar* d_partial_sum_tsq, unsigned int block_size, unsigned int num_blocks) { // setup the grid to run the kernel dim3 grid(num_blocks, 1, 1); dim3 grid1(1, 1, 1); dim3 threads(block_size, 1, 1); dim3 threads1(256, 1, 1); // run the kernels hipLaunchKernelGGL(( gpu_fire_reduce_Pr_partial_kernel), dim3(grid), dim3(threads), block_size*sizeof(Scalar) , 0, d_angmom, d_orientation, d_inertia, d_net_torque, d_group_members, group_size, d_partial_sum_Pr); hipLaunchKernelGGL(( gpu_fire_reduce_partial_sum_kernel), dim3(grid1), dim3(threads1), block_size*sizeof(Scalar) , 0, &d_sum_all[0], d_partial_sum_Pr, num_blocks); hipLaunchKernelGGL(( gpu_fire_reduce_wnorm_partial_kernel), dim3(grid), dim3(threads), block_size*sizeof(Scalar) , 0, d_angmom, d_orientation, d_group_members, group_size, 
d_partial_sum_wnorm); hipLaunchKernelGGL(( gpu_fire_reduce_partial_sum_kernel), dim3(grid1), dim3(threads1), block_size*sizeof(Scalar) , 0, &d_sum_all[1], d_partial_sum_wnorm, num_blocks); hipLaunchKernelGGL(( gpu_fire_reduce_tsq_partial_kernel), dim3(grid), dim3(threads), block_size*sizeof(Scalar) , 0, d_net_torque, d_orientation, d_inertia, d_group_members, group_size, d_partial_sum_tsq); hipLaunchKernelGGL(( gpu_fire_reduce_partial_sum_kernel), dim3(grid1), dim3(threads1), block_size*sizeof(Scalar) , 0, &d_sum_all[2], d_partial_sum_tsq, num_blocks); return hipSuccess; } //! Kernel function to update the velocties used by the FIRE algorithm /*! \param d_vel Array of velocities to update \param d_accel Array of accelerations \param d_group_members Device array listing the indicies of the mebers of the group to update \param group_size Number of members in the grou \param alpha Alpha coupling parameter used by the FIRE algorithm \param factor_t Combined factor vnorm/fnorm*alpha, or 1 if fnorm==0 */ extern "C" __global__ void gpu_fire_update_v_kernel(Scalar4 *d_vel, const Scalar3 *d_accel, unsigned int *d_group_members, unsigned int group_size, Scalar alpha, Scalar factor_t) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; // read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes) Scalar4 v = d_vel[idx]; Scalar3 a = d_accel[idx]; v.x = v.x*(Scalar(1.0)-alpha) + a.x*factor_t; v.y = v.y*(Scalar(1.0)-alpha) + a.y*factor_t; v.z = v.z*(Scalar(1.0)-alpha) + a.z*factor_t; // write out the results (MEM_TRANSFER: 32 bytes) d_vel[idx] = v; } } /*! \param d_vel array of particle velocities to update \param d_accel array of particle accelerations \param d_group_members Device array listing the indicies of the mebers of the group to integrate \param group_size Number of members in the group \param alpha Alpha coupling parameter used by the FIRE algorithm \param vnorm Magnitude of the (3*N) dimensional velocity vector \param invfnorm 1 over the magnitude of the (3*N) dimensional force vector This function is a driver for gpu_fire_update_v_kernel(), see it for details. 
*/ hipError_t gpu_fire_update_v(Scalar4 *d_vel, const Scalar3 *d_accel, unsigned int *d_group_members, unsigned int group_size, Scalar alpha, Scalar factor_t) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (group_size/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_fire_update_v_kernel), dim3(grid), dim3(threads) , 0, 0, d_vel, d_accel, d_group_members, group_size, alpha, factor_t); return hipSuccess; } __global__ void gpu_fire_update_angmom_kernel(const Scalar4 *d_net_torque, const Scalar4 *d_orientation, const Scalar3 *d_inertia, Scalar4 *d_angmom, unsigned int *d_group_members, unsigned int group_size, Scalar alpha, Scalar factor_r) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; quat<Scalar> q(d_orientation[idx]); vec3<Scalar> t(d_net_torque[idx]); quat<Scalar> p(d_angmom[idx]); vec3<Scalar> I(d_inertia[idx]); // rotate torque into principal frame t = rotate(conj(q),t); // check for zero moment of inertia bool x_zero, y_zero, z_zero; x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON); // ignore torque component along an axis for which the moment of inertia zero if (x_zero) t.x = 0; if (y_zero) t.y = 0; if (z_zero) t.z = 0; p = p*Scalar(1.0-alpha) + Scalar(2.0)*q*t*factor_r; d_angmom[idx] = quat_to_scalar4(p); } } hipError_t gpu_fire_update_angmom(const Scalar4 *d_net_torque, const Scalar4 *d_orientation, const Scalar3 *d_inertia, Scalar4 *d_angmom, unsigned int *d_group_members, unsigned int group_size, Scalar alpha, Scalar factor_r) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (group_size/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel hipLaunchKernelGGL(( gpu_fire_update_angmom_kernel), dim3(grid), dim3(threads) , 0, 0, d_net_torque, d_orientation, d_inertia, d_angmom, d_group_members, group_size, alpha, factor_r); return hipSuccess; }
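// A minimal host-side sketch (not taken from the HOOMD-blue sources) of how the
// reduced sums could be combined into the factor_t argument of gpu_fire_update_v().
// The rule factor_t = vnorm/fnorm*alpha, falling back to 1 when fnorm == 0, comes
// from the doc comment on gpu_fire_update_v_kernel() above; the function and
// parameter names here are illustrative assumptions, with sum_vsq and sum_asq
// being the v*v and a*a sums produced by gpu_fire_compute_sum_all().
#include <math.h>

static double fire_factor_t_sketch(double alpha, double sum_vsq, double sum_asq)
    {
    double vnorm = sqrt(sum_vsq);   // magnitude of the group velocity vector
    double fnorm = sqrt(sum_asq);   // magnitude of the group force/mass vector
    return (fnorm > 0.0) ? alpha * vnorm / fnorm : 1.0;
    }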
03fb7851805b97016d3f195259345779f5b33336.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: askeys #include "FIREEnergyMinimizerGPU.cuh" #include "hoomd/TextureTools.h" #include "hoomd/VectorMath.h" #include <assert.h> #include <stdio.h> /*! \file FIREEnergyMinimizerGPU.cu \brief Defines GPU kernel code for one performing one FIRE energy minimization iteration on the GPU. Used by FIREEnergyMinimizerGPU. */ //! Shared memory used in reducing sums extern __shared__ Scalar fire_sdata[]; //! The kernel function to zeros velocities, called by gpu_fire_zero_v() /*! \param d_vel device array of particle velocities \param d_group_members Device array listing the indicies of the mebers of the group to zero \param group_size Number of members in the group */ extern "C" __global__ void gpu_fire_zero_v_kernel(Scalar4 *d_vel, unsigned int *d_group_members, unsigned int group_size) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; // read the particle's velocity (MEM TRANSFER: 32 bytes) Scalar4 vel = d_vel[idx]; // zero the velocity(FLOPS: ?) vel.x = Scalar(0.0); vel.y = Scalar(0.0); vel.z = Scalar(0.0); // write out the results (MEM_TRANSFER: 32 bytes) d_vel[idx] = vel; } } //! The kernel function to zero angular momenta, called by gpu_fire_zero_angmom() extern "C" __global__ void gpu_fire_zero_angmom_kernel(Scalar4 *d_angmom, unsigned int *d_group_members, unsigned int group_size) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; // write out the results (MEM_TRANSFER: 32 bytes) d_angmom[idx] = make_scalar4(0,0,0,0); } } /*! \param d_vel device array of particle velocities \param d_group_members Device array listing the indicies of the mebers of the group to integrate \param group_size Number of members in the group This function is just the driver for gpu_fire_zero_v_kernel(), see that function for details. */ cudaError_t gpu_fire_zero_v(Scalar4 *d_vel, unsigned int *d_group_members, unsigned int group_size) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (group_size/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_fire_zero_v_kernel<<< grid, threads >>>(d_vel, d_group_members, group_size); return cudaSuccess; } cudaError_t gpu_fire_zero_angmom(Scalar4 *d_angmom, unsigned int *d_group_members, unsigned int group_size) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (group_size/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_fire_zero_angmom_kernel<<< grid, threads >>>(d_angmom, d_group_members, group_size); return cudaSuccess; } //! Kernel function for reducing the potential energy to a partial sum /*! 
\param d_group_members Device array listing the indicies of the mebers of the group to sum \param group_size Number of members in the group \param d_net_force Pointer to the force array for all particles \param d_partial_sum_pe Placeholder for the partial sum */ extern "C" __global__ void gpu_fire_reduce_pe_partial_kernel(unsigned int *d_group_members, unsigned int group_size, Scalar4* d_net_force, Scalar* d_partial_sum_pe) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar pe = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; // read the particle's force and extract the pe from w component (MEM TRANSFER: 32 bytes) Scalar4 force = d_net_force[idx]; pe = force.w; // Uncoalesced Memory Read replace by Texture Read above. Scalars4* d_net_force still being passed to support this // defunct structure. //pe = d_net_force[idx].w; } fire_sdata[threadIdx.x] = pe; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) { d_partial_sum_pe[blockIdx.x] = fire_sdata[0]; } } //! Kernel function for reducing a partial sum to a full sum (one value) /*! \param d_sum Placeholder for the sum \param d_partial_sum Array containing the parial sum \param num_blocks Number of blocks to execute */ extern "C" __global__ void gpu_fire_reduce_partial_sum_kernel(Scalar *d_sum, Scalar* d_partial_sum, unsigned int num_blocks) { Scalar sum = Scalar(0.0); // sum up the values in the partial sum via a sliding window for (int start = 0; start < num_blocks; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < num_blocks) fire_sdata[threadIdx.x] = d_partial_sum[start + threadIdx.x]; else fire_sdata[threadIdx.x] = Scalar(0.0); __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // everybody sums up sum2K sum += fire_sdata[0]; } if (threadIdx.x == 0) *d_sum = sum; } /*! \param d_group_members Device array listing the indicies of the mebers of the group to integrate \param group_size Number of members in the group \param d_net_force Array containing the net forces \param d_sum_pe Placeholder for the sum of the PE \param d_partial_sum_pe Array containing the parial sum of the PE \param block_size The size of one block \param num_blocks Number of blocks to execute This is a driver for gpu_fire_reduce_pe_partial_kernel() and gpu_fire_reduce_partial_sum_kernel(), see them for details */ cudaError_t gpu_fire_compute_sum_pe(unsigned int *d_group_members, unsigned int group_size, Scalar4* d_net_force, Scalar* d_sum_pe, Scalar* d_partial_sum_pe, unsigned int block_size, unsigned int num_blocks) { // setup the grid to run the kernel dim3 grid(num_blocks, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_fire_reduce_pe_partial_kernel<<< grid, threads, block_size*sizeof(Scalar) >>>(d_group_members, group_size, d_net_force, d_partial_sum_pe); gpu_fire_reduce_partial_sum_kernel<<< grid, threads, block_size*sizeof(Scalar) >>>(d_sum_pe, d_partial_sum_pe, num_blocks); return cudaSuccess; } //! Kernel function to compute the partial sum over the P term in the FIRE algorithm /*! 
\param d_vel particle velocities and masses on the device \param d_accel particle accelerations on the device \param d_group_members Device array listing the indicies of the mebers of the group to integrate \param group_size Number of members in the group \param d_partial_sum_P Array to hold the partial sum */ extern "C" __global__ void gpu_fire_reduce_P_partial_kernel(const Scalar4 *d_vel, const Scalar3 *d_accel, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_P) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar P = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; Scalar3 a = d_accel[idx]; Scalar4 v = d_vel[idx]; P = a.x*v.x + a.y*v.y + a.z*v.z; } fire_sdata[threadIdx.x] = P; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_P[blockIdx.x] = fire_sdata[0]; } // Angular terms __global__ void gpu_fire_reduce_Pr_partial_kernel(const Scalar4 *d_angmom, const Scalar4 *d_orientation, const Scalar3 *d_inertia, const Scalar4 *d_net_torque, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_Pr) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar Pr = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; vec3<Scalar> t(d_net_torque[idx]); quat<Scalar> p(d_angmom[idx]); quat<Scalar> q(d_orientation[idx]); vec3<Scalar> I(d_inertia[idx]); // rotate torque into principal frame t = rotate(conj(q),t); // check for zero moment of inertia bool x_zero, y_zero, z_zero; x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON); // ignore torque component along an axis for which the moment of inertia zero if (x_zero) t.x = 0; if (y_zero) t.y = 0; if (z_zero) t.z = 0; // s is the pure imaginary quaternion with im. part equal to true angular velocity vec3<Scalar> s = (Scalar(1./2.) * conj(q) * p).v; // rotational power = torque * angvel Pr = dot(t,s); } fire_sdata[threadIdx.x] = Pr; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_Pr[blockIdx.x] = fire_sdata[0]; } // Norm of angular velocity vector __global__ void gpu_fire_reduce_wnorm_partial_kernel(const Scalar4 *d_angmom, const Scalar4 *d_orientation, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_w) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar w = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; quat<Scalar> p(d_angmom[idx]); quat<Scalar> q(d_orientation[idx]); vec3<Scalar> s = (Scalar(1./2.) * conj(q) * p).v; w = dot(s,s); } fire_sdata[threadIdx.x] = w; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_w[blockIdx.x] = fire_sdata[0]; } //! 
Kernel function to compute the partial sum over the vsq term in the FIRE algorithm /*! \param d_vel device array of particle velocities \param d_group_members Array listing members of the group \param group_size Number of members in the group \param d_partial_sum_vsq Array to hold the partial sum */ extern "C" __global__ void gpu_fire_reduce_vsq_partial_kernel(const Scalar4 *d_vel, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_vsq) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar vsq = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; Scalar4 v = d_vel[idx]; vsq = v.x*v.x + v.y*v.y + v.z*v.z; } fire_sdata[threadIdx.x] = vsq; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_vsq[blockIdx.x] = fire_sdata[0]; } //! Kernel function to compute the partial sum over the asq term in the FIRE algorithm /*! \param d_accel device array of particle accelerations \param d_group_members Array listing members of the group \param group_size Number of members in the group \param d_partial_sum_asq Array to hold the partial sum */ extern "C" __global__ void gpu_fire_reduce_asq_partial_kernel(const Scalar3 *d_accel, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_asq) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar asq = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; Scalar3 a = d_accel[idx]; asq = a.x*a.x + a.y*a.y + a.z*a.z; } fire_sdata[threadIdx.x] = asq; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_asq[blockIdx.x] = fire_sdata[0]; } __global__ void gpu_fire_reduce_tsq_partial_kernel(const Scalar4 *d_net_torque, const Scalar4 *d_orientation, const Scalar3 *d_inertia, unsigned int *d_group_members, unsigned int group_size, Scalar* d_partial_sum_tsq) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar tsq = 0; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; vec3<Scalar> t(d_net_torque[idx]); quat<Scalar> q(d_orientation[idx]); vec3<Scalar> I(d_inertia[idx]); // rotate torque into principal frame t = rotate(conj(q),t); // check for zero moment of inertia bool x_zero, y_zero, z_zero; x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON); // ignore torque component along an axis for which the moment of inertia zero if (x_zero) t.x = 0; if (y_zero) t.y = 0; if (z_zero) t.z = 0; tsq = dot(t,t); } fire_sdata[threadIdx.x] = tsq; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) fire_sdata[threadIdx.x] += fire_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) d_partial_sum_tsq[blockIdx.x] = fire_sdata[0]; } /*! 
\param N number of particles in system \param d_vel array of particle velocities \param d_accel array of particle accelerations \param d_group_members Device array listing the indicies of the mebers of the group to integrate \param group_size Number of members in the group \param d_sum_all Array to hold the sum over P, vsq, and asq \param d_partial_sum_P Array to hold the partial sum over P (a*v) \param d_partial_sum_vsq Array to hold the partial sum over vsq (v*v) \param d_partial_sum_asq Array to hold the partial sum over asq (a*a) \param block_size is the size of one block \param num_blocks is the number of blocks to execute \note Currently the sums are performed consecutively. The efficiency of this function could be improved by computing all three sums simultaneously This is a driver for gpu_fire_reduce_{X}_partial_kernel() (where X = P, vsq, asq) and gpu_fire_reduce_partial_sum_kernel(), see them for details */ cudaError_t gpu_fire_compute_sum_all( const unsigned int N, const Scalar4 *d_vel, const Scalar3 *d_accel, unsigned int *d_group_members, unsigned int group_size, Scalar* d_sum_all, Scalar* d_partial_sum_P, Scalar* d_partial_sum_vsq, Scalar* d_partial_sum_asq, unsigned int block_size, unsigned int num_blocks) { // setup the grid to run the kernel dim3 grid(num_blocks, 1, 1); dim3 grid1(1, 1, 1); dim3 threads(block_size, 1, 1); dim3 threads1(256, 1, 1); // run the kernels gpu_fire_reduce_P_partial_kernel<<< grid, threads, block_size*sizeof(Scalar) >>>( d_vel, d_accel, d_group_members, group_size, d_partial_sum_P); gpu_fire_reduce_partial_sum_kernel<<< grid1, threads1, block_size*sizeof(Scalar) >>>(&d_sum_all[0], d_partial_sum_P, num_blocks); gpu_fire_reduce_vsq_partial_kernel<<< grid, threads, block_size*sizeof(Scalar) >>>(d_vel, d_group_members, group_size, d_partial_sum_vsq); gpu_fire_reduce_partial_sum_kernel<<< grid1, threads1, block_size*sizeof(Scalar) >>>(&d_sum_all[1], d_partial_sum_vsq, num_blocks); gpu_fire_reduce_asq_partial_kernel<<< grid, threads, block_size*sizeof(Scalar) >>>(d_accel, d_group_members, group_size, d_partial_sum_asq); gpu_fire_reduce_partial_sum_kernel<<< grid1, threads1, block_size*sizeof(Scalar) >>>(&d_sum_all[2], d_partial_sum_asq, num_blocks); return cudaSuccess; } cudaError_t gpu_fire_compute_sum_all_angular(const unsigned int N, const Scalar4 *d_orientation, const Scalar3 *d_inertia, const Scalar4 *d_angmom, const Scalar4 *d_net_torque, unsigned int *d_group_members, unsigned int group_size, Scalar* d_sum_all, Scalar* d_partial_sum_Pr, Scalar* d_partial_sum_wnorm, Scalar* d_partial_sum_tsq, unsigned int block_size, unsigned int num_blocks) { // setup the grid to run the kernel dim3 grid(num_blocks, 1, 1); dim3 grid1(1, 1, 1); dim3 threads(block_size, 1, 1); dim3 threads1(256, 1, 1); // run the kernels gpu_fire_reduce_Pr_partial_kernel<<< grid, threads, block_size*sizeof(Scalar) >>>( d_angmom, d_orientation, d_inertia, d_net_torque, d_group_members, group_size, d_partial_sum_Pr); gpu_fire_reduce_partial_sum_kernel<<< grid1, threads1, block_size*sizeof(Scalar) >>>(&d_sum_all[0], d_partial_sum_Pr, num_blocks); gpu_fire_reduce_wnorm_partial_kernel<<< grid, threads, block_size*sizeof(Scalar) >>>(d_angmom, d_orientation, d_group_members, group_size, d_partial_sum_wnorm); gpu_fire_reduce_partial_sum_kernel<<< grid1, threads1, block_size*sizeof(Scalar) >>>(&d_sum_all[1], d_partial_sum_wnorm, num_blocks); gpu_fire_reduce_tsq_partial_kernel<<< grid, threads, block_size*sizeof(Scalar) >>>(d_net_torque, d_orientation, d_inertia, d_group_members, group_size, 
d_partial_sum_tsq); gpu_fire_reduce_partial_sum_kernel<<< grid1, threads1, block_size*sizeof(Scalar) >>>(&d_sum_all[2], d_partial_sum_tsq, num_blocks); return cudaSuccess; } //! Kernel function to update the velocties used by the FIRE algorithm /*! \param d_vel Array of velocities to update \param d_accel Array of accelerations \param d_group_members Device array listing the indicies of the mebers of the group to update \param group_size Number of members in the grou \param alpha Alpha coupling parameter used by the FIRE algorithm \param factor_t Combined factor vnorm/fnorm*alpha, or 1 if fnorm==0 */ extern "C" __global__ void gpu_fire_update_v_kernel(Scalar4 *d_vel, const Scalar3 *d_accel, unsigned int *d_group_members, unsigned int group_size, Scalar alpha, Scalar factor_t) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; // read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes) Scalar4 v = d_vel[idx]; Scalar3 a = d_accel[idx]; v.x = v.x*(Scalar(1.0)-alpha) + a.x*factor_t; v.y = v.y*(Scalar(1.0)-alpha) + a.y*factor_t; v.z = v.z*(Scalar(1.0)-alpha) + a.z*factor_t; // write out the results (MEM_TRANSFER: 32 bytes) d_vel[idx] = v; } } /*! \param d_vel array of particle velocities to update \param d_accel array of particle accelerations \param d_group_members Device array listing the indicies of the mebers of the group to integrate \param group_size Number of members in the group \param alpha Alpha coupling parameter used by the FIRE algorithm \param vnorm Magnitude of the (3*N) dimensional velocity vector \param invfnorm 1 over the magnitude of the (3*N) dimensional force vector This function is a driver for gpu_fire_update_v_kernel(), see it for details. 
*/ cudaError_t gpu_fire_update_v(Scalar4 *d_vel, const Scalar3 *d_accel, unsigned int *d_group_members, unsigned int group_size, Scalar alpha, Scalar factor_t) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (group_size/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_fire_update_v_kernel<<< grid, threads >>>(d_vel, d_accel, d_group_members, group_size, alpha, factor_t); return cudaSuccess; } __global__ void gpu_fire_update_angmom_kernel(const Scalar4 *d_net_torque, const Scalar4 *d_orientation, const Scalar3 *d_inertia, Scalar4 *d_angmom, unsigned int *d_group_members, unsigned int group_size, Scalar alpha, Scalar factor_r) { // determine which particle this thread works on (MEM TRANSFER: 4 bytes) int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx < group_size) { unsigned int idx = d_group_members[group_idx]; quat<Scalar> q(d_orientation[idx]); vec3<Scalar> t(d_net_torque[idx]); quat<Scalar> p(d_angmom[idx]); vec3<Scalar> I(d_inertia[idx]); // rotate torque into principal frame t = rotate(conj(q),t); // check for zero moment of inertia bool x_zero, y_zero, z_zero; x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON); // ignore torque component along an axis for which the moment of inertia zero if (x_zero) t.x = 0; if (y_zero) t.y = 0; if (z_zero) t.z = 0; p = p*Scalar(1.0-alpha) + Scalar(2.0)*q*t*factor_r; d_angmom[idx] = quat_to_scalar4(p); } } cudaError_t gpu_fire_update_angmom(const Scalar4 *d_net_torque, const Scalar4 *d_orientation, const Scalar3 *d_inertia, Scalar4 *d_angmom, unsigned int *d_group_members, unsigned int group_size, Scalar alpha, Scalar factor_r) { // setup the grid to run the kernel int block_size = 256; dim3 grid( (group_size/block_size) + 1, 1, 1); dim3 threads(block_size, 1, 1); // run the kernel gpu_fire_update_angmom_kernel<<< grid, threads >>>(d_net_torque, d_orientation, d_inertia, d_angmom, d_group_members, group_size, alpha, factor_r); return cudaSuccess; }
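// A host-side sketch (not taken from the HOOMD-blue sources) of one way to size
// and drive gpu_fire_compute_sum_all(). Choosing block_size = 256 matches the
// 256-thread final-reduction launch inside that driver, so the
// block_size*sizeof(Scalar) dynamic shared memory covers both kernels; each
// partial-sum buffer needs one entry per block. Buffer names and the absence of
// error checking are illustrative assumptions.
void fire_compute_sums_sketch(unsigned int N,
                              const Scalar4 *d_vel,
                              const Scalar3 *d_accel,
                              unsigned int *d_group_members,
                              unsigned int group_size)
    {
    unsigned int block_size = 256;
    unsigned int num_blocks = (group_size / block_size) + 1; // one partial sum per block

    Scalar *d_sum_all, *d_partial_P, *d_partial_vsq, *d_partial_asq;
    cudaMalloc(&d_sum_all, 3 * sizeof(Scalar));
    cudaMalloc(&d_partial_P, num_blocks * sizeof(Scalar));
    cudaMalloc(&d_partial_vsq, num_blocks * sizeof(Scalar));
    cudaMalloc(&d_partial_asq, num_blocks * sizeof(Scalar));

    gpu_fire_compute_sum_all(N, d_vel, d_accel, d_group_members, group_size,
                             d_sum_all, d_partial_P, d_partial_vsq, d_partial_asq,
                             block_size, num_blocks);

    // d_sum_all now holds {sum a.v, sum v.v, sum a.a} for the group
    Scalar h_sum[3];
    cudaMemcpy(h_sum, d_sum_all, 3 * sizeof(Scalar), cudaMemcpyDeviceToHost);
    // vnorm = sqrt(h_sum[1]) and fnorm = sqrt(h_sum[2]) then feed the velocity update

    cudaFree(d_sum_all);
    cudaFree(d_partial_P);
    cudaFree(d_partial_vsq);
    cudaFree(d_partial_asq);
    }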
e2d27ad2e7111171952b98edf6397fa3bafff915.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>   // for atoi()

int main(int argc, char *argv[]){
    int *mem, *mem2;
    size_t avail, total;

    // report free device memory before allocating
    hipMemGetInfo(&avail, &total);
    printf("total available memory: %ld\n", avail / 1024 / 1024);

    // head room to leave free: argv[1] in GiB, argv[2] in MiB
    long long size  = (long)1024*1024*1024*atoi(argv[1]);
    long long size2 = (long)1024*1024*atoi(argv[2]);

    // allocate everything except the requested head room
    hipMalloc(&mem, avail - size - size2);
    //hipMalloc(&mem2, size2);

    // report what is left after the allocation
    hipMemGetInfo(&avail, &total);
    printf("available memory: %ld\n", avail / 1024 / 1024);

    printf("Press Enter key to continue...");
    fgetc(stdin);
}
e2d27ad2e7111171952b98edf6397fa3bafff915.cu
#include <stdio.h>
#include <stdlib.h>   // for atoi()

int main(int argc, char *argv[]){
    int *mem, *mem2;
    size_t avail, total;

    // report free device memory before allocating
    cudaMemGetInfo(&avail, &total);
    printf("total available memory: %ld\n", avail / 1024 / 1024);

    // head room to leave free: argv[1] in GiB, argv[2] in MiB
    long long size  = (long)1024*1024*1024*atoi(argv[1]);
    long long size2 = (long)1024*1024*atoi(argv[2]);

    // allocate everything except the requested head room
    cudaMalloc(&mem, avail - size - size2);
    //cudaMalloc(&mem2, size2);

    // report what is left after the allocation
    cudaMemGetInfo(&avail, &total);
    printf("available memory: %ld\n", avail / 1024 / 1024);

    printf("Press Enter key to continue...");
    fgetc(stdin);
}
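// The probe above ignores the status codes returned by cudaMalloc and
// cudaMemGetInfo, so an over-sized request fails silently. A small checked
// variant is sketched below; the helper name is an assumption, not part of
// the original program.
#include <stdio.h>
#include <cuda_runtime.h>

static int checked_alloc(void **ptr, size_t bytes)
{
    cudaError_t err = cudaMalloc(ptr, bytes);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc(%zu bytes) failed: %s\n", bytes, cudaGetErrorString(err));
        return -1;
    }
    return 0;
}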
a57403c2541460c76c4aabf9b0ce45a68876ff14.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <GL/glut.h> #include <GL/gl.h> #include <malloc.h> #include <signal.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> /****************************************************************************** nvcc -o image_processing cuda_image_processing.cu -lglut -lGL -lm ******************************************************************************/ #define width 100 #define height 72 unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,0,0,255,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0, 0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }; unsigned char results[width * height]; __global__ void detect_edges(unsigned char *in, unsigned char *out) { unsigned int i = (blockIdx.x*72)+ threadIdx.x; int x, y; // the pixel of interest int b, d, f, h; // the pixels adjacent to x,y used for the calculation int r; // the result of calculate y = i / 100; x = i - (100 * y); if (x == 0 || y == 0 || x == width - 1 || y == height - 1) { out[i] = 0; } else { b = i + width; d = i - 1; f = i + 1; h = i - width; r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1) + (in[h] * -1); if (r > 0) { // if the result is positive this is an edge pixel out[i] = 255; } else { out[i] = 0; } } } void tidy_and_exit() { exit(0); } void sigint_callback(int signal_number){ printf("\nInterrupt from keyboard\n"); tidy_and_exit(); } static void display() { glClear(GL_COLOR_BUFFER_BIT); glRasterPos4i(-1, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image); glRasterPos4i(0, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results); glFlush(); } static void key_pressed(unsigned char key, int x, int y) { switch(key){ case 27: // escape tidy_and_exit(); break; default: printf("\nPress escape to exit\n"); break; } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char **argv) { unsigned char *image_m, *results_m; hipMalloc((void**)&image_m, sizeof(unsigned char) * (width * height) ); hipMalloc((void**)&results_m, sizeof(unsigned char) * (width * height)); hipMemcpy(image_m, &image, sizeof(unsigned char) * 
(width * height), hipMemcpyHostToDevice);

    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);

    signal(SIGINT, sigint_callback);
    printf("image dimensions %dx%d\n", width, height);

    hipLaunchKernelGGL(( detect_edges), dim3(100),dim3(72), 0, 0, image_m, results_m);
    hipDeviceSynchronize();
    hipMemcpy(&results, results_m, sizeof(unsigned char) * (width * height), hipMemcpyDeviceToHost);

    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));

    // free the device buffers (pass the pointers themselves, not their addresses)
    hipFree(image_m);
    hipFree(results_m);

    glutInit(&argc, argv);
    glutInitWindowSize(width * 2, height);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
    glutCreateWindow("6CS005 Image Processing Coursework");
    glutDisplayFunc(display);
    glutKeyboardFunc(key_pressed);
    glClearColor(0.0, 1.0, 0.0, 1.0);
    glutMainLoop();

    tidy_and_exit();
    return 0;
}
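/* The launch above relies on dim3(100) blocks of dim3(72) threads covering
   exactly width*height = 7200 pixels, which is why detect_edges() carries no
   explicit i < width*height guard and hard-codes the constants 72 and 100 in
   its index math. A host-side sketch of the usual rounded-up sizing for an
   arbitrary pixel count is given below (names are illustrative assumptions);
   with this sizing the kernel would also need a bounds check and
   width-derived constants. */
#include <assert.h>

static int edge_launch_blocks(int total_pixels, int threads_per_block)
{
    /* round up so every pixel gets a thread */
    int blocks = (total_pixels + threads_per_block - 1) / threads_per_block;
    assert(blocks * threads_per_block >= total_pixels);
    return blocks;
}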
a57403c2541460c76c4aabf9b0ce45a68876ff14.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <GL/glut.h> #include <GL/gl.h> #include <malloc.h> #include <signal.h> #include <cuda_runtime_api.h> #include <cuda.h> /****************************************************************************** nvcc -o image_processing cuda_image_processing.cu -lglut -lGL -lm ******************************************************************************/ #define width 100 #define height 72 unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,0,0,255,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0, 0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }; unsigned char results[width * height]; __global__ void detect_edges(unsigned char *in, unsigned char *out) { unsigned int i = (blockIdx.x*72)+ threadIdx.x; int x, y; // the pixel of interest int b, d, f, h; // the pixels adjacent to x,y used for the calculation int r; // the result of calculate y = i / 100; x = i - (100 * y); if (x == 0 || y == 0 || x == width - 1 || y == height - 1) { out[i] = 0; } else { b = i + width; d = i - 1; f = i + 1; h = i - width; r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1) + (in[h] * -1); if (r > 0) { // if the result is positive this is an edge pixel out[i] = 255; } else { out[i] = 0; } } } void tidy_and_exit() { exit(0); } void sigint_callback(int signal_number){ printf("\nInterrupt from keyboard\n"); tidy_and_exit(); } static void display() { glClear(GL_COLOR_BUFFER_BIT); glRasterPos4i(-1, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image); glRasterPos4i(0, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results); glFlush(); } static void key_pressed(unsigned char key, int x, int y) { switch(key){ case 27: // escape tidy_and_exit(); break; default: printf("\nPress escape to exit\n"); break; } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char **argv) { unsigned char *image_m, *results_m; cudaMalloc((void**)&image_m, sizeof(unsigned char) * (width * height) ); cudaMalloc((void**)&results_m, sizeof(unsigned char) * (width * height)); cudaMemcpy(image_m, &image, sizeof(unsigned char) * 
(width * height), cudaMemcpyHostToDevice); struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); signal(SIGINT, sigint_callback); printf("image dimensions %dx%d\n", width, height); detect_edges<<<100,72>>>(image_m, results_m); cudaDeviceSynchronize(); cudaMemcpy(&results, results_m, sizeof(unsigned char) * (width * height), cudaMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); cudaFree(image_m); cudaFree(results_m); glutInit(&argc, argv); glutInitWindowSize(width * 2,height); glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE); glutCreateWindow("6CS005 Image Processing Coursework"); glutDisplayFunc(display); glutKeyboardFunc(key_pressed); glClearColor(0.0, 1.0, 0.0, 1.0); glutMainLoop(); tidy_and_exit(); return 0; }
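// Editor's sketch, not part of the original program above: a host-side reference
// for the same 4-neighbour Laplacian that detect_edges computes, useful for
// checking the GPU results buffer after the cudaMemcpy back. The function name
// host_detect_edges is an assumption; it reuses the file's width/height macros.
void host_detect_edges(const unsigned char *in, unsigned char *out) {
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            int i = y * width + x;
            if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
                out[i] = 0; // border pixels are forced to black, as in the kernel
            } else {
                int r = in[i] * 4 - in[i + width] - in[i - 1] - in[i + 1] - in[i - width];
                out[i] = (r > 0) ? 255 : 0; // positive Laplacian response marks an edge
            }
        }
    }
}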
a457c744d59523a31d01e2bb5a65d39e067680c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/dropout_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void DropoutForward(const int n, const Dtype* in, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] * (mask[index] > threshold) * scale; } } template <typename Dtype> void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); if (this->phase_ == TRAIN) { unsigned int* mask = static_cast<unsigned int*>(rand_vec_.mutable_gpu_data()); caffe_gpu_rng_uniform(count, mask); // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) if (scale_train_) { DropoutForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, bottom_data, mask, uint_thres_, scale_, top_data); } else { DropoutForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, bottom_data, mask, uint_thres_, 1.f, top_data); } CUDA_POST_KERNEL_CHECK; } else { caffe_copy(count, bottom_data, top_data); if (!scale_train_) { caffe_gpu_scal<Dtype>(count, 1. / scale_, top_data); } } } template <typename Dtype> __global__ void DropoutBackward(const int n, const Dtype* in_diff, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); } } template <typename Dtype> void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (this->phase_ == TRAIN) { const unsigned int* mask = static_cast<const unsigned int*>(rand_vec_.gpu_data()); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) if (scale_train_) { DropoutBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, top_diff, mask, uint_thres_, scale_, bottom_diff); } else { DropoutBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, top_diff, mask, uint_thres_, 1.f, bottom_diff); } CUDA_POST_KERNEL_CHECK; } else { caffe_copy(top[0]->count(), top_diff, bottom_diff); if (!scale_train_) { caffe_gpu_scal<Dtype>(top[0]->count(), 1. / scale_, bottom_diff); } } } } INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer); } // namespace caffe
a457c744d59523a31d01e2bb5a65d39e067680c4.cu
#include <vector> #include "caffe/layers/dropout_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void DropoutForward(const int n, const Dtype* in, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] * (mask[index] > threshold) * scale; } } template <typename Dtype> void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); if (this->phase_ == TRAIN) { unsigned int* mask = static_cast<unsigned int*>(rand_vec_.mutable_gpu_data()); caffe_gpu_rng_uniform(count, mask); // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) if (scale_train_) { DropoutForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, bottom_data, mask, uint_thres_, scale_, top_data); } else { DropoutForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, bottom_data, mask, uint_thres_, 1.f, top_data); } CUDA_POST_KERNEL_CHECK; } else { caffe_copy(count, bottom_data, top_data); if (!scale_train_) { caffe_gpu_scal<Dtype>(count, 1. / scale_, top_data); } } } template <typename Dtype> __global__ void DropoutBackward(const int n, const Dtype* in_diff, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); } } template <typename Dtype> void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (this->phase_ == TRAIN) { const unsigned int* mask = static_cast<const unsigned int*>(rand_vec_.gpu_data()); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) if (scale_train_) { DropoutBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, top_diff, mask, uint_thres_, scale_, bottom_diff); } else { DropoutBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, top_diff, mask, uint_thres_, 1.f, bottom_diff); } CUDA_POST_KERNEL_CHECK; } else { caffe_copy(top[0]->count(), top_diff, bottom_diff); if (!scale_train_) { caffe_gpu_scal<Dtype>(top[0]->count(), 1. / scale_, bottom_diff); } } } } INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer); } // namespace caffe
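// Editor's sketch, not part of the original layer files: the kernels above consume
// mask, uint_thres_ and scale_, but their setup lives in the layer's CPU code, which
// is not shown here. A minimal sketch of the usual inverted-dropout bookkeeping,
// assuming a dropout ratio p in (0,1) and the standard Caffe-style scheme; the names
// DropoutParams and make_dropout_params are illustrative only.
#include <climits>
struct DropoutParams {
  unsigned int uint_thres; // a unit is kept when mask[i] > uint_thres
  float scale;             // kept activations are scaled by 1/(1-p) at train time
};
inline DropoutParams make_dropout_params(float p) {
  DropoutParams d;
  d.uint_thres = static_cast<unsigned int>(UINT_MAX * p);
  d.scale = 1.0f / (1.0f - p);
  return d;
}
// Example: with p = 0.5, uint_thres is roughly UINT_MAX/2, so about half of the
// uniform random mask values pass the comparison, and scale = 2.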
d857e2e074d5c05b4b843f03e92ea363da89af5b.hip
// !!! This is a file automatically generated by hipify!!! #include <assert.h> #include <hip/hip_runtime.h> #include <mma.h> #include <hip/hip_runtime_api.h> #include "TensorCoreGemm.cuh" using namespace nvcuda; namespace MNN { namespace CUDA { template<typename T> __global__ void GemmPackedFull(const MatMulParam* param, const int iBlock, T *c, const half *a, const half *b, const T* biasPtr) { size_t eU = param->elhPack[0]; size_t lU = param->elhPack[1]; size_t hU = param->elhPack[2]; size_t maxCount = eU * hU * warpSize; size_t wrapId = threadIdx.x / warpSize; size_t laneId = threadIdx.x % warpSize; extern __shared__ float sharedMemory[]; T* cache = (T*)(sharedMemory + wrapId * 16 * 16); // Declare the fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag; wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> b_frag; wmma::fragment<wmma::accumulator, 16, 16, 16, T> acc_frag; for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { size_t subIndex = index / warpSize; size_t warpM = subIndex % eU; size_t warpN = subIndex / eU; wmma::load_matrix_sync(acc_frag, biasPtr + 16 * warpN, 0, wmma::mem_row_major); const half* aStart = a + warpM * lU * 16 * 16; const half* bStart = b + warpN * lU * 16 * 16; //printf("GemmPacked: %d - %d - %d, numele: %d, %d\n", eU, lU, hU, a_frag.num_elements, b_frag.num_elements); // MLA for (size_t i = 0; i < lU; ++i) { half* aTemp = ((half *)(aStart+i*256));//aStart + (i << 8) + (laneId << 1); half* bTemp = ((half *)(bStart+i*256));//bStart + (i << 8) + (laneId << 1); wmma::load_matrix_sync(a_frag, aStart + i * 256, 16); wmma::load_matrix_sync(b_frag, bStart + i * 256, 16); wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag); } for(size_t t=0; t<acc_frag.num_elements; t++){ acc_frag.x[t] = max(acc_frag.x[t], param->minValue); acc_frag.x[t] = min(acc_frag.x[t], param->maxValue); } size_t eSta = (warpM + iBlock*eU) * 16; if(eSta >= (size_t)param->elh[0]) { continue; } size_t eEnd = ((eSta + (size_t)16) > (size_t)param->elh[0]) ? 
(size_t)param->elh[0] : (eSta + (size_t)16); size_t eC = eEnd - eSta; T* dstStart = (T*)(c + warpN * 16 * (size_t)param->elh[0] + eSta * 16); wmma::store_matrix_sync(cache, acc_frag, 16, wmma::mem_row_major); if (warpSize % 16 == 0) { if(sizeof(T) == 4) { size_t r = warpSize / 16; size_t x = laneId / r; size_t ysta = laneId % r; for (size_t y = ysta; y < eC; y+=r) { float value = *((T*)(cache + 16 * y + x)); dstStart[y * 16 + x] = value; } } else { size_t xsta = (laneId % 8) * 2; size_t ysta = laneId / 8; for (size_t y = ysta; y < eC; y+=4) { dstStart[y * 16 + xsta] = *((T*)(cache + 16 * y + xsta)); dstStart[y * 16 + xsta + 1] = *((T*)(cache + 16 * y + xsta + 1)); } } } else { for (size_t tId = laneId; tId < eC * 16; tId += warpSize) { size_t y = tId % eC; size_t x = tId / eC; float value = *((T*)(cache + 16 * y + x)); dstStart[y * 16 + x] = value; } } } } template<typename T> __global__ void GemmPackedFull16x32(const MatMulParam* param, const int iBlock, T *c, const half *a, const half *b, const T* biasPtr) { size_t eU = param->elhPack[0]; size_t lU = param->elhPack[1]; size_t hU = param->elhPack[2]; size_t threadCount = blockDim.x / warpSize; size_t maxCount = eU * hU; size_t wrapId = threadIdx.x / warpSize; size_t laneId = threadIdx.x % warpSize; extern __shared__ float sharedMemory[]; T* cache = (T*)(sharedMemory + wrapId * 16 * 32); for (size_t index = blockIdx.x * threadCount + wrapId; index < maxCount; index += gridDim.x * threadCount) { size_t warpM = index % eU; size_t warpN = index / eU; // Declare the fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> MA0; wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> MB0; wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> MB1; wmma::fragment<wmma::accumulator, 16, 16, 16, T> MC0; wmma::fragment<wmma::accumulator, 16, 16, 16, T> MC1; wmma::load_matrix_sync(MC0, biasPtr + 32 * warpN + 0, 0, wmma::mem_row_major); wmma::load_matrix_sync(MC1, biasPtr + 32 * warpN + 16, 0, wmma::mem_row_major); const half* aStart = a + warpM * lU * 16 * 16; const half* bStart = b + warpN * lU * 16 * 32; //printf("GemmPacked: %d - %d - %d, numele: %d, %d\n", eU, lU, hU, a_frag.num_elements, b_frag.num_elements); // MLA for (size_t i = 0; i < lU; ++i) { wmma::load_matrix_sync(MA0, aStart + i * 256 + 0, 16); wmma::load_matrix_sync(MB0, bStart + i * 512, 16); wmma::load_matrix_sync(MB1, bStart + i * 512 + 256, 16); wmma::mma_sync(MC0, MA0, MB0, MC0); wmma::mma_sync(MC1, MA0, MB1, MC1); } for(size_t t=0; t<MC0.num_elements; t++){ MC0.x[t] = max(MC0.x[t], param->minValue); MC0.x[t] = min(MC0.x[t], param->maxValue); } for(size_t t=0; t<MC1.num_elements; t++){ MC1.x[t] = max(MC1.x[t], param->minValue); MC1.x[t] = min(MC1.x[t], param->maxValue); } size_t eSta = (warpM + iBlock*eU) * 16; if(eSta >= (size_t)param->elh[0]) { continue; } size_t eEnd = ((eSta + (size_t)16) > (size_t)param->elh[0]) ? 
(size_t)param->elh[0] : (eSta + (size_t)16); size_t eC = eEnd - eSta; T* dst0 = (T*)(c + warpN * 32 * (size_t)param->elh[0] + eSta * 16); T* dst1 = (T*)(c + (warpN * 32 + 16) * (size_t)param->elh[0] + eSta * 16); // First 8x32 wmma::store_matrix_sync(cache, MC0, 16, wmma::mem_row_major); // Second 8x32 wmma::store_matrix_sync(cache + 256, MC1, 16, wmma::mem_row_major); auto dst = dst0; auto src = cache; if (laneId >= 16) { dst = dst1; src = cache + 256; } size_t x = laneId % 16; for (size_t y = 0; y < eC; ++y) { dst[y * 16 + x] = src[y * 16 + x]; } } } template<typename T> __global__ void GemmPackedFull32x16(const MatMulParam* param, const int iBlock, T *c, const half *a, const half *b, const T* biasPtr) { size_t eU = param->elhPack[0]; size_t lU = param->elhPack[1]; size_t hU = param->elhPack[2]; size_t threadCount = blockDim.x / warpSize; size_t maxCount = eU * hU; size_t wrapId = threadIdx.x / warpSize; size_t laneId = threadIdx.x % warpSize; extern __shared__ float sharedMemory[]; T* cache = (T*)(sharedMemory + wrapId * 32 * 16); for (size_t index = blockIdx.x * threadCount + wrapId; index < maxCount; index += gridDim.x * threadCount) { size_t warpN = index % hU; size_t warpM = index / hU; // Declare the fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> MA0; wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> MA1; wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> MB0; wmma::fragment<wmma::accumulator, 16, 16, 16, T> MC0; wmma::fragment<wmma::accumulator, 16, 16, 16, T> MC1; wmma::load_matrix_sync(MC0, biasPtr + 16 * warpN + 0, 0, wmma::mem_row_major); for(size_t t=0; t<MC0.num_elements; t++){ MC1.x[t] = MC0.x[t]; } const half* aStart = a + warpM * lU * 32 * 16; const half* bStart = b + warpN * lU * 16 * 16; //printf("GemmPacked: %d - %d - %d, numele: %d, %d\n", eU, lU, hU, a_frag.num_elements, b_frag.num_elements); // MLA for (size_t i = 0; i < lU; ++i) { wmma::load_matrix_sync(MA0, aStart + i * 512 + 0, 16); wmma::load_matrix_sync(MA1, aStart + i * 512 + 256, 16); wmma::load_matrix_sync(MB0, bStart + i * 256 + 0, 16); wmma::mma_sync(MC0, MA0, MB0, MC0); wmma::mma_sync(MC1, MA1, MB0, MC1); } for(size_t t=0; t<MC0.num_elements; t++){ MC0.x[t] = max(MC0.x[t], param->minValue); MC0.x[t] = min(MC0.x[t], param->maxValue); } for(size_t t=0; t<MC1.num_elements; t++){ MC1.x[t] = max(MC1.x[t], param->minValue); MC1.x[t] = min(MC1.x[t], param->maxValue); } size_t eSta = (warpM + iBlock*eU) * 32; if(eSta >= (size_t)param->elh[0]) { continue; } size_t eEnd = ((eSta + (size_t)16) > (size_t)param->elh[0]) ? 
(size_t)param->elh[0] : (eSta + (size_t)16); size_t eC = eEnd - eSta; T* dst0 = (T*)(c + warpN * 16 * (size_t)param->elh[0] + eSta * 16); T* dst1 = (T*)(dst0 + 256); // First 8x32 wmma::store_matrix_sync(cache, MC0, 16, wmma::mem_row_major); // Second 8x32 wmma::store_matrix_sync(cache + 256, MC1, 16, wmma::mem_row_major); auto dst = dst0; auto src = cache; if (laneId >= 16) { dst = dst1; src = cache + 256; } size_t x = laneId % 16; for (size_t y = 0; y < eC; ++y) { dst[y * 16 + x] = src[y * 16 + x]; } } } void GemmPackedFullMain(CUDARuntime* runtime, const MatMulParam* cpuParam, const MatMulParam* param, void *c, const half *a, const half *b, const half* biasPtr, int bytes, int iBlock) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; // MNN_PRINT("%d: %d - %d - %d - %d\n", iBlock, cpuParam->elhPack[0], cpuParam->elhPack[1], cpuParam->elhPack[2], cpuParam->elh[0]); { int maxThreadInWarp = UP_DIV(cpuParam->elhPack[0] * cpuParam->elhPack[2], cores); int threads_num = ::min(prop.maxThreadsPerBlock, maxThreadInWarp * prop.warpSize); int basicMemory = 16 * 16 * sizeof(float) * prop.maxThreadsPerBlock / prop.warpSize; if (4 == bytes) { hipFuncSetAttribute(GemmPackedFull<float>, hipFuncAttributeMaxDynamicSharedMemorySize, prop.sharedMemPerMultiprocessor); hipLaunchKernelGGL(( GemmPackedFull), dim3(cores), dim3(threads_num), basicMemory, 0, param, iBlock, (float*)c, a, b, (float*)biasPtr); checkKernelErrors; } else { //MNN_PRINT("%d - %d, %d- %d\n", cpuParam->elhPack[0], cpuParam->elhPack[2], cpuParam->elh[0], cpuParam->elh[2]); hipFuncSetAttribute(GemmPackedFull<half>, hipFuncAttributeMaxDynamicSharedMemorySize, prop.sharedMemPerMultiprocessor); hipLaunchKernelGGL(( GemmPackedFull), dim3(cores), dim3(threads_num), basicMemory, 0, param, iBlock, (half*)c, a, b, (half*)biasPtr); checkKernelErrors; } } } void GemmPacked16x32(CUDARuntime* runtime, const MatMulParam* cpuParam, const MatMulParam* param, void *c, const half *a, const half *b, const half* biasPtr, int bytes, int iBlock) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; // MNN_PRINT("%d - %d - %d\n", cpuParam->elhPack[0], cpuParam->elhPack[1], cpuParam->elhPack[2]); { int hUP = cpuParam->elhPack[2]; int maxThreadInWarp = UP_DIV(cpuParam->elhPack[0] * hUP, cores); int threads_num = ALIMIN(512, maxThreadInWarp * prop.warpSize); //MNN_PRINT("GemmPacked16x32%d-%d-%d-%d-%d\n\n", hUP, cpuParam->elhPack[0], cpuParam->elhPack[2], cpuParam->elhPack[0]*cpuParam->elhPack[2], threads_num); threads_num = ALIMIN(prop.maxThreadsPerBlock, threads_num); int basicMemory = 32 * 16 * sizeof(float) * (threads_num / prop.warpSize); if (4 == bytes) { hipFuncSetAttribute(GemmPackedFull16x32<float>, hipFuncAttributeMaxDynamicSharedMemorySize, basicMemory); hipLaunchKernelGGL(( GemmPackedFull16x32), dim3(cores), dim3(threads_num), basicMemory, 0, param, iBlock, (float*)c, a, b, (float*)biasPtr); checkKernelErrors; } else { hipFuncSetAttribute(GemmPackedFull16x32<half>, hipFuncAttributeMaxDynamicSharedMemorySize, basicMemory); hipLaunchKernelGGL(( GemmPackedFull16x32), dim3(cores), dim3(threads_num), basicMemory, 0, param, iBlock, (half*)c, a, b, (half*)biasPtr); checkKernelErrors; } } } void GemmPacked32x16(CUDARuntime* runtime, const MatMulParam* cpuParam, const MatMulParam* param, void *c, const half *a, const half *b, const half* biasPtr, int bytes, int iBlock) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; // MNN_PRINT("%d - %d - %d\n", cpuParam->elhPack[0], cpuParam->elhPack[1], 
cpuParam->elhPack[2]); { int eUP = cpuParam->elhPack[0]; int maxThreadInWarp = UP_DIV(eUP * cpuParam->elhPack[2], cores); int threads_num = ALIMIN(512, maxThreadInWarp * prop.warpSize); //MNN_PRINT("GemmPacked32x16%d-%d-%d-%d-%d\n\n", eUP, cpuParam->elhPack[0], cpuParam->elhPack[2], cpuParam->elhPack[0]*cpuParam->elhPack[2], threads_num); threads_num = ALIMIN(prop.maxThreadsPerBlock, threads_num); int basicMemory = 32 * 16 * sizeof(float) * (threads_num / prop.warpSize); if (4 == bytes) { hipFuncSetAttribute(GemmPackedFull32x16<float>, hipFuncAttributeMaxDynamicSharedMemorySize, basicMemory); hipLaunchKernelGGL(( GemmPackedFull32x16), dim3(cores), dim3(threads_num), basicMemory, 0, param, iBlock, (float*)c, a, b, (float*)biasPtr); checkKernelErrors; } else { hipFuncSetAttribute(GemmPackedFull32x16<half>, hipFuncAttributeMaxDynamicSharedMemorySize, basicMemory); hipLaunchKernelGGL(( GemmPackedFull32x16), dim3(cores), dim3(threads_num), basicMemory, 0, param, iBlock, (half*)c, a, b, (half*)biasPtr); checkKernelErrors; } } } } }
d857e2e074d5c05b4b843f03e92ea363da89af5b.cu
#include <assert.h> #include <cuda.h> #include <mma.h> #include <cuda_runtime_api.h> #include "TensorCoreGemm.cuh" using namespace nvcuda; namespace MNN { namespace CUDA { template<typename T> __global__ void GemmPackedFull(const MatMulParam* param, const int iBlock, T *c, const half *a, const half *b, const T* biasPtr) { size_t eU = param->elhPack[0]; size_t lU = param->elhPack[1]; size_t hU = param->elhPack[2]; size_t maxCount = eU * hU * warpSize; size_t wrapId = threadIdx.x / warpSize; size_t laneId = threadIdx.x % warpSize; extern __shared__ float sharedMemory[]; T* cache = (T*)(sharedMemory + wrapId * 16 * 16); // Declare the fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag; wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> b_frag; wmma::fragment<wmma::accumulator, 16, 16, 16, T> acc_frag; for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { size_t subIndex = index / warpSize; size_t warpM = subIndex % eU; size_t warpN = subIndex / eU; wmma::load_matrix_sync(acc_frag, biasPtr + 16 * warpN, 0, wmma::mem_row_major); const half* aStart = a + warpM * lU * 16 * 16; const half* bStart = b + warpN * lU * 16 * 16; //printf("GemmPacked: %d - %d - %d, numele: %d, %d\n", eU, lU, hU, a_frag.num_elements, b_frag.num_elements); // MLA for (size_t i = 0; i < lU; ++i) { half* aTemp = ((half *)(aStart+i*256));//aStart + (i << 8) + (laneId << 1); half* bTemp = ((half *)(bStart+i*256));//bStart + (i << 8) + (laneId << 1); wmma::load_matrix_sync(a_frag, aStart + i * 256, 16); wmma::load_matrix_sync(b_frag, bStart + i * 256, 16); wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag); } for(size_t t=0; t<acc_frag.num_elements; t++){ acc_frag.x[t] = max(acc_frag.x[t], param->minValue); acc_frag.x[t] = min(acc_frag.x[t], param->maxValue); } size_t eSta = (warpM + iBlock*eU) * 16; if(eSta >= (size_t)param->elh[0]) { continue; } size_t eEnd = ((eSta + (size_t)16) > (size_t)param->elh[0]) ? 
(size_t)param->elh[0] : (eSta + (size_t)16); size_t eC = eEnd - eSta; T* dstStart = (T*)(c + warpN * 16 * (size_t)param->elh[0] + eSta * 16); wmma::store_matrix_sync(cache, acc_frag, 16, wmma::mem_row_major); if (warpSize % 16 == 0) { if(sizeof(T) == 4) { size_t r = warpSize / 16; size_t x = laneId / r; size_t ysta = laneId % r; for (size_t y = ysta; y < eC; y+=r) { float value = *((T*)(cache + 16 * y + x)); dstStart[y * 16 + x] = value; } } else { size_t xsta = (laneId % 8) * 2; size_t ysta = laneId / 8; for (size_t y = ysta; y < eC; y+=4) { dstStart[y * 16 + xsta] = *((T*)(cache + 16 * y + xsta)); dstStart[y * 16 + xsta + 1] = *((T*)(cache + 16 * y + xsta + 1)); } } } else { for (size_t tId = laneId; tId < eC * 16; tId += warpSize) { size_t y = tId % eC; size_t x = tId / eC; float value = *((T*)(cache + 16 * y + x)); dstStart[y * 16 + x] = value; } } } } template<typename T> __global__ void GemmPackedFull16x32(const MatMulParam* param, const int iBlock, T *c, const half *a, const half *b, const T* biasPtr) { size_t eU = param->elhPack[0]; size_t lU = param->elhPack[1]; size_t hU = param->elhPack[2]; size_t threadCount = blockDim.x / warpSize; size_t maxCount = eU * hU; size_t wrapId = threadIdx.x / warpSize; size_t laneId = threadIdx.x % warpSize; extern __shared__ float sharedMemory[]; T* cache = (T*)(sharedMemory + wrapId * 16 * 32); for (size_t index = blockIdx.x * threadCount + wrapId; index < maxCount; index += gridDim.x * threadCount) { size_t warpM = index % eU; size_t warpN = index / eU; // Declare the fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> MA0; wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> MB0; wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> MB1; wmma::fragment<wmma::accumulator, 16, 16, 16, T> MC0; wmma::fragment<wmma::accumulator, 16, 16, 16, T> MC1; wmma::load_matrix_sync(MC0, biasPtr + 32 * warpN + 0, 0, wmma::mem_row_major); wmma::load_matrix_sync(MC1, biasPtr + 32 * warpN + 16, 0, wmma::mem_row_major); const half* aStart = a + warpM * lU * 16 * 16; const half* bStart = b + warpN * lU * 16 * 32; //printf("GemmPacked: %d - %d - %d, numele: %d, %d\n", eU, lU, hU, a_frag.num_elements, b_frag.num_elements); // MLA for (size_t i = 0; i < lU; ++i) { wmma::load_matrix_sync(MA0, aStart + i * 256 + 0, 16); wmma::load_matrix_sync(MB0, bStart + i * 512, 16); wmma::load_matrix_sync(MB1, bStart + i * 512 + 256, 16); wmma::mma_sync(MC0, MA0, MB0, MC0); wmma::mma_sync(MC1, MA0, MB1, MC1); } for(size_t t=0; t<MC0.num_elements; t++){ MC0.x[t] = max(MC0.x[t], param->minValue); MC0.x[t] = min(MC0.x[t], param->maxValue); } for(size_t t=0; t<MC1.num_elements; t++){ MC1.x[t] = max(MC1.x[t], param->minValue); MC1.x[t] = min(MC1.x[t], param->maxValue); } size_t eSta = (warpM + iBlock*eU) * 16; if(eSta >= (size_t)param->elh[0]) { continue; } size_t eEnd = ((eSta + (size_t)16) > (size_t)param->elh[0]) ? 
(size_t)param->elh[0] : (eSta + (size_t)16); size_t eC = eEnd - eSta; T* dst0 = (T*)(c + warpN * 32 * (size_t)param->elh[0] + eSta * 16); T* dst1 = (T*)(c + (warpN * 32 + 16) * (size_t)param->elh[0] + eSta * 16); // First 8x32 wmma::store_matrix_sync(cache, MC0, 16, wmma::mem_row_major); // Second 8x32 wmma::store_matrix_sync(cache + 256, MC1, 16, wmma::mem_row_major); auto dst = dst0; auto src = cache; if (laneId >= 16) { dst = dst1; src = cache + 256; } size_t x = laneId % 16; for (size_t y = 0; y < eC; ++y) { dst[y * 16 + x] = src[y * 16 + x]; } } } template<typename T> __global__ void GemmPackedFull32x16(const MatMulParam* param, const int iBlock, T *c, const half *a, const half *b, const T* biasPtr) { size_t eU = param->elhPack[0]; size_t lU = param->elhPack[1]; size_t hU = param->elhPack[2]; size_t threadCount = blockDim.x / warpSize; size_t maxCount = eU * hU; size_t wrapId = threadIdx.x / warpSize; size_t laneId = threadIdx.x % warpSize; extern __shared__ float sharedMemory[]; T* cache = (T*)(sharedMemory + wrapId * 32 * 16); for (size_t index = blockIdx.x * threadCount + wrapId; index < maxCount; index += gridDim.x * threadCount) { size_t warpN = index % hU; size_t warpM = index / hU; // Declare the fragments wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> MA0; wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> MA1; wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> MB0; wmma::fragment<wmma::accumulator, 16, 16, 16, T> MC0; wmma::fragment<wmma::accumulator, 16, 16, 16, T> MC1; wmma::load_matrix_sync(MC0, biasPtr + 16 * warpN + 0, 0, wmma::mem_row_major); for(size_t t=0; t<MC0.num_elements; t++){ MC1.x[t] = MC0.x[t]; } const half* aStart = a + warpM * lU * 32 * 16; const half* bStart = b + warpN * lU * 16 * 16; //printf("GemmPacked: %d - %d - %d, numele: %d, %d\n", eU, lU, hU, a_frag.num_elements, b_frag.num_elements); // MLA for (size_t i = 0; i < lU; ++i) { wmma::load_matrix_sync(MA0, aStart + i * 512 + 0, 16); wmma::load_matrix_sync(MA1, aStart + i * 512 + 256, 16); wmma::load_matrix_sync(MB0, bStart + i * 256 + 0, 16); wmma::mma_sync(MC0, MA0, MB0, MC0); wmma::mma_sync(MC1, MA1, MB0, MC1); } for(size_t t=0; t<MC0.num_elements; t++){ MC0.x[t] = max(MC0.x[t], param->minValue); MC0.x[t] = min(MC0.x[t], param->maxValue); } for(size_t t=0; t<MC1.num_elements; t++){ MC1.x[t] = max(MC1.x[t], param->minValue); MC1.x[t] = min(MC1.x[t], param->maxValue); } size_t eSta = (warpM + iBlock*eU) * 32; if(eSta >= (size_t)param->elh[0]) { continue; } size_t eEnd = ((eSta + (size_t)16) > (size_t)param->elh[0]) ? 
(size_t)param->elh[0] : (eSta + (size_t)16); size_t eC = eEnd - eSta; T* dst0 = (T*)(c + warpN * 16 * (size_t)param->elh[0] + eSta * 16); T* dst1 = (T*)(dst0 + 256); // First 8x32 wmma::store_matrix_sync(cache, MC0, 16, wmma::mem_row_major); // Second 8x32 wmma::store_matrix_sync(cache + 256, MC1, 16, wmma::mem_row_major); auto dst = dst0; auto src = cache; if (laneId >= 16) { dst = dst1; src = cache + 256; } size_t x = laneId % 16; for (size_t y = 0; y < eC; ++y) { dst[y * 16 + x] = src[y * 16 + x]; } } } void GemmPackedFullMain(CUDARuntime* runtime, const MatMulParam* cpuParam, const MatMulParam* param, void *c, const half *a, const half *b, const half* biasPtr, int bytes, int iBlock) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; // MNN_PRINT("%d: %d - %d - %d - %d\n", iBlock, cpuParam->elhPack[0], cpuParam->elhPack[1], cpuParam->elhPack[2], cpuParam->elh[0]); { int maxThreadInWarp = UP_DIV(cpuParam->elhPack[0] * cpuParam->elhPack[2], cores); int threads_num = std::min(prop.maxThreadsPerBlock, maxThreadInWarp * prop.warpSize); int basicMemory = 16 * 16 * sizeof(float) * prop.maxThreadsPerBlock / prop.warpSize; if (4 == bytes) { cudaFuncSetAttribute(GemmPackedFull<float>, cudaFuncAttributeMaxDynamicSharedMemorySize, prop.sharedMemPerMultiprocessor); GemmPackedFull<<<cores, threads_num, basicMemory>>>(param, iBlock, (float*)c, a, b, (float*)biasPtr); checkKernelErrors; } else { //MNN_PRINT("%d - %d, %d- %d\n", cpuParam->elhPack[0], cpuParam->elhPack[2], cpuParam->elh[0], cpuParam->elh[2]); cudaFuncSetAttribute(GemmPackedFull<half>, cudaFuncAttributeMaxDynamicSharedMemorySize, prop.sharedMemPerMultiprocessor); GemmPackedFull<<<cores, threads_num, basicMemory>>>(param, iBlock, (half*)c, a, b, (half*)biasPtr); checkKernelErrors; } } } void GemmPacked16x32(CUDARuntime* runtime, const MatMulParam* cpuParam, const MatMulParam* param, void *c, const half *a, const half *b, const half* biasPtr, int bytes, int iBlock) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; // MNN_PRINT("%d - %d - %d\n", cpuParam->elhPack[0], cpuParam->elhPack[1], cpuParam->elhPack[2]); { int hUP = cpuParam->elhPack[2]; int maxThreadInWarp = UP_DIV(cpuParam->elhPack[0] * hUP, cores); int threads_num = ALIMIN(512, maxThreadInWarp * prop.warpSize); //MNN_PRINT("GemmPacked16x32:%d-%d-%d-%d-%d\n\n", hUP, cpuParam->elhPack[0], cpuParam->elhPack[2], cpuParam->elhPack[0]*cpuParam->elhPack[2], threads_num); threads_num = ALIMIN(prop.maxThreadsPerBlock, threads_num); int basicMemory = 32 * 16 * sizeof(float) * (threads_num / prop.warpSize); if (4 == bytes) { cudaFuncSetAttribute(GemmPackedFull16x32<float>, cudaFuncAttributeMaxDynamicSharedMemorySize, basicMemory); GemmPackedFull16x32<<<cores, threads_num, basicMemory>>>(param, iBlock, (float*)c, a, b, (float*)biasPtr); checkKernelErrors; } else { cudaFuncSetAttribute(GemmPackedFull16x32<half>, cudaFuncAttributeMaxDynamicSharedMemorySize, basicMemory); GemmPackedFull16x32<<<cores, threads_num, basicMemory>>>(param, iBlock, (half*)c, a, b, (half*)biasPtr); checkKernelErrors; } } } void GemmPacked32x16(CUDARuntime* runtime, const MatMulParam* cpuParam, const MatMulParam* param, void *c, const half *a, const half *b, const half* biasPtr, int bytes, int iBlock) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; // MNN_PRINT("%d - %d - %d\n", cpuParam->elhPack[0], cpuParam->elhPack[1], cpuParam->elhPack[2]); { int eUP = cpuParam->elhPack[0]; int maxThreadInWarp = UP_DIV(eUP * cpuParam->elhPack[2], cores); int threads_num = 
ALIMIN(512, maxThreadInWarp * prop.warpSize); //MNN_PRINT("GemmPacked32x16:%d-%d-%d-%d-%d\n\n", eUP, cpuParam->elhPack[0], cpuParam->elhPack[2], cpuParam->elhPack[0]*cpuParam->elhPack[2], threads_num); threads_num = ALIMIN(prop.maxThreadsPerBlock, threads_num); int basicMemory = 32 * 16 * sizeof(float) * (threads_num / prop.warpSize); if (4 == bytes) { cudaFuncSetAttribute(GemmPackedFull32x16<float>, cudaFuncAttributeMaxDynamicSharedMemorySize, basicMemory); GemmPackedFull32x16<<<cores, threads_num, basicMemory>>>(param, iBlock, (float*)c, a, b, (float*)biasPtr); checkKernelErrors; } else { cudaFuncSetAttribute(GemmPackedFull32x16<half>, cudaFuncAttributeMaxDynamicSharedMemorySize, basicMemory); GemmPackedFull32x16<<<cores, threads_num, basicMemory>>>(param, iBlock, (half*)c, a, b, (half*)biasPtr); checkKernelErrors; } } } } }
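// Editor's sketch, not part of the original MNN files: a minimal single-tile wmma
// kernel showing the fragment load/mma/store pattern that the GemmPackedFull*
// kernels above wrap in their packed eU/lU/hU loops. It multiplies one 16x16 half
// tile A (row-major) by one 16x16 half tile B (column-major) into a 16x16 float
// tile C; a single warp must execute it, and a tensor-core GPU (sm_70+) is assumed.
#include <mma.h>
using namespace nvcuda;

__global__ void wmma_single_tile(const half *a, const half *b, float *c) {
    wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> fa;
    wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> fb;
    wmma::fragment<wmma::accumulator, 16, 16, 16, float> fc;
    wmma::fill_fragment(fc, 0.0f);      // start from a zero accumulator
    wmma::load_matrix_sync(fa, a, 16);  // leading dimension 16, matching the packed tile layout
    wmma::load_matrix_sync(fb, b, 16);
    wmma::mma_sync(fc, fa, fb, fc);     // C += A * B for the whole tile
    wmma::store_matrix_sync(c, fc, 16, wmma::mem_row_major);
}
// Launched as, e.g., wmma_single_tile<<<1, 32>>>(dA, dB, dC);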
3057d13ebc885d34905de80f63cfac601a444847.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include "GoLgeneric.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cuda_device_runtime_api.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #define getl(X,Y) local[((X)+1) + (blockDim.x+2) * ((Y)+1)] __global__ void cuda_kernel(int * src, int * dst, size_t width, size_t height) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; dim3 li(threadIdx.x, threadIdx.y); extern __shared__ int local[]; //Load own if (idx < width && idy < height) { getl(li.x, li.y) = get_rm(src, idx, idy); size_t idxm1 = (size_t) (idx == 0) * (width - 1) + (size_t) (idx > 0) * (idx - 1); size_t idxp1 = (size_t) (idx + 1 < width) * (idx + 1); size_t idym1 = (size_t) (idy == 0) * (height - 1) + (size_t) (idy > 0) * (idy - 1); size_t idyp1 = (size_t) (idy + 1 < height) * (idy + 1); if (li.x == 0) //Left edge getl(-1, li.y) = get_rm(src, idxm1, idy); if (li.y == 0) //Upper edge getl(li.x, -1) = get_rm(src, idx, idym1); if (li.x == 0 && li.y == 0) //Upper left corner getl(-1, -1) = get_rm(src, idxm1, idym1); if (li.x == blockDim.x - 1 || idx == width - 1) // right edge getl(li.x + 1, li.y) = get_rm(src, idxp1, idy); if (li.y == blockDim.y - 1 || idy == height - 1) //bottom edge getl(li.x, li.y + 1) = get_rm(src, idx, idyp1); if ((li.y == blockDim.y - 1 || idy == height - 1) && li.x == 0) // lower left corner getl(li.x - 1, li.y + 1) = get_rm(src, idxp1, idy); if ((li.x == blockDim.x - 1 || idx == width - 1) || idy == 0) //upper right corner getl(li.x + 1, li.y - 1) = get_rm(src, idx, idyp1); if ((li.y == blockDim.y - 1 || idy == height - 1) && (li.x == blockDim.x - 1 || idx == width - 1)) //lower right corner getl(li.x + 1, li.y + 1) = get_rm(src, idxp1, idyp1); } __syncthreads(); if (idx < width && idy < height) { //If we are not a edge int acc = 0; acc += getl(li.x - 1, li.y + 1); acc += getl(li.x - 1, li.y + 0); acc += getl(li.x - 1, li.y - 1); acc += getl(li.x - 0, li.y + 1); // acc += getl(li.x - 0, li.y + 0); acc += getl(li.x - 0, li.y - 1); acc += getl(li.x + 1, li.y + 1); acc += getl(li.x + 1, li.y + 0); acc += getl(li.x + 1, li.y - 1); //acc = 2 : x * 1 + 0 //acc = 3 : x * 0 + 1 //acc = ? : x * 0 + 0 get_rm(dst, idx, idy) = getl(li.x, li.y) * (acc==2) + (acc==3); } }
3057d13ebc885d34905de80f63cfac601a444847.cu
#include <stdlib.h> #include "GoLgeneric.h" #include <cuda.h> #include <cuda_runtime.h> #include <cuda_device_runtime_api.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #define getl(X,Y) local[((X)+1) + (blockDim.x+2) * ((Y)+1)] __global__ void cuda_kernel(int * src, int * dst, size_t width, size_t height) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; dim3 li(threadIdx.x, threadIdx.y); extern __shared__ int local[]; //Load own if (idx < width && idy < height) { getl(li.x, li.y) = get_rm(src, idx, idy); size_t idxm1 = (size_t) (idx == 0) * (width - 1) + (size_t) (idx > 0) * (idx - 1); size_t idxp1 = (size_t) (idx + 1 < width) * (idx + 1); size_t idym1 = (size_t) (idy == 0) * (height - 1) + (size_t) (idy > 0) * (idy - 1); size_t idyp1 = (size_t) (idy + 1 < height) * (idy + 1); if (li.x == 0) //Left edge getl(-1, li.y) = get_rm(src, idxm1, idy); if (li.y == 0) //Upper edge getl(li.x, -1) = get_rm(src, idx, idym1); if (li.x == 0 && li.y == 0) //Upper left corner getl(-1, -1) = get_rm(src, idxm1, idym1); if (li.x == blockDim.x - 1 || idx == width - 1) // right edge getl(li.x + 1, li.y) = get_rm(src, idxp1, idy); if (li.y == blockDim.y - 1 || idy == height - 1) //bottom edge getl(li.x, li.y + 1) = get_rm(src, idx, idyp1); if ((li.y == blockDim.y - 1 || idy == height - 1) && li.x == 0) // lower left corner getl(li.x - 1, li.y + 1) = get_rm(src, idxp1, idy); if ((li.x == blockDim.x - 1 || idx == width - 1) || idy == 0) //upper right corner getl(li.x + 1, li.y - 1) = get_rm(src, idx, idyp1); if ((li.y == blockDim.y - 1 || idy == height - 1) && (li.x == blockDim.x - 1 || idx == width - 1)) //lower right corner getl(li.x + 1, li.y + 1) = get_rm(src, idxp1, idyp1); } __syncthreads(); if (idx < width && idy < height) { //If we are not a edge int acc = 0; acc += getl(li.x - 1, li.y + 1); acc += getl(li.x - 1, li.y + 0); acc += getl(li.x - 1, li.y - 1); acc += getl(li.x - 0, li.y + 1); // acc += getl(li.x - 0, li.y + 0); acc += getl(li.x - 0, li.y - 1); acc += getl(li.x + 1, li.y + 1); acc += getl(li.x + 1, li.y + 0); acc += getl(li.x + 1, li.y - 1); //acc = 2 : x * 1 + 0 //acc = 3 : x * 0 + 1 //acc = ? : x * 0 + 0 get_rm(dst, idx, idy) = getl(li.x, li.y) * (acc==2) + (acc==3); } }
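// Editor's sketch, not part of the original file: a host-side launch for cuda_kernel
// above. The getl macro addresses a (blockDim.x+2) x (blockDim.y+2) halo tile of ints,
// so that many bytes of dynamic shared memory must be passed as the third launch
// parameter. The 32x8 block shape and the wrapper name run_gol_step are assumptions.
void run_gol_step(int *d_src, int *d_dst, size_t width, size_t height) {
    dim3 block(32, 8);
    dim3 grid((width + block.x - 1) / block.x,
              (height + block.y - 1) / block.y);
    size_t shmem = (block.x + 2) * (block.y + 2) * sizeof(int); // halo tile
    cuda_kernel<<<grid, block, shmem>>>(d_src, d_dst, width, height);
}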
6d01750d1011615404412d92240a6f56860041e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdint.h> #include <stdio.h> #include <assert.h> #include "test_buffer.h" const int kernel_radius = 9; const int kernel_area = (kernel_radius*2+1)*(kernel_radius*2+1); const int width = 6400, height = 4800; #ifdef _MSC_VER #define ALWAYS_INLINE __forceinline #else #define ALWAYS_INLINE __attribute__((always_inline)) #endif template <typename T, bool static_addr=false> ALWAYS_INLINE inline __device__ T& write_pixel(buffer_t buf, int x, int y) { T *data = (T*)buf.dev; int x_min, y_min, x_stride, y_stride; if (static_addr) { assert(buf.stride[0] == 1); assert(buf.stride[1] > 6300 && buf.stride[1] < 6500); x_min = buf.min[0]; y_min = buf.min[1]; x_stride = 1; y_stride = buf.stride[1]; } else { x_min = 0; y_min = 0; x_stride = 1; y_stride = width; } int x_offset = (x - x_min) * x_stride; int y_offset = (y - y_min) * y_stride; return data[x_offset + y_offset]; } template <typename T, bool static_addr=false> ALWAYS_INLINE inline const __device__ T read_pixel(const buffer_t buf, int x, int y) { const T *data = (const T*)buf.dev; int x_min, y_min, x_stride, y_stride; if (static_addr) { assert(buf.stride[0] == 1); assert(buf.stride[1] > 6300 && buf.stride[1] < 6500); x_min = buf.min[0]; y_min = buf.min[1]; x_stride = 1; y_stride = buf.stride[1]; } else { x_min = -kernel_radius; y_min = -kernel_radius; x_stride = 1; y_stride = width+2*kernel_radius; } int x_offset = (x - x_min) * x_stride; int y_offset = (y - y_min) * y_stride; return __ldg(data + x_offset + y_offset); } __global__ void boxBlurBuf(const buffer_t in, buffer_t out) { int x = blockIdx.x * blockDim.x + threadIdx.x + out.min[0]; int y = blockIdx.y * blockDim.y + threadIdx.y + out.min[1]; #if 0 if (x < out.min[0] || y < out.min[1] || x >= out.extent[0] || y >= out.extent[1]) { return; } #endif float res = 0; for (int j = -kernel_radius; j <= kernel_radius; j++) { for (int i = -kernel_radius; i <= kernel_radius; i++) { res += read_pixel<float>(in, x+i, y+j); } } res /= float(kernel_area); write_pixel<float>(out, x,y) = res; } __global__ void boxBlurBufStatic(const buffer_t in, buffer_t out) { int x = blockIdx.x * blockDim.x + threadIdx.x + out.min[0]; int y = blockIdx.y * blockDim.y + threadIdx.y + out.min[1]; float res = 0; for (int j = -kernel_radius; j <= kernel_radius; j++) { for (int i = -kernel_radius; i <= kernel_radius; i++) { res += read_pixel<float,true>(in, x+i, y+j); } } res /= kernel_area; write_pixel<float,true>(out, x,y) = res; } // TODO: restrict on in/out DOUBLES performance! 
#define OUT_PIXEL(x,y) (out[(x)+width*(y)]) #define IN_PIXEL(x,y) (in[((x)+kernel_radius)+width*((y)+kernel_radius)]) __global__ void boxBlurStatic(const float * __restrict__ in, float *out) { // boxBlurStatic(const float *in, float *out) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; float res = 0; for (int j = -kernel_radius; j <= kernel_radius; j++) { for (int i = -kernel_radius; i <= kernel_radius; i++) { res += IN_PIXEL(x+i, y+j); } } res /= float(kernel_area); OUT_PIXEL(x,y) = res; } __global__ void boxBlurStaticNonRestrict(const float *in, float *out) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; float res = 0; for (int j = -kernel_radius; j <= kernel_radius; j++) { for (int i = -kernel_radius; i <= kernel_radius; i++) { res += IN_PIXEL(x+i, y+j); } } res /= float(kernel_area); OUT_PIXEL(x,y) = res; } #undef OUT_PIXEL #undef IN_PIXEL #ifndef __CUDA_ARCH__ const int block_width = 32, block_height = 32; dim3 blocks((width + block_width - 1) / block_width, (height + block_height - 1) / block_height); dim3 threads(block_width, block_height); using std::vector; using std::string; using std::pair; // template<class ...Args> // void variant(std::string name, void(*kernel)(Args...), Args... args) { // variants.push_back( // std::make_pair(name, [&]{ // kernel<<<blocks, threads>>>(args...); // }) // ); // } #define variant(nm,...) (variants.push_back(std::make_pair( \ #nm, [&]{\ hipLaunchKernelGGL(( (nm)), dim3(blocks),dim3(threads), 0, 0, __VA_ARGS__); \ } \ ))) int main (int argc, char const *argv[]) { int trials = 1; if (argc == 2) { trials = atoi(argv[1]); } Buffer<float> in(width+2*kernel_radius, height+2*kernel_radius), out(width, height); in.set_min(-kernel_radius, -kernel_radius); in.for_each_element([&](int x, int y) { in(x, y) = (x % 3 == 0 && y % 3 == 0) ? 1.f : 0.f; }); dev_malloc(in); dev_malloc(out); host_to_dev(in); host_to_dev(out); hipEvent_t startEv, endEv; hipEventCreate(&startEv); hipEventCreate(&endEv); typedef std::function<void(void)> Fn; vector<pair<string,Fn> > variants; variant(boxBlurBuf, *(in.raw_buffer()), *(out.raw_buffer()) ); variant(boxBlurBufStatic, *(in.raw_buffer()), *(out.raw_buffer()) ); variant(boxBlurStatic, (float*)in.raw_buffer()->dev, (float*)out.raw_buffer()->dev); variant(boxBlurStaticNonRestrict, (float*)in.raw_buffer()->dev, (float*)out.raw_buffer()->dev); for (auto &variant : variants) { std::string name; Fn fn; std::tie(name, fn) = variant; hipEventRecord(startEv); for (int i = 0; i < trials; i++) { fn(); } hipEventRecord(endEv); dev_to_host(in); dev_to_host(out); float elapsed; hipEventElapsedTime(&elapsed, startEv, endEv); printf( "\n-------\n" "%s\n" "TIME: %f ms / %d trials = %f ms\n", name.c_str(), elapsed, trials, elapsed/trials ); int64_t pixels = width*height; int64_t kernel_pixels = (kernel_radius*2+1)*(kernel_radius*2+1); printf("Inputs accumulated: %ldM\n", pixels*kernel_pixels/1000000); } dev_free(in); dev_free(out); return 0; } #endif //host-only
6d01750d1011615404412d92240a6f56860041e5.cu
#include <stdint.h> #include <stdio.h> #include <assert.h> #include "test_buffer.h" const int kernel_radius = 9; const int kernel_area = (kernel_radius*2+1)*(kernel_radius*2+1); const int width = 6400, height = 4800; #ifdef _MSC_VER #define ALWAYS_INLINE __forceinline #else #define ALWAYS_INLINE __attribute__((always_inline)) #endif template <typename T, bool static_addr=false> ALWAYS_INLINE inline __device__ T& write_pixel(buffer_t buf, int x, int y) { T *data = (T*)buf.dev; int x_min, y_min, x_stride, y_stride; if (static_addr) { assert(buf.stride[0] == 1); assert(buf.stride[1] > 6300 && buf.stride[1] < 6500); x_min = buf.min[0]; y_min = buf.min[1]; x_stride = 1; y_stride = buf.stride[1]; } else { x_min = 0; y_min = 0; x_stride = 1; y_stride = width; } int x_offset = (x - x_min) * x_stride; int y_offset = (y - y_min) * y_stride; return data[x_offset + y_offset]; } template <typename T, bool static_addr=false> ALWAYS_INLINE inline const __device__ T read_pixel(const buffer_t buf, int x, int y) { const T *data = (const T*)buf.dev; int x_min, y_min, x_stride, y_stride; if (static_addr) { assert(buf.stride[0] == 1); assert(buf.stride[1] > 6300 && buf.stride[1] < 6500); x_min = buf.min[0]; y_min = buf.min[1]; x_stride = 1; y_stride = buf.stride[1]; } else { x_min = -kernel_radius; y_min = -kernel_radius; x_stride = 1; y_stride = width+2*kernel_radius; } int x_offset = (x - x_min) * x_stride; int y_offset = (y - y_min) * y_stride; return __ldg(data + x_offset + y_offset); } __global__ void boxBlurBuf(const buffer_t in, buffer_t out) { int x = blockIdx.x * blockDim.x + threadIdx.x + out.min[0]; int y = blockIdx.y * blockDim.y + threadIdx.y + out.min[1]; #if 0 if (x < out.min[0] || y < out.min[1] || x >= out.extent[0] || y >= out.extent[1]) { return; } #endif float res = 0; for (int j = -kernel_radius; j <= kernel_radius; j++) { for (int i = -kernel_radius; i <= kernel_radius; i++) { res += read_pixel<float>(in, x+i, y+j); } } res /= float(kernel_area); write_pixel<float>(out, x,y) = res; } __global__ void boxBlurBufStatic(const buffer_t in, buffer_t out) { int x = blockIdx.x * blockDim.x + threadIdx.x + out.min[0]; int y = blockIdx.y * blockDim.y + threadIdx.y + out.min[1]; float res = 0; for (int j = -kernel_radius; j <= kernel_radius; j++) { for (int i = -kernel_radius; i <= kernel_radius; i++) { res += read_pixel<float,true>(in, x+i, y+j); } } res /= kernel_area; write_pixel<float,true>(out, x,y) = res; } // TODO: restrict on in/out DOUBLES performance! 
#define OUT_PIXEL(x,y) (out[(x)+width*(y)]) #define IN_PIXEL(x,y) (in[((x)+kernel_radius)+width*((y)+kernel_radius)]) __global__ void boxBlurStatic(const float * __restrict__ in, float *out) { // boxBlurStatic(const float *in, float *out) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; float res = 0; for (int j = -kernel_radius; j <= kernel_radius; j++) { for (int i = -kernel_radius; i <= kernel_radius; i++) { res += IN_PIXEL(x+i, y+j); } } res /= float(kernel_area); OUT_PIXEL(x,y) = res; } __global__ void boxBlurStaticNonRestrict(const float *in, float *out) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; float res = 0; for (int j = -kernel_radius; j <= kernel_radius; j++) { for (int i = -kernel_radius; i <= kernel_radius; i++) { res += IN_PIXEL(x+i, y+j); } } res /= float(kernel_area); OUT_PIXEL(x,y) = res; } #undef OUT_PIXEL #undef IN_PIXEL #ifndef __CUDA_ARCH__ const int block_width = 32, block_height = 32; dim3 blocks((width + block_width - 1) / block_width, (height + block_height - 1) / block_height); dim3 threads(block_width, block_height); using std::vector; using std::string; using std::pair; // template<class ...Args> // void variant(std::string name, void(*kernel)(Args...), Args... args) { // variants.push_back( // std::make_pair(name, [&]{ // kernel<<<blocks, threads>>>(args...); // }) // ); // } #define variant(nm,...) (variants.push_back(std::make_pair( \ #nm, [&]{\ (nm)<<<blocks,threads>>>(__VA_ARGS__); \ } \ ))) int main (int argc, char const *argv[]) { int trials = 1; if (argc == 2) { trials = atoi(argv[1]); } Buffer<float> in(width+2*kernel_radius, height+2*kernel_radius), out(width, height); in.set_min(-kernel_radius, -kernel_radius); in.for_each_element([&](int x, int y) { in(x, y) = (x % 3 == 0 && y % 3 == 0) ? 1.f : 0.f; }); dev_malloc(in); dev_malloc(out); host_to_dev(in); host_to_dev(out); cudaEvent_t startEv, endEv; cudaEventCreate(&startEv); cudaEventCreate(&endEv); typedef std::function<void(void)> Fn; vector<pair<string,Fn> > variants; variant(boxBlurBuf, *(in.raw_buffer()), *(out.raw_buffer()) ); variant(boxBlurBufStatic, *(in.raw_buffer()), *(out.raw_buffer()) ); variant(boxBlurStatic, (float*)in.raw_buffer()->dev, (float*)out.raw_buffer()->dev); variant(boxBlurStaticNonRestrict, (float*)in.raw_buffer()->dev, (float*)out.raw_buffer()->dev); for (auto &variant : variants) { std::string name; Fn fn; std::tie(name, fn) = variant; cudaEventRecord(startEv); for (int i = 0; i < trials; i++) { fn(); } cudaEventRecord(endEv); dev_to_host(in); dev_to_host(out); float elapsed; cudaEventElapsedTime(&elapsed, startEv, endEv); printf( "\n-------\n" "%s\n" "TIME: %f ms / %d trials = %f ms\n", name.c_str(), elapsed, trials, elapsed/trials ); int64_t pixels = width*height; int64_t kernel_pixels = (kernel_radius*2+1)*(kernel_radius*2+1); printf("Inputs accumulated: %ldM\n", pixels*kernel_pixels/1000000); } dev_free(in); dev_free(out); return 0; } #endif //host-only
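/* Illustrative extra variant (not part of the original benchmark above). The "restrict on
 * in/out DOUBLES performance" note refers to `const float * __restrict__` letting the compiler
 * route the input loads through the read-only data cache; the buffer_t-based kernels get the
 * same effect explicitly via __ldg() inside read_pixel<>(). The hypothetical kernel below
 * combines the static addressing of boxBlurStatic with explicit __ldg() loads, reusing the
 * kernel_radius, kernel_area and width constants defined at the top of this file (it assumes a
 * GPU of compute capability 3.5 or newer, which __ldg requires). */
__global__ void boxBlurStaticLdg(const float * __restrict__ in, float *out) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    float res = 0.f;
    for (int j = -kernel_radius; j <= kernel_radius; j++) {
        for (int i = -kernel_radius; i <= kernel_radius; i++) {
            // explicit read-only-cache load of the same element IN_PIXEL(x+i, y+j) addressed
            res += __ldg(&in[((x + i) + kernel_radius) + width * ((y + j) + kernel_radius)]);
        }
    }
    res /= float(kernel_area);
    out[x + width * y] = res;
}
/* If desired, it could be timed alongside the other variants by adding, inside main():
 *   variant(boxBlurStaticLdg, (float*)in.raw_buffer()->dev, (float*)out.raw_buffer()->dev);
 */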
8c87126797024c9fe3064abd3359c5cfc8118fa2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <helper_cuda.h> #include <helper_timer.h> #include "kernel.h" #include "kernel1.h" int device = 0; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runCUDA( float *h_dataA, float* h_dataB, int width, int height, int passes, int threadsPerBlock, int shouldPrint); void runSerial( float * h_dataA, float * h_dataB, int width, int height, int passes, int shouldPrint); void printArray(float *arr, int rows, int cols, int shouldPrint); float * serial (float *a1, float*a2, int width, int height, int passes) ; void initializeArrays(float *a1, float *a2, int width, int height); void usage(); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { // jacobi threadsperblock passes width height [p] if(argc < 5 ){ usage(); return 1; } int threadsPerBlock = atoi(argv[1]); int passes = atoi(argv[2]); int width = atoi(argv[3]); int height = atoi(argv[4]); int shouldPrint=0; if(argc == 6 ) { if (argv[5][0]=='p'){ shouldPrint=1; } else { usage(); return 1; } } float * h_dataA= (float *)malloc(width * height * sizeof(float)); float * h_dataB= (float *)malloc(width * height * sizeof(float)); initializeArrays(h_dataA, h_dataB, width, height); if (threadsPerBlock == 0){ runSerial(h_dataA, h_dataB, width, height, passes, shouldPrint); } else { runCUDA(h_dataA, h_dataB, width, height, passes, threadsPerBlock, shouldPrint); } // Clean up Memory free( h_dataA); free( h_dataB); } //////////////////////////////////////////////////////////////////////////////// //! Run the CUDA version //////////////////////////////////////////////////////////////////////////////// void runCUDA( float *h_dataA, float* h_dataB, int width, int height, int passes, int threadsPerBlock, int shouldPrint){ // Use card 0 (See top of file to make sure you are using your assigned device.) checkCudaErrors(hipSetDevice(device)); // To ensure alignment, we'll use the code below to pad rows of the arrays when they are // allocated on the device. size_t pitch; // allocate device memory for data A float* d_dataA; checkCudaErrors( hipMallocPitch( (void**) &d_dataA, &pitch, width * sizeof(float), height)); // copy host memory to device memory for image A checkCudaErrors( hipMemcpy2D( d_dataA, pitch, h_dataA, width * sizeof(float), width * sizeof(float), height, hipMemcpyHostToDevice) ); // repeat for second device array float* d_dataB; checkCudaErrors( hipMallocPitch( (void**) &d_dataB, &pitch, width * sizeof(float), height)); // copy host memory to device memory for image B checkCudaErrors( hipMemcpy2D( d_dataB, pitch, h_dataB, width * sizeof(float), width * sizeof(float), height, hipMemcpyHostToDevice) ); //*************************** // setup CUDA execution parameters int blockHeight; int blockWidth; // When testing with small arrays, this code might be useful. Feel free to change it. if (threadsPerBlock > width - 2 ){ blockWidth = 16 * (int) ceil((width - 2) / 16.0); blockHeight = 1; } else { blockWidth = threadsPerBlock; blockHeight = 1; } int gridWidth = (int) ceil( (width - 2) / (float) blockWidth); int gridHeight = (int) ceil( (height - 2) / (float) blockHeight); // number of blocks required to process all the data. 
int numBlocks = gridWidth * gridHeight; // Each block gets a shared memory region of this size. unsigned int shared_mem_size = ((blockWidth + 2) * 4) * sizeof(float); printf("blockDim.x=%d blockDim.y=%d grid = %d x %d\n", blockWidth, blockHeight, gridWidth, gridHeight); printf("numBlocks = %d, threadsPerBlock = %d shared_mem_size = %d\n", numBlocks, threadsPerBlock, shared_mem_size); if(gridWidth > 65536 || gridHeight > 65536) { fprintf(stderr, "****Error: a block dimension is too large.\n"); } if(threadsPerBlock > 1024) { fprintf(stderr, "****Error: number of threads per block is too large.\n"); } if(shared_mem_size > 49152) { fprintf(stderr, "****Error: shared memory per block is too large.\n"); } // Format the grid, which is a collection of blocks. dim3 grid( gridWidth, gridHeight, 1); // Format the blocks. dim3 threads( blockWidth, blockHeight, 1); printArray(h_dataA, height, width, shouldPrint); StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); float * temp; for(int r=0; r<passes; r++){ //execute the kernel // k1 <<< grid, threads, shared_mem_size >>>( d_dataA, d_dataB, pitch/sizeof(float), width); //uncomment the following line to use k0, the simple kernel, provived in kernel.cu hipLaunchKernelGGL(( k0) , dim3(grid), dim3(threads) , 0, 0, d_dataA, d_dataB, pitch/sizeof(float), width); // swap the device data pointers temp = d_dataA; d_dataA = d_dataB; d_dataB = temp; } // check if kernel execution generated an error hipError_t code = hipGetLastError(); if (code != hipSuccess){ printf ("Cuda Kerel Launch error -- %s\n", hipGetErrorString(code)); } hipDeviceSynchronize(); sdkStopTimer(&timer); //checkCudaErrors( cutStopTimer( timer)); // copy result from device to host checkCudaErrors( hipMemcpy2D( h_dataA, width * sizeof(float), d_dataA, pitch, width * sizeof(float), height,hipMemcpyDeviceToHost) ); printArray(h_dataA, height, width, shouldPrint); printf( "Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); // cleanup memory checkCudaErrors(hipFree(d_dataA)); checkCudaErrors(hipFree(d_dataB)); } /* Run the serial jacobi code using the referenced arrays of floats with given width and height for * the specified number of passes. If the final parameter is non-zero, the initial and final states * of the arrays will be printed. In all cases, the execution time will be printed to stdout. * * For the first pass, values will be read from h_dataA and written to h_dataB. For subsequent * passes, the role of the arrays will be reversed. */ void runSerial( float * h_dataA, float * h_dataB, int width, int height, int passes, int shouldPrint){ printf("Running Serial Code.\n"); float * serialResult; printArray(h_dataA, height, width, shouldPrint); StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); serialResult = serial(h_dataA, h_dataB, width, height, passes); sdkStopTimer(&timer); printArray(serialResult, height, width, shouldPrint); printf( "Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); } /* Performs the specified number of passes of jacobi iteration on two arrays * of the given width and height. For the first pass, values will be read from * a1 and written to a2. For subsequent passes, the role of the arrays will * be exchanged. In all cases, a pointer to the most recently changed array * is returned. * * For each element, this code computes a weighted average of the neighbors * and then reduces this value by 5% to simulate heat loss. 
There is nothing * mathematically or physically rigorous about this calculation, and it is * simply meant to provide an interesting parallel programming example. */ float * serial (float *a1, float*a2, int width, int height, int passes) { int i,j,p; float * old=a1; float * New=a2; float * temp; for(p=0; p<passes; p++){ for(i=1; i<height-1; i++){ for(j=1; j<width-1; j++){ New[i*width +j] = ( 0.2f * old[i*width + j] + 0.1f * old[(i-1) * width + j ] + //N 0.1f * old[(i-1) * width + (j+1)] + //NE 0.1f * old[ i * width + (j+1)] + //E 0.1f * old[(i+1) * width + (j+1)] + //SE 0.1f * old[(i+1) * width + j ] + //S 0.1f * old[(i+1) * width + (j-1)] + //SW 0.1f * old[ i * width + (j-1)] + //W 0.1f * old[(i-1) * width + (j-1)] //NW ) * 0.95f; } } temp = New; New = old; old = temp; } return old; } /* Initialize the two arrays referenced by the first two parameters in preparation for * jacobi iteration. The width and height of the arrays are given by the integer parameters. * Border elements are set to 5.0 for both arrays, and the interior elements of a1 are * set to 1.0. Interior elements of a2 are not initialized. */ void initializeArrays(float *a1, float *a2, int width, int height){ int i, j; for(i=0; i<height; i++){ for(j=0; j<width; j++){ if(i==0 || j ==0 || i==height-1 || j==width-1){ a1[i*width + j] = 5.0; a2[i*width + j] = 5.0; }else { a1[i*width + j] = 1.0; } } } } /* Print the 2D array of floats referenced by the first parameter. The second and third * parameters specify its dimensions, while the last argument indicates whether printing * is actually descired at all. No output is produced if shouldPrint == 0. */ void printArray(float *arr, int rows, int cols, int shouldPrint){ if (!shouldPrint) return; int i,j; for(i=0; i<rows; i++){ for(j=0; j<cols; j++){ printf("%04.2f ", arr[i*cols + j]); } printf("\n"); } printf("\n"); } /* Prints a short but informative message about program usage.*/ void usage(){ fprintf(stderr, "usage: jacobi threadsperblock passes width height [p]\n"); fprintf(stderr, " (if threadsperblock == 0, serial code is run)\n"); }
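/* Worked example of a single serial() update, as a sanity check of the stencil described in
 * the comment above (0.2 * centre + 0.1 * each of the 8 neighbours, all scaled by 0.95 for
 * "heat loss"). With the initial condition from initializeArrays() (borders 5.0, interior 1.0),
 * the interior corner cell (i=1, j=1) has five border neighbours (N, NE, W, SW, NW = 5.0) and
 * three interior neighbours (E, SE, S = 1.0), so its value after one pass is
 *
 *   (0.2*1.0 + 0.1*(5*5.0 + 3*1.0)) * 0.95 = (0.2 + 2.8) * 0.95 = 2.85
 *
 * The hypothetical helper below (not called anywhere in this program) checks exactly that on a
 * tiny 4x4 grid. */
void checkStencilOnce() {
    const int w = 4, h = 4, passes = 1;
    float a1[16], a2[16];
    initializeArrays(a1, a2, w, h);              /* borders 5.0, interior 1.0 */
    float *result = serial(a1, a2, w, h, passes);
    printf("cell (1,1) after one pass: %4.2f (expected 2.85)\n", result[1*w + 1]);
}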
8c87126797024c9fe3064abd3359c5cfc8118fa2.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <helper_cuda.h> #include <helper_timer.h> #include "kernel.h" #include "kernel1.h" int device = 0; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runCUDA( float *h_dataA, float* h_dataB, int width, int height, int passes, int threadsPerBlock, int shouldPrint); void runSerial( float * h_dataA, float * h_dataB, int width, int height, int passes, int shouldPrint); void printArray(float *arr, int rows, int cols, int shouldPrint); float * serial (float *a1, float*a2, int width, int height, int passes) ; void initializeArrays(float *a1, float *a2, int width, int height); void usage(); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { // jacobi threadsperblock passes width height [p] if(argc < 5 ){ usage(); return 1; } int threadsPerBlock = atoi(argv[1]); int passes = atoi(argv[2]); int width = atoi(argv[3]); int height = atoi(argv[4]); int shouldPrint=0; if(argc == 6 ) { if (argv[5][0]=='p'){ shouldPrint=1; } else { usage(); return 1; } } float * h_dataA= (float *)malloc(width * height * sizeof(float)); float * h_dataB= (float *)malloc(width * height * sizeof(float)); initializeArrays(h_dataA, h_dataB, width, height); if (threadsPerBlock == 0){ runSerial(h_dataA, h_dataB, width, height, passes, shouldPrint); } else { runCUDA(h_dataA, h_dataB, width, height, passes, threadsPerBlock, shouldPrint); } // Clean up Memory free( h_dataA); free( h_dataB); } //////////////////////////////////////////////////////////////////////////////// //! Run the CUDA version //////////////////////////////////////////////////////////////////////////////// void runCUDA( float *h_dataA, float* h_dataB, int width, int height, int passes, int threadsPerBlock, int shouldPrint){ // Use card 0 (See top of file to make sure you are using your assigned device.) checkCudaErrors(cudaSetDevice(device)); // To ensure alignment, we'll use the code below to pad rows of the arrays when they are // allocated on the device. size_t pitch; // allocate device memory for data A float* d_dataA; checkCudaErrors( cudaMallocPitch( (void**) &d_dataA, &pitch, width * sizeof(float), height)); // copy host memory to device memory for image A checkCudaErrors( cudaMemcpy2D( d_dataA, pitch, h_dataA, width * sizeof(float), width * sizeof(float), height, cudaMemcpyHostToDevice) ); // repeat for second device array float* d_dataB; checkCudaErrors( cudaMallocPitch( (void**) &d_dataB, &pitch, width * sizeof(float), height)); // copy host memory to device memory for image B checkCudaErrors( cudaMemcpy2D( d_dataB, pitch, h_dataB, width * sizeof(float), width * sizeof(float), height, cudaMemcpyHostToDevice) ); //*************************** // setup CUDA execution parameters int blockHeight; int blockWidth; // When testing with small arrays, this code might be useful. Feel free to change it. if (threadsPerBlock > width - 2 ){ blockWidth = 16 * (int) ceil((width - 2) / 16.0); blockHeight = 1; } else { blockWidth = threadsPerBlock; blockHeight = 1; } int gridWidth = (int) ceil( (width - 2) / (float) blockWidth); int gridHeight = (int) ceil( (height - 2) / (float) blockHeight); // number of blocks required to process all the data. 
int numBlocks = gridWidth * gridHeight; // Each block gets a shared memory region of this size. unsigned int shared_mem_size = ((blockWidth + 2) * 4) * sizeof(float); printf("blockDim.x=%d blockDim.y=%d grid = %d x %d\n", blockWidth, blockHeight, gridWidth, gridHeight); printf("numBlocks = %d, threadsPerBlock = %d shared_mem_size = %d\n", numBlocks, threadsPerBlock, shared_mem_size); if(gridWidth > 65536 || gridHeight > 65536) { fprintf(stderr, "****Error: a block dimension is too large.\n"); } if(threadsPerBlock > 1024) { fprintf(stderr, "****Error: number of threads per block is too large.\n"); } if(shared_mem_size > 49152) { fprintf(stderr, "****Error: shared memory per block is too large.\n"); } // Format the grid, which is a collection of blocks. dim3 grid( gridWidth, gridHeight, 1); // Format the blocks. dim3 threads( blockWidth, blockHeight, 1); printArray(h_dataA, height, width, shouldPrint); StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); float * temp; for(int r=0; r<passes; r++){ //execute the kernel // k1 <<< grid, threads, shared_mem_size >>>( d_dataA, d_dataB, pitch/sizeof(float), width); //uncomment the following line to use k0, the simple kernel, provived in kernel.cu k0 <<< grid, threads >>>( d_dataA, d_dataB, pitch/sizeof(float), width); // swap the device data pointers temp = d_dataA; d_dataA = d_dataB; d_dataB = temp; } // check if kernel execution generated an error cudaError_t code = cudaGetLastError(); if (code != cudaSuccess){ printf ("Cuda Kerel Launch error -- %s\n", cudaGetErrorString(code)); } cudaThreadSynchronize(); sdkStopTimer(&timer); //checkCudaErrors( cutStopTimer( timer)); // copy result from device to host checkCudaErrors( cudaMemcpy2D( h_dataA, width * sizeof(float), d_dataA, pitch, width * sizeof(float), height,cudaMemcpyDeviceToHost) ); printArray(h_dataA, height, width, shouldPrint); printf( "Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); // cleanup memory checkCudaErrors(cudaFree(d_dataA)); checkCudaErrors(cudaFree(d_dataB)); } /* Run the serial jacobi code using the referenced arrays of floats with given width and height for * the specified number of passes. If the final parameter is non-zero, the initial and final states * of the arrays will be printed. In all cases, the execution time will be printed to stdout. * * For the first pass, values will be read from h_dataA and written to h_dataB. For subsequent * passes, the role of the arrays will be reversed. */ void runSerial( float * h_dataA, float * h_dataB, int width, int height, int passes, int shouldPrint){ printf("Running Serial Code.\n"); float * serialResult; printArray(h_dataA, height, width, shouldPrint); StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); serialResult = serial(h_dataA, h_dataB, width, height, passes); sdkStopTimer(&timer); printArray(serialResult, height, width, shouldPrint); printf( "Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); } /* Performs the specified number of passes of jacobi iteration on two arrays * of the given width and height. For the first pass, values will be read from * a1 and written to a2. For subsequent passes, the role of the arrays will * be exchanged. In all cases, a pointer to the most recently changed array * is returned. * * For each element, this code computes a weighted average of the neighbors * and then reduces this value by 5% to simulate heat loss. 
There is nothing * mathematically or physically rigorous about this calculation, and it is * simply meant to provide an interesting parallel programming example. */ float * serial (float *a1, float*a2, int width, int height, int passes) { int i,j,p; float * old=a1; float * New=a2; float * temp; for(p=0; p<passes; p++){ for(i=1; i<height-1; i++){ for(j=1; j<width-1; j++){ New[i*width +j] = ( 0.2f * old[i*width + j] + 0.1f * old[(i-1) * width + j ] + //N 0.1f * old[(i-1) * width + (j+1)] + //NE 0.1f * old[ i * width + (j+1)] + //E 0.1f * old[(i+1) * width + (j+1)] + //SE 0.1f * old[(i+1) * width + j ] + //S 0.1f * old[(i+1) * width + (j-1)] + //SW 0.1f * old[ i * width + (j-1)] + //W 0.1f * old[(i-1) * width + (j-1)] //NW ) * 0.95f; } } temp = New; New = old; old = temp; } return old; } /* Initialize the two arrays referenced by the first two parameters in preparation for * jacobi iteration. The width and height of the arrays are given by the integer parameters. * Border elements are set to 5.0 for both arrays, and the interior elements of a1 are * set to 1.0. Interior elements of a2 are not initialized. */ void initializeArrays(float *a1, float *a2, int width, int height){ int i, j; for(i=0; i<height; i++){ for(j=0; j<width; j++){ if(i==0 || j ==0 || i==height-1 || j==width-1){ a1[i*width + j] = 5.0; a2[i*width + j] = 5.0; }else { a1[i*width + j] = 1.0; } } } } /* Print the 2D array of floats referenced by the first parameter. The second and third * parameters specify its dimensions, while the last argument indicates whether printing * is actually descired at all. No output is produced if shouldPrint == 0. */ void printArray(float *arr, int rows, int cols, int shouldPrint){ if (!shouldPrint) return; int i,j; for(i=0; i<rows; i++){ for(j=0; j<cols; j++){ printf("%04.2f ", arr[i*cols + j]); } printf("\n"); } printf("\n"); } /* Prints a short but informative message about program usage.*/ void usage(){ fprintf(stderr, "usage: jacobi threadsperblock passes width height [p]\n"); fprintf(stderr, " (if threadsperblock == 0, serial code is run)\n"); }
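/* Timing note (illustrative sketch, not part of the original program): cudaThreadSynchronize(),
 * called in runCUDA() above before the host-side timer is stopped, is deprecated; its direct
 * replacement is cudaDeviceSynchronize(). Kernel-only time can also be measured with CUDA
 * events, which keeps host-side overhead out of the measurement. The fragment below shows how
 * the kernel loop in runCUDA() could be bracketed with events; it assumes the same d_dataA,
 * d_dataB, grid, threads, pitch, width and passes variables that runCUDA() already sets up. */
#if 0 /* sketch only */
    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));

    checkCudaErrors(cudaEventRecord(start));
    for (int r = 0; r < passes; r++) {
        k0<<<grid, threads>>>(d_dataA, d_dataB, pitch/sizeof(float), width);
        float *swap = d_dataA; d_dataA = d_dataB; d_dataB = swap;
    }
    checkCudaErrors(cudaEventRecord(stop));
    checkCudaErrors(cudaEventSynchronize(stop)); /* waits for the kernels; replaces cudaThreadSynchronize() */

    float gpu_ms = 0.f;
    checkCudaErrors(cudaEventElapsedTime(&gpu_ms, start, stop));
    printf("Kernel-only time: %f (ms)\n", gpu_ms);

    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
#endif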
75b033ee4494eb52bfa872395785e57a0959e3f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This file is part of p4est. p4est is a C library to manage a collection (a forest) of multiple connected adaptive quadtrees or octrees in parallel. Copyright (C) 2010 The University of Texas System Additional copyright (C) 2011 individual authors Written by Carsten Burstedde, Lucas C. Wilcox, and Tobin Isaac p4est is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. p4est is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with p4est; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /** \file p4est_step3.c * * This 2D example program uses p4est to solve a simple advection problem. It * is numerically very simple, and intended to demonstrate several methods of * interacting with the p4est data after it has been refined and partitioned. * It demonstrates the construction of ghost layers (see p4est_ghost_t in * p4est_ghost.h) and communication of ghost-layer data, and it demonstrates * interacting with the quadrants and quadrant boundaries through the * p4est_iterate() routine (see p4est_iterate.h). */ /* p4est has two separate interfaces for 2D and 3D, p4est*.h and p8est*.h. * Most API functions are available for both dimensions. The header file * p4est_to_p8est.h #define's the 2D names to the 3D names such that most code * only needs to be written once. In this example, we rely on this. */ #include "main.h" #include <chrono> #include "time.h" using namespace std; using namespace std::chrono; /** We had 1. / 0. here to create a NaN but that is not portable. 
*/ static const double step3_invalid = -1.; void alloc_cuda_memory_step3_quad_user_data(quad_user_data_allocate_info_t* quad_user_data_allocate_info) { step3_quad_user_data_to_cuda_t *user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) malloc(sizeof(step3_quad_user_data_to_cuda_t)); step3_data_t *user_data = (step3_data_t*) quad_user_data_allocate_info->user_data; // to do what is 0x1 if(user_data != 0 && user_data != (void*)0x1) { step3_data_t *d_step3_user_data; gpuErrchk(hipMalloc((void**)&d_step3_user_data, sizeof(step3_data_t))); gpuErrchk(hipMemcpy(d_step3_user_data, user_data, sizeof(step3_data_t), hipMemcpyHostToDevice)); user_data_to_cuda->d_step3_user_data = d_step3_user_data; double *d_du; size_t d_du_size = P4EST_DIM; arrayPropMemoryAllocate((void**)&d_du, d_du_size, &(d_step3_user_data->du), sizeof(double*), user_data->du); user_data_to_cuda->d_du = d_du; } else { user_data_to_cuda->d_step3_user_data = user_data; user_data_to_cuda->d_du = NULL; } quad_user_data_allocate_info->cuda_memory_allocating_info = user_data_to_cuda; } void alloc_all_quads_cuda_memory_step3(all_quads_user_data_allocate_info_t* all_quads_user_data_allocate_info, sc_array_t* quadrants) { size_t d_quadrants_array_size = quadrants->elem_count; all_quads_user_data_allocate_info->quads_count = d_quadrants_array_size; size_t d_all_quads_user_data_bytes_count = d_quadrants_array_size * sizeof(step3_data_t); step3_data_t *d_all_quads_user_data; void **all_quads_host_user_data = (void**) malloc(d_quadrants_array_size * sizeof(void*)); step3_data_t *quads_user_data_temp = (step3_data_t*) malloc(d_all_quads_user_data_bytes_count); for(size_t i = 0; i < d_quadrants_array_size; i++) { p4est_quadrant_t *temp_quad = p4est_quadrant_array_index (quadrants, i); memcpy(quads_user_data_temp + i, temp_quad->p.user_data, sizeof(step3_data_t)); all_quads_host_user_data[i] = temp_quad->p.user_data; } gpuErrchk(hipMalloc((void**)&d_all_quads_user_data, d_all_quads_user_data_bytes_count)); gpuErrchk(hipMemcpy(d_all_quads_user_data, quads_user_data_temp, d_all_quads_user_data_bytes_count, hipMemcpyHostToDevice)); all_quads_user_data_allocate_info->d_all_quads_user_data = (void*)d_all_quads_user_data; all_quads_user_data_allocate_info->all_quads_user_data = all_quads_host_user_data; } void update_quad_cuda_step3_user_data(quad_user_data_allocate_info_t* old_user_data_allocate_info, quad_user_data_allocate_info_t* new_user_data_allocate_info) { step3_quad_user_data_to_cuda_t *user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) malloc(sizeof(step3_quad_user_data_to_cuda_t)); step3_quad_user_data_to_cuda_t *old_user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) old_user_data_allocate_info->cuda_memory_allocating_info; step3_data_t *old_user_data = (step3_data_t*) old_user_data_allocate_info->user_data; step3_data_t *new_user_data = (step3_data_t*) new_user_data_allocate_info->user_data; // to do what is 0x1 if(old_user_data != 0 && old_user_data != (void*)0x1 && new_user_data != 0 && new_user_data != (void*)0x1) { step3_data_t *d_step3_user_data = old_user_data_to_cuda->d_step3_user_data; gpuErrchk(hipMemcpy(d_step3_user_data, new_user_data, sizeof(step3_data_t), hipMemcpyHostToDevice)); user_data_to_cuda->d_step3_user_data = d_step3_user_data; double *d_du = old_user_data_to_cuda->d_du; size_t d_du_size = P4EST_DIM; arrayPropMemoryUpdate((void**)&d_du, d_du_size * sizeof(double), new_user_data->du); user_data_to_cuda->d_du = d_du; } else { user_data_to_cuda->d_step3_user_data = new_user_data; user_data_to_cuda->d_du = NULL; } 
new_user_data_allocate_info->cuda_memory_allocating_info = user_data_to_cuda; } void update_all_quads_cuda_user_data_step3(all_quads_user_data_allocate_info* old_user_data_allocate_info, all_quads_user_data_allocate_info* new_user_data_allocate_info) { size_t d_quadrants_array_size = old_user_data_allocate_info->quads_count; size_t d_all_quads_user_data_bytes_count = d_quadrants_array_size * sizeof(step3_data_t); void *d_all_quads_user_data = old_user_data_allocate_info->d_all_quads_user_data; step3_data_t **all_quads_host_user_data = (step3_data_t**)new_user_data_allocate_info->all_quads_user_data; step3_data_t *quads_user_data_temp = (step3_data_t*) malloc(d_all_quads_user_data_bytes_count); step3_data_t *quad_user_data_cursor = quads_user_data_temp; for(size_t i = 0; i < d_quadrants_array_size; i++, quad_user_data_cursor++) { step3_data_t *new_user_data = (step3_data_t*)(all_quads_host_user_data[i]); if(new_user_data != NULL) { memcpy(quad_user_data_cursor, new_user_data, sizeof(step3_data_t)); } } gpuErrchk(hipMemcpy(d_all_quads_user_data, quads_user_data_temp, d_all_quads_user_data_bytes_count, hipMemcpyHostToDevice)); new_user_data_allocate_info->d_all_quads_user_data = d_all_quads_user_data; new_user_data_allocate_info->quads_count = d_quadrants_array_size; } void free_cuda_memory_step3_quad_user_data(quad_user_data_allocate_info_t* quad_user_data_allocate_info) { step3_quad_user_data_to_cuda_t *user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) quad_user_data_allocate_info->cuda_memory_allocating_info; if(user_data_to_cuda->d_du != NULL) { gpuErrchk(hipFree(user_data_to_cuda->d_du)); } if(user_data_to_cuda->d_step3_user_data != NULL && user_data_to_cuda->d_step3_user_data != (void*)0x1) { gpuErrchk(hipFree(user_data_to_cuda->d_step3_user_data)); } } void free_all_quads_cuda_memory_step3(all_quads_user_data_allocate_info_t* all_quads_user_data_allocate_info) { gpuErrchk(hipFree(all_quads_user_data_allocate_info->d_all_quads_user_data)); free(all_quads_user_data_allocate_info->all_quads_user_data); } void* get_cuda_allocated_user_data_step3_quad_user_data(quad_user_data_allocate_info_t* quad_user_data_allocate_info) { step3_quad_user_data_to_cuda_t *user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) quad_user_data_allocate_info->cuda_memory_allocating_info; return user_data_to_cuda != NULL ? 
(void*) user_data_to_cuda->d_step3_user_data : NULL; } void download_quad_cuda_user_data_step3_to_host (quad_user_data_allocate_info_t* user_data_allocate_info) { step3_data_t *user_data = (step3_data_t*) user_data_allocate_info->user_data; step3_quad_user_data_to_cuda_t *user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) user_data_allocate_info->cuda_memory_allocating_info; gpuErrchk(hipMemcpy(user_data, user_data_to_cuda->d_step3_user_data, sizeof(step3_data_t), hipMemcpyDeviceToHost)); } void download_all_quads_cuda_user_data_to_host_t_step3(all_quads_user_data_allocate_info_t* all_quads_user_data_allocate_info, sc_array_t* quadrants) { size_t quads_count = all_quads_user_data_allocate_info->quads_count; size_t user_data_size = sizeof(step3_data_t); size_t user_data_bytes_alloc = quads_count * user_data_size; step3_data_t *copied_user_data = (step3_data_t*)malloc(user_data_bytes_alloc); gpuErrchk(hipMemcpy(copied_user_data, all_quads_user_data_allocate_info->d_all_quads_user_data, user_data_bytes_alloc, hipMemcpyDeviceToHost)); step3_data_t *copied_user_data_cursor = copied_user_data; for(size_t i = 0; i < quads_count; i++, copied_user_data_cursor++) { p4est_quadrant_t *quad = p4est_quadrant_array_index(quadrants, i); memcpy(quad->p.user_data, copied_user_data_cursor, user_data_size); } free(copied_user_data); } void alloc_cuda_memory_step3_ctx(user_data_for_cuda_t* user_data_api) { step3_ctx_to_cuda_t *ctx_to_cuda = (step3_ctx_to_cuda_t*) malloc(sizeof(step3_ctx_to_cuda_t)); step3_ctx_t *ctx = (step3_ctx*) user_data_api->user_data; step3_ctx_t *d_step3_ctx; gpuErrchk(hipMalloc((void**)&d_step3_ctx, sizeof(step3_ctx_t))); gpuErrchk(hipMemcpy(d_step3_ctx, ctx, sizeof(step3_ctx_t), hipMemcpyHostToDevice)); ctx_to_cuda->d_step3_ctx = d_step3_ctx; user_data_api->cuda_memory_allocating_info = (void*) ctx_to_cuda; } void free_cuda_memory_step3_ctx(user_data_for_cuda_t* user_data_api) { step3_ctx_to_cuda *ctx_to_cuda = (step3_ctx_to_cuda*) user_data_api->cuda_memory_allocating_info; gpuErrchk(hipFree(ctx_to_cuda->d_step3_ctx)); } void* get_cuda_allocated_user_data_step3_ctx(user_data_for_cuda_t* user_data_api) { step3_ctx_to_cuda *ctx_to_cuda = (step3_ctx_to_cuda*) user_data_api->cuda_memory_allocating_info; return (void*) ctx_to_cuda->d_step3_ctx; } /** Compute the value and derivatives of the initial condition. * * \param [in] x the coordinates * \param [out] du the derivative at \a x * \param [in] ctx the example parameters * * \return the initial condition at \a x */ static double step3_initial_condition (double x[], double du[], step3_ctx_t * ctx) { int i; double *c = ctx->center; double bump_width = ctx->bump_width; double r2, d[P4EST_DIM]; double arg, retval; r2 = 0.; for (i = 0; i < P4EST_DIM; i++) { d[i] = x[i] - c[i]; r2 += d[i] * d[i]; } arg = -(1. / 2.) * r2 / bump_width / bump_width; retval = exp (arg); if (du) { for (i = 0; i < P4EST_DIM; i++) { du[i] = -(1. / bump_width / bump_width) * d[i] * retval; } } return retval; } /** Get the coordinates of the midpoint of a quadrant. 
* * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a q * \param [in] q the quadrant * \param [out] xyz the coordinates of the midpoint of \a q */ static void step3_get_midpoint (p4est_t * p4est, p4est_topidx_t which_tree, p4est_quadrant_t * q, double xyz[3]) { p4est_qcoord_t half_length = P4EST_QUADRANT_LEN (q->level) / 2; p4est_qcoord_to_vertex (p4est->connectivity, which_tree, q->x + half_length, q->y + half_length, #ifdef P4_TO_P8 q->z + half_length, #endif xyz); } /** Initialize the initial condition data of a quadrant. * * This function matches the p4est_init_t prototype that is used by * p4est_new(), p4est_refine(), p4est_coarsen(), and p4est_balance(). * * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a q * \param [in,out] q the quadrant whose data gets initialized */ static void step3_init_initial_condition (p4est_t * p4est, p4est_topidx_t which_tree, p4est_quadrant_t * q) { /* the data associated with a forest is accessible by user_pointer */ step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; /* the data associated with a quadrant is accessible by p.user_data */ step3_data_t *data = (step3_data_t *) q->p.user_data; double midpoint[3]; step3_get_midpoint (p4est, which_tree, q, midpoint); /* initialize the data */ data->u = step3_initial_condition (midpoint, data->du, ctx); } /** Estimate the square of the approximation error on a quadrant. * * We compute our estimate by integrating the difference of a constant * approximation at the midpoint and a linear approximation that interpolates * at the midpoint. * * \param [in] q a quadrant * * \return the square of the error estimate for the state variables contained * in \a q's data. */ static double step3_error_sqr_estimate (p4est_quadrant_t * q) { step3_data_t *data = (step3_data_t *) q->p.user_data; int i; double diff2; double *du = data->du; double h = (double) P4EST_QUADRANT_LEN (q->level) / (double) P4EST_ROOT_LEN; double vol; #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif diff2 = 0.; /* use the approximate derivative to estimate the L2 error */ for (i = 0; i < P4EST_DIM; i++) { diff2 += du[i] * du[i] * (1. / 12.) * h * h * vol; } return diff2; } /** Refine by the L2 error estimate. * * Given the maximum global error, we enforce that each quadrant's portion of * the error must not exceed is fraction of the total volume of the domain * (which is 1). * * This function matches the p4est_refine_t prototype that is used by * p4est_refine() and p4est_refine_ext(). * * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a q * \param [in] q the quadrant * * \return 1 if \a q should be refined, 0 otherwise. */ static int step3_refine_err_estimate (p4est_t * p4est, p4est_topidx_t which_tree, p4est_quadrant_t * q) { step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; double global_err = ctx->max_err; double global_err2 = global_err * global_err; double h = (double) P4EST_QUADRANT_LEN (q->level) / (double) P4EST_ROOT_LEN; double vol, err2; /* the quadrant's volume is also its volume fraction */ #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif err2 = step3_error_sqr_estimate (q); if (err2 > (global_err2 * vol * 0.001)) { return 1; } else { return 0; } } /** Coarsen by the L2 error estimate of the initial condition. * * Given the maximum global error, we enforce that each quadrant's portion of * the error must not exceed is fraction of the total volume of the domain * (which is 1). 
* * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a children * \param [in] children a family of quadrants * * \return 1 if \a children should be coarsened, 0 otherwise. */ static int step3_coarsen_initial_condition (p4est_t * p4est, p4est_topidx_t which_tree, p4est_quadrant_t * children[]) { p4est_quadrant_t parent; step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; double global_err = ctx->max_err; double global_err2 = global_err * global_err; double h; step3_data_t parentdata; double parentmidpoint[3]; double vol, err2; /* get the parent of the first child (the parent of all children) */ p4est_quadrant_parent (children[0], &parent); step3_get_midpoint (p4est, which_tree, &parent, parentmidpoint); parentdata.u = step3_initial_condition (parentmidpoint, parentdata.du, ctx); h = (double) P4EST_QUADRANT_LEN (parent.level) / (double) P4EST_ROOT_LEN; /* the quadrant's volume is also its volume fraction */ #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif parent.p.user_data = (void *) (&parentdata); err2 = step3_error_sqr_estimate (&parent); if (err2 < global_err2 * vol) { return 1; } else { return 0; } } /** Coarsen by the L2 error estimate of the current state approximation. * * Given the maximum global error, we enforce that each quadrant's portion of * the error must not exceed its fraction of the total volume of the domain * (which is 1). * * This function matches the p4est_coarsen_t prototype that is used by * p4est_coarsen() and p4est_coarsen_ext(). * * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a children * \param [in] children a family of quadrants * * \return 1 if \a children should be coarsened, 0 otherwise. */ static int step3_coarsen_err_estimate (p4est_t * p4est, p4est_topidx_t which_tree, p4est_quadrant_t * children[]) { step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; double global_err = ctx->max_err; double global_err2 = global_err * global_err; double h; step3_data_t *data; double vol, err2, childerr2; double parentu; double diff; int i; h = (double) P4EST_QUADRANT_LEN (children[0]->level) / (double) P4EST_ROOT_LEN; /* the quadrant's volume is also its volume fraction */ #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif /* compute the average */ parentu = 0.; for (i = 0; i < P4EST_CHILDREN; i++) { data = (step3_data_t *) children[i]->p.user_data; parentu += data->u / P4EST_CHILDREN; } err2 = 0.; for (i = 0; i < P4EST_CHILDREN; i++) { childerr2 = step3_error_sqr_estimate (children[i]); if (childerr2 > global_err2 * vol) { return 0; } err2 += step3_error_sqr_estimate (children[i]); diff = (parentu - data->u) * (parentu - data->u); err2 += diff * vol; } if (err2 < global_err2 * (vol * P4EST_CHILDREN)) { return 1; } else { return 0; } } /** Initialize the state variables of incoming quadrants from outgoing * quadrants. * * The functions p4est_refine_ext(), p4est_coarsen_ext(), and * p4est_balance_ext() take as an argument a p4est_replace_t callback function, * which allows one to setup the quadrant data of incoming quadrants from the * data of outgoing quadrants, before the outgoing data is destroyed. This * function matches the p4est_replace_t prototype. * * In this example, we linearly interpolate the state variable of a quadrant * that is refined to its children, and we average the midpoints of children * that are being coarsened to the parent. 
* * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a children * \param [in] num_outgoing the number of quadrants that are being replaced: * either 1 if a quadrant is being refined, or * P4EST_CHILDREN if a family of children are being * coarsened. * \param [in] outgoing the outgoing quadrants * \param [in] num_incoming the number of quadrants that are being added: * either P4EST_CHILDREN if a quadrant is being refined, or * 1 if a family of children are being * coarsened. * \param [in,out] incoming quadrants whose data are initialized. */ static void step3_replace_quads (p4est_t * p4est, p4est_topidx_t which_tree, int num_outgoing, p4est_quadrant_t * outgoing[], int num_incoming, p4est_quadrant_t * incoming[]) { step3_data_t *parent_data, *child_data; int i, j; double h; double du_old, du_est; if (num_outgoing > 1) { /* this is coarsening */ parent_data = (step3_data_t *) incoming[0]->p.user_data; parent_data->u = 0.; for (j = 0; j < P4EST_DIM; j++) { parent_data->du[j] = step3_invalid; } for (i = 0; i < P4EST_CHILDREN; i++) { child_data = (step3_data_t *) outgoing[i]->p.user_data; parent_data->u += child_data->u / P4EST_CHILDREN; for (j = 0; j < P4EST_DIM; j++) { du_old = parent_data->du[j]; du_est = child_data->du[j]; if (du_old == du_old) { if (du_est * du_old >= 0.) { if (fabs (du_est) < fabs (du_old)) { parent_data->du[j] = du_est; } } else { parent_data->du[j] = 0.; } } else { parent_data->du[j] = du_est; } } } } else { /* this is refinement */ parent_data = (step3_data_t *) outgoing[0]->p.user_data; h = (double) P4EST_QUADRANT_LEN (outgoing[0]->level) / (double) P4EST_ROOT_LEN; for (i = 0; i < P4EST_CHILDREN; i++) { child_data = (step3_data_t *) incoming[i]->p.user_data; child_data->u = parent_data->u; for (j = 0; j < P4EST_DIM; j++) { child_data->du[j] = parent_data->du[j]; child_data->u += (h / 4.) * parent_data->du[j] * ((i & (1 << j)) ? 1. : -1); } } } } /** Callback function for interpolating the solution from quadrant midpoints to * corners. * * The function p4est_iterate() takes as an argument a p4est_iter_volume_t * callback function, which it executes at every local quadrant (see * p4est_iterate.h). This function matches the p4est_iter_volume_t prototype. * * In this example, we use the callback function to interpolate the state * variable to the corners, and write those corners into an array so that they * can be written out. * * \param [in] info the information about this quadrant that has been * populated by p4est_iterate() * \param [in,out] user_data the user_data that was given as an argument to * p4est_iterate: in this case, it points to the * array of corner values that we want to write. * The values for the corner of the quadrant * described by \a info are written during the * execution of the callback. */ static void step3_interpolate_solution (p4est_iter_volume_info_t * info, void *user_data) { sc_array_t *u_interp = (sc_array_t *) user_data; /* we passed the array of values to fill as the user_data in the call to p4est_iterate */ p4est_t *p4est = info->p4est; p4est_quadrant_t *q = info->quad; p4est_topidx_t which_tree = info->treeid; p4est_locidx_t local_id = info->quadid; /* this is the index of q *within its tree's numbering*. 
We want to convert it its index for all the quadrants on this process, which we do below */ p4est_tree_t *tree; step3_data_t *data = (step3_data_t *) q->p.user_data; double h; p4est_locidx_t arrayoffset; double this_u; double *this_u_ptr; int i, j; tree = p4est_tree_array_index (p4est->trees, which_tree); local_id += tree->quadrants_offset; /* now the id is relative to the MPI process */ arrayoffset = P4EST_CHILDREN * local_id; /* each local quadrant has 2^d (P4EST_CHILDREN) values in u_interp */ h = (double) P4EST_QUADRANT_LEN (q->level) / (double) P4EST_ROOT_LEN; for (i = 0; i < P4EST_CHILDREN; i++) { this_u = data->u; /* loop over the derivative components and linearly interpolate from the * midpoint to the corners */ for (j = 0; j < P4EST_DIM; j++) { /* In order to know whether the direction from the midpoint to the corner is * negative or positive, we take advantage of the fact that the corners * are in z-order. If i is an odd number, it is on the +x side; if it * is even, it is on the -x side. If (i / 2) is an odd number, it is on * the +y side, etc. */ this_u += (h / 2) * data->du[j] * ((i & (1 << j)) ? 1. : -1.); } this_u_ptr = (double *) sc_array_index (u_interp, arrayoffset + i); this_u_ptr[0] = this_u; } } /** Write the state variable to vtk format, one file per process. * * \param [in] p4est the forest, whose quadrant data contains the state * \param [in] timestep the timestep number, used to name the output files */ static void step3_write_solution (cuda4est_t * cuda4est, int timestep) { p4est_t *p4est = cuda4est->p4est; char filename[BUFSIZ] = ""; int retval; sc_array_t *u_interp; p4est_locidx_t numquads; p4est_vtk_context_t *context; snprintf (filename, BUFSIZ, P4EST_STRING "_step3_%04d", timestep); numquads = p4est->local_num_quadrants; /* create a vector with one value for the corner of every local quadrant * (the number of children is always the same as the number of corners) */ u_interp = sc_array_new_size (sizeof (double), numquads * P4EST_CHILDREN); /* Use the iterator to visit every cell and fill in the solution values. 
* Using the iterator is not absolutely necessary in this case: we could * also loop over every tree (there is only one tree in this case) and loop * over every quadrant within every tree, but we are trying to demonstrate * the usage of p4est_iterate in this example */ p4est_iterate (p4est, NULL, /* we don't need any ghost quadrants for this loop */ (void *) u_interp, /* pass in u_interp so that we can fill it */ step3_interpolate_solution, /* callback function that interpolates from the cell center to the cell corners, defined above */ NULL, /* there is no callback for the faces between quadrants */ #ifdef P4_TO_P8 NULL, /* there is no callback for the edges between quadrants */ #endif NULL); /* there is no callback for the corners between quadrants */ /* create VTK output context and set its parameters */ context = p4est_vtk_context_new (p4est, filename); p4est_vtk_context_set_scale (context, 0.99); /* quadrant at almost full scale */ /* begin writing the output files */ context = p4est_vtk_write_header (context); SC_CHECK_ABORT (context != NULL, P4EST_STRING "_vtk: Error writing vtk header"); /* do not write the tree id's of each quadrant * (there is only one tree in this example) */ context = p4est_vtk_write_cell_dataf (context, 0, 1, /* do write the refinement level of each quadrant */ 1, /* do write the mpi process id of each quadrant */ 0, /* do not wrap the mpi rank (if this were > 0, the modulus of the rank relative to this number would be written instead of the rank) */ 0, /* there is no custom cell scalar data. */ 0, /* there is no custom cell vector data. */ context); /* mark the end of the variable cell data. */ SC_CHECK_ABORT (context != NULL, P4EST_STRING "_vtk: Error writing cell data"); /* write one scalar field: the solution value */ context = p4est_vtk_write_point_dataf (context, 1, 0, /* write no vector fields */ "solution", u_interp, context); /* mark the end of the variable cell data. */ SC_CHECK_ABORT (context != NULL, P4EST_STRING "_vtk: Error writing cell data"); retval = p4est_vtk_write_footer (context); SC_CHECK_ABORT (!retval, P4EST_STRING "_vtk: Error writing footer"); sc_array_destroy (u_interp); } /** Approximate the divergence of (vu) on each quadrant * * We use piecewise constant approximations on each quadrant, so the value is * always 0. * * Like step3_interpolate_solution(), this function matches the * p4est_iter_volume_t prototype used by p4est_iterate(). * * \param [in] info the information about the quadrant populated by * p4est_iterate() * \param [in] user_data not used */ static void step3_quad_divergence (p4est_iter_volume_info_t * info, void *user_data) { p4est_quadrant_t *q = info->quad; step3_data_t *data = (step3_data_t *) q->p.user_data; data->dudt = 0.; } /** Approximate the flux across a boundary between quadrants. * * We use a very simple upwind numerical flux. * * This function matches the p4est_iter_face_t prototype used by * p4est_iterate(). 
* * \param [in] info the information about the quadrants on either side of the * interface, populated by p4est_iterate() * \param [in] user_data the user_data given to p4est_iterate(): in this case, * it points to the ghost_data array, which contains the * step3_data_t data for all of the ghost cells, which * was populated by p4est_ghost_exchange_data() */ static void step3_upwind_flux (p4est_iter_face_info_t * info, void *user_data) { int i, j; p4est_t *p4est = info->p4est; step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; step3_data_t *ghost_data = (step3_data_t *) user_data; step3_data_t *udata; p4est_quadrant_t *quad; double vdotn = 0.; double uavg; double q; double h, facearea; int which_face; int upwindside; p4est_iter_face_side_t *side[2]; sc_array_t *sides = &(info->sides); /* because there are no boundaries, every face has two sides */ P4EST_ASSERT (sides->elem_count == 2); side[0] = p4est_iter_fside_array_index_int (sides, 0); side[1] = p4est_iter_fside_array_index_int (sides, 1); /* which of the quadrant's faces the interface touches */ which_face = side[0]->face; switch (which_face) { case 0: /* -x side */ vdotn = -ctx->v[0]; break; case 1: /* +x side */ vdotn = ctx->v[0]; break; case 2: /* -y side */ vdotn = -ctx->v[1]; break; case 3: /* +y side */ vdotn = ctx->v[1]; break; #ifdef P4_TO_P8 case 4: /* -z side */ vdotn = -ctx->v[2]; break; case 5: /* +z side */ vdotn = ctx->v[2]; break; #endif } upwindside = vdotn >= 0. ? 0 : 1; /* Because we have non-conforming boundaries, one side of an interface can * either have one large ("full") quadrant or 2^(d-1) small ("hanging") * quadrants: we have to compute the average differently in each case. The * info populated by p4est_iterate() gives us the context we need to * proceed. */ uavg = 0; if (side[upwindside]->is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_HALF; j++) { if (side[upwindside]->is.hanging.is_ghost[j]) { /* *INDENT-OFF* */ udata = (step3_data_t *) &ghost_data[side[upwindside]->is.hanging.quadid[j]]; /* *INDENT-ON* */ } else { udata = (step3_data_t *) side[upwindside]->is.hanging.quad[j]->p.user_data; } uavg += udata->u; } uavg /= P4EST_HALF; } else { if (side[upwindside]->is.full.is_ghost) { udata = (step3_data_t *) & ghost_data[side[upwindside]->is.full.quadid]; } else { udata = (step3_data_t *) side[upwindside]->is.full.quad->p.user_data; } uavg = udata->u; } /* flux from side 0 to side 1 */ q = vdotn * uavg; for (i = 0; i < 2; i++) { if (side[i]->is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_HALF; j++) { quad = side[i]->is.hanging.quad[j]; h = (double) P4EST_QUADRANT_LEN (quad->level) / (double) P4EST_ROOT_LEN; #ifndef P4_TO_P8 facearea = h; #else facearea = h * h; #endif if (!side[i]->is.hanging.is_ghost[j]) { udata = (step3_data_t *) quad->p.user_data; if (i == upwindside) { udata->dudt += vdotn * udata->u * facearea * (i ? 1. : -1.); } else { udata->dudt += q * facearea * (i ? 1. : -1.); } } } } else { quad = side[i]->is.full.quad; h = (double) P4EST_QUADRANT_LEN (quad->level) / (double) P4EST_ROOT_LEN; #ifndef P4_TO_P8 facearea = h; #else facearea = h * h; #endif if (!side[i]->is.full.is_ghost) { udata = (step3_data_t *) quad->p.user_data; udata->dudt += q * facearea * (i ? 1. : -1.); } } } } /** Compute the new value of the state from the computed time derivative. * * We use a simple forward Euler scheme. * * The derivative was computed by a p4est_iterate() loop by the callbacks * step3_quad_divergence() and step3_upwind_flux(). 
Now we multiply this by * the timestep and add to the current solution. * * This function matches the p4est_iter_volume_t prototype used by * p4est_iterate(). * * \param [in] info the information about this quadrant that has been * populated by p4est_iterate() * \param [in] user_data the user_data given to p4est_iterate(): in this case, * it points to the timestep. */ static void step3_timestep_update (p4est_iter_volume_info_t * info, void *user_data) { p4est_quadrant_t *q = info->quad; step3_data_t *data = (step3_data_t *) q->p.user_data; double dt = *((double *) user_data); double vol; double h = (double) P4EST_QUADRANT_LEN (q->level) / (double) P4EST_ROOT_LEN; #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif data->u += dt * data->dudt / vol; } /** Reset the approximate derivatives. * * p4est_iterate() has an invariant to the order of callback execution: the * p4est_iter_volume_t callback will be executed on a quadrant before the * p4est_iter_face_t callbacks are executed on its faces. This function * resets the derivative stored in the quadrant's data before * step3_minmod_estimate() updates the derivative based on the face neighbors. * * This function matches the p4est_iter_volume_t prototype used by * p4est_iterate(). * * \param [in] info the information about this quadrant that has been * populated by p4est_iterate() * \param [in] user_data not used */ static void step3_reset_derivatives (p4est_iter_volume_info_t * info, void *user_data) { p4est_quadrant_t *q = info->quad; step3_data_t *data = (step3_data_t *) q->p.user_data; int j; for (j = 0; j < P4EST_DIM; j++) { data->du[j] = step3_invalid; } } // compute max __device__ void step3_cuda_reset_derivatives ( p4est_t *p4est, p4est_ghost_t *ghost_layer, p4est_quadrant_t *quad, p4est_locidx_t quadid, p4est_topidx_t treeid, void *user_data ) { p4est_quadrant_t *q = quad; step3_data_t *data = (step3_data_t *) q->p.user_data; int j; //printf("step3_invalid: %f\n", step3_invalid); for (j = 0; j < P4EST_DIM; j++) { data->du[j] = step3_invalid; } } __global__ void setup_step3_cuda_reset_derivatives_kernel(cuda_iter_volume_t *callback) { *callback = step3_cuda_reset_derivatives; } /** For two quadrants on either side of a face, estimate the derivative normal * to the face. * * This function matches the p4est_iter_face_t prototype used by * p4est_iterate(). 
* * \param [in] info the information about this quadrant that has been * populated by p4est_iterate() * \param [in] user_data the user_data given to p4est_iterate(): in this case, * it points to the ghost_data array, which contains the * step3_data_t data for all of the ghost cells, which * was populated by p4est_ghost_exchange_data() */ static void step3_minmod_estimate (p4est_iter_face_info_t * info, void *user_data) { int i, j; p4est_iter_face_side_t *side[2]; sc_array_t *sides = &(info->sides); step3_data_t *ghost_data = (step3_data_t *) user_data; step3_data_t *udata; p4est_quadrant_t *quad; double uavg[2]; double h[2]; double du_est, du_old; int which_dir; /* because there are no boundaries, every face has two sides */ P4EST_ASSERT (sides->elem_count == 2); side[0] = p4est_iter_fside_array_index_int (sides, 0); side[1] = p4est_iter_fside_array_index_int (sides, 1); which_dir = side[0]->face / 2; /* 0 == x, 1 == y, 2 == z */ for (i = 0; i < 2; i++) { uavg[i] = 0; if (side[i]->is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_HALF; j++) { quad = side[i]->is.hanging.quad[j]; h[i] = (double) P4EST_QUADRANT_LEN (quad->level) / (double) P4EST_ROOT_LEN; if (side[i]->is.hanging.is_ghost[j]) { udata = &ghost_data[side[i]->is.hanging.quadid[j]]; } else { udata = (step3_data_t *) side[i]->is.hanging.quad[j]->p.user_data; } uavg[i] += udata->u; } uavg[i] /= P4EST_HALF; } else { quad = side[i]->is.full.quad; h[i] = (double) P4EST_QUADRANT_LEN (quad->level) / (double) P4EST_ROOT_LEN; if (side[i]->is.full.is_ghost) { udata = &ghost_data[side[i]->is.full.quadid]; } else { udata = (step3_data_t *) side[i]->is.full.quad->p.user_data; } uavg[i] = udata->u; } } du_est = (uavg[1] - uavg[0]) / ((h[0] + h[1]) / 2.); for (i = 0; i < 2; i++) { if (side[i]->is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_HALF; j++) { quad = side[i]->is.hanging.quad[j]; if (!side[i]->is.hanging.is_ghost[j]) { udata = (step3_data_t *) quad->p.user_data; du_old = udata->du[which_dir]; if (du_old == du_old) { /* there has already been an update */ if (du_est * du_old >= 0.) { if (fabs (du_est) < fabs (du_old)) { udata->du[which_dir] = du_est; } } else { udata->du[which_dir] = 0.; } } else { udata->du[which_dir] = du_est; } } } } else { quad = side[i]->is.full.quad; if (!side[i]->is.full.is_ghost) { udata = (step3_data_t *) quad->p.user_data; du_old = udata->du[which_dir]; if (du_old == du_old) { /* there has already been an update */ if (du_est * du_old >= 0.) 
{ if (fabs (du_est) < fabs (du_old)) { udata->du[which_dir] = du_est; } } else { udata->du[which_dir] = 0.; } } else { udata->du[which_dir] = du_est; } } } } } __device__ void step3_cuda_minmod_estimate ( p4est_t* p4est, p4est_ghost_t* ghost_layer, p4est_iter_face_side_t* side, void *user_data) { int i, j; //p4est_iter_face_side_t *side[2]; //sc_array_t *sides = &(info->sides); step3_data_t *ghost_data = (step3_data_t *) user_data; step3_data_t *udata; p4est_quadrant_t *quad; double uavg[2]; double h[2]; double du_est, du_old; int which_dir; /* because there are no boundaries, every face has two sides */ //P4EST_ASSERT (sides->elem_count == 2); //side[0] = p4est_iter_fside_array_index_int (sides, 0); //side[1] = p4est_iter_fside_array_index_int (sides, 1); which_dir = side[0].face / 2; /* 0 == x, 1 == y, 2 == z */ for (i = 0; i < 2; i++) { uavg[i] = 0; if (side[i].is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_DEVICE_HALF; j++) { quad = side[i].is.hanging.quad[j]; h[i] = (double) P4EST_DEVICE_QUADRANT_LEN (quad->level) / (double) P4EST_DEVICE_ROOT_LEN; if (side[i].is.hanging.is_ghost[j]) { udata = &ghost_data[side[i].is.hanging.quadid[j]]; } else { udata = (step3_data_t *) side[i].is.hanging.quad[j]->p.user_data; } uavg[i] += udata->u; } uavg[i] /= P4EST_DEVICE_HALF; } else { quad = side[i].is.full.quad; h[i] = (double) P4EST_DEVICE_QUADRANT_LEN (quad->level) / (double) P4EST_DEVICE_ROOT_LEN; if (side[i].is.full.is_ghost) { udata = &ghost_data[side[i].is.full.quadid]; } else { udata = (step3_data_t *) side[i].is.full.quad->p.user_data; } uavg[i] = udata->u; } } du_est = (uavg[1] - uavg[0]) / ((h[0] + h[1]) / 2.); for (i = 0; i < 2; i++) { if (side[i].is_hanging) { /* there are 2^(d-1) (P4EST_DEVICE_HALF) subfaces */ for (j = 0; j < P4EST_DEVICE_HALF; j++) { quad = side[i].is.hanging.quad[j]; if (!side[i].is.hanging.is_ghost[j]) { udata = (step3_data_t *) quad->p.user_data; du_old = udata->du[which_dir]; if (du_old == du_old) { /* there has already been an update */ if (du_est * du_old >= 0.) { if (fabs (du_est) < fabs (du_old)) { udata->du[which_dir] = du_est; } } else { udata->du[which_dir] = 0.; } } else { udata->du[which_dir] = du_est; } } } } else { quad = side[i].is.full.quad; if (!side[i].is.full.is_ghost) { udata = (step3_data_t *) quad->p.user_data; du_old = udata->du[which_dir]; if (du_old == du_old) { /* there has already been an update */ if (du_est * du_old >= 0.) { if (fabs (du_est) < fabs (du_old)) { udata->du[which_dir] = du_est; } } else { udata->du[which_dir] = 0.; } } else { udata->du[which_dir] = du_est; } } } } } __global__ void setup_step3_cuda_minmod_estimate_kernel(cuda_iter_face_t *callback) { *callback = step3_cuda_minmod_estimate; } /** Compute the maximum state value. * * This function updates the maximum value from the value of a single cell. * * This function matches the p4est_iter_volume_t prototype used by * p4est_iterate(). 
* * \param [in] info the information about this quadrant that has been * populated by p4est_iterate() * \param [in,out] user_data the user_data given to p4est_iterate(): in this case, * it points to the maximum value that will be updated */ static void step3_compute_max (p4est_iter_volume_info_t * info, void *user_data) { p4est_quadrant_t *q = info->quad; step3_data_t *data = (step3_data_t *) q->p.user_data; double umax = *((double *) user_data); umax = SC_MAX (data->u, umax); *((double *) user_data) = umax; } __device__ static double atomicMax(double* address, double val) { unsigned long long int* address_as_i = (unsigned long long int*) address; unsigned long long int old = *address_as_i, assumed; do { assumed = old; old = ::atomicCAS(address_as_i, assumed, __double_as_longlong(::fmax(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } // compute max __device__ void step3_cuda_compute_max ( p4est_t *p4est, p4est_ghost_t *ghost_layer, p4est_quadrant_t *quad, p4est_locidx_t quadid, p4est_topidx_t treeid, void *user_data ) { p4est_quadrant_t *q = quad; step3_data_t *data = (step3_data_t *) q->p.user_data; // i don't know printf(""); atomicMax((double *)user_data, data->u); } __global__ void setup_step3_cuda_compute_max_kernel(cuda_iter_volume_t *callback) { *callback = step3_cuda_compute_max; } void step3_compute_max_alloc_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_compute_max_user_data_to_cuda_t *user_data_to_cuda = (step3_compute_max_user_data_to_cuda_t*) malloc(sizeof(step3_compute_max_user_data_to_cuda_t)); double *user_data = (double*) user_data_api->user_data; double *d_compute_max_user_data; gpuErrchk(hipMalloc((void**)&d_compute_max_user_data, sizeof(double))); gpuErrchk(hipMemcpy(d_compute_max_user_data, user_data, sizeof(double), hipMemcpyHostToDevice)); user_data_to_cuda->d_user_data = d_compute_max_user_data; user_data_api->cuda_memory_allocating_info = user_data_to_cuda; } void step3_compute_max_free_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_compute_max_user_data_to_cuda_t *allocate_info = (step3_compute_max_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; gpuErrchk(hipFree(allocate_info->d_user_data)); } void* step3_compute_max_get_cuda_allocated_user_data(user_data_for_cuda_t* user_data_api) { step3_compute_max_user_data_to_cuda_t *allocate_info = (step3_compute_max_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; return (void*) allocate_info->d_user_data; } void step3_compute_max_copy_user_data_from_device(user_data_for_cuda_t* user_data_api) { step3_compute_max_user_data_to_cuda_t *allocate_info = (step3_compute_max_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; gpuErrchk(hipMemcpy(user_data_api->user_data, allocate_info->d_user_data, sizeof(double), hipMemcpyDeviceToHost)); } // compute max // timestep update __device__ void step3_cuda_timestep_update ( p4est_t *p4est, p4est_ghost_t *ghost_layer, p4est_quadrant_t *quad, p4est_locidx_t quadid, p4est_topidx_t treeid, void *user_data ) { p4est_quadrant_t *q = quad; step3_data_t *data = (step3_data_t *) q->p.user_data; double dt = *((double *) user_data); double vol; double h = (double) P4EST_QUADRANT_LEN (q->level) / (double) P4EST_ROOT_LEN; #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif data->u += dt * data->dudt / vol; } __global__ void setup_step3_cuda_timestep_update_kernel(cuda_iter_volume_t *callback) { *callback = step3_cuda_timestep_update; } __device__ void 
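/* Device counterpart of step3_quad_divergence(): the scheme is piecewise
   constant per quadrant, so the volume contribution vanishes and this
   callback only resets dudt before the face fluxes are accumulated. */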
step3_cuda_quad_divergence ( p4est_t *p4est, p4est_ghost_t *ghost_layer, p4est_quadrant_t *quad, p4est_locidx_t quadid, p4est_topidx_t treeid, void *user_data ) { p4est_quadrant_t *q = quad; step3_data_t *data = (step3_data_t *) q->p.user_data; data->dudt = 0.; } __global__ void setup_step3_cuda_quad_divergence_kernel(cuda_iter_volume_t *callback) { *callback = step3_cuda_quad_divergence; } __device__ void step3_cuda_upwind_flux ( p4est_t* p4est, p4est_ghost_t* ghost_layer, p4est_iter_face_side_t* side, void *user_data) { int i, j; step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; step3_data_t *ghost_data = (step3_data_t *) user_data; step3_data_t *udata; p4est_quadrant_t *quad; double vdotn = 0.; double uavg; double q; double h, facearea; int which_face; int upwindside; /* because there are no boundaries, every face has two sides */ //P4EST_ASSERT (sides->elem_count == 2); /* which of the quadrant's faces the interface touches */ which_face = side[0].face; switch (which_face) { case 0: /* -x side */ vdotn = -ctx->v[0]; break; case 1: /* +x side */ vdotn = ctx->v[0]; break; case 2: /* -y side */ vdotn = -ctx->v[1]; break; case 3: /* +y side */ vdotn = ctx->v[1]; break; #ifdef P4_TO_P8 case 4: /* -z side */ vdotn = -ctx->v[2]; break; case 5: /* +z side */ vdotn = ctx->v[2]; break; #endif } upwindside = vdotn >= 0. ? 0 : 1; /* Because we have non-conforming boundaries, one side of an interface can * either have one large ("full") quadrant or 2^(d-1) small ("hanging") * quadrants: we have to compute the average differently in each case. The * info populated by p4est_iterate() gives us the context we need to * proceed. */ uavg = 0; if (side[upwindside].is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_DEVICE_HALF; j++) { if (side[upwindside].is.hanging.is_ghost[j]) { /* *INDENT-OFF* */ udata = (step3_data_t *) &ghost_data[side[upwindside].is.hanging.quadid[j]]; /* *INDENT-ON* */ } else { udata = (step3_data_t *) side[upwindside].is.hanging.quad[j]->p.user_data; } uavg += udata->u; } uavg /= P4EST_DEVICE_HALF; } else { if (side[upwindside].is.full.is_ghost) { udata = (step3_data_t *) & ghost_data[side[upwindside].is.full.quadid]; } else { udata = (step3_data_t *) side[upwindside].is.full.quad->p.user_data; } uavg = udata->u; } /* flux from side 0 to side 1 */ q = vdotn * uavg; for (i = 0; i < 2; i++) { if (side[i].is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_DEVICE_HALF; j++) { quad = side[i].is.hanging.quad[j]; h = (double) P4EST_DEVICE_QUADRANT_LEN (quad->level) / (double) P4EST_DEVICE_ROOT_LEN; #ifndef P4_TO_P8 facearea = h; #else facearea = h * h; #endif if (!side[i].is.hanging.is_ghost[j]) { udata = (step3_data_t *) quad->p.user_data; if (i == upwindside) { udata->dudt += vdotn * udata->u * facearea * (i ? 1. : -1.); } else { udata->dudt += q * facearea * (i ? 1. : -1.); } } } } else { quad = side[i].is.full.quad; h = (double) P4EST_DEVICE_QUADRANT_LEN (quad->level) / (double) P4EST_DEVICE_ROOT_LEN; #ifndef P4_TO_P8 facearea = h; #else facearea = h * h; #endif if (!side[i].is.full.is_ghost) { udata = (step3_data_t *) quad->p.user_data; udata->dudt += q * facearea * (i ? 1. 
: -1.); } } } } __global__ void setup_step3_cuda_upwind_flux_kernel(cuda_iter_face_t *callback) { *callback = step3_cuda_upwind_flux; } void step3_timestep_update_alloc_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_timestep_update_user_data_to_cuda_t *user_data_to_cuda = (step3_timestep_update_user_data_to_cuda_t*) malloc(sizeof(step3_timestep_update_user_data_to_cuda_t)); double *user_data = (double*) user_data_api->user_data; double *d_timestep_update_user_data; gpuErrchk(hipMalloc((void**)&d_timestep_update_user_data, sizeof(double))); gpuErrchk(hipMemcpy(d_timestep_update_user_data, user_data, sizeof(double), hipMemcpyHostToDevice)); user_data_to_cuda->d_user_data = d_timestep_update_user_data; user_data_api->cuda_memory_allocating_info = user_data_to_cuda; } void step3_timestep_update_free_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_timestep_update_user_data_to_cuda_t *allocate_info = (step3_timestep_update_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; gpuErrchk(hipFree(allocate_info->d_user_data)); } void* step3_timestep_update_get_cuda_allocated_user_data(user_data_for_cuda_t* user_data_api) { step3_timestep_update_user_data_to_cuda_t *allocate_info = (step3_timestep_update_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; return (void*) allocate_info->d_user_data; } void step3_timestep_update_copy_user_data_from_device(user_data_for_cuda_t* user_data_api) { step3_timestep_update_user_data_to_cuda_t *allocate_info = (step3_timestep_update_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; gpuErrchk(hipMemcpy(user_data_api->user_data, allocate_info->d_user_data, sizeof(double), hipMemcpyDeviceToHost)); } void step3_ghost_data_alloc_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_ghost_data_user_data_to_cuda_t *user_data_to_cuda = (step3_ghost_data_user_data_to_cuda_t*) malloc(sizeof(step3_ghost_data_user_data_to_cuda_t)); step3_data_t *user_data = (step3_data_t*) user_data_api->user_data; step3_data_t *d_ghost_data_user_data; size_t alloc_memory_size = user_data_api->user_data_elem_count * sizeof(step3_data_t); gpuErrchk(hipMalloc((void**)&d_ghost_data_user_data, alloc_memory_size)); gpuErrchk(hipMemcpy(d_ghost_data_user_data, user_data, alloc_memory_size, hipMemcpyHostToDevice)); user_data_to_cuda->d_user_data = d_ghost_data_user_data; user_data_api->cuda_memory_allocating_info = user_data_to_cuda; } void step3_ghost_data_free_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_ghost_data_user_data_to_cuda_t *allocate_info = (step3_ghost_data_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; if(allocate_info->d_user_data) { gpuErrchk(hipFree(allocate_info->d_user_data)); } } void* step3_ghost_data_get_cuda_allocated_user_data(user_data_for_cuda_t* user_data_api) { step3_ghost_data_user_data_to_cuda_t *allocate_info = (step3_ghost_data_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; return (void*) allocate_info->d_user_data; } void step3_ghost_data_copy_user_data_from_device(user_data_for_cuda_t* user_data_api) { if(user_data_api->user_data_elem_count) { step3_ghost_data_user_data_to_cuda_t *allocate_info = (step3_ghost_data_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; gpuErrchk(hipMemcpy(user_data_api->user_data, allocate_info->d_user_data, sizeof(double), hipMemcpyDeviceToHost)); } } // timestep update /** Compute the timestep. * * Find the smallest quadrant and scale the timestep based on that length and * the advection velocity. 
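 *
 * Concretely, dt = h_min / (2 |v|), where h_min is the edge length of the
 * globally smallest quadrant and v is the advection velocity stored in the
 * context.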
* * \param [in] p4est the forest * \return the timestep. */ static double step3_get_timestep (p4est_t * p4est) { step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; p4est_topidx_t t, flt, llt; p4est_tree_t *tree; int max_level, global_max_level; int mpiret, i; double min_h, vnorm; double dt; /* compute the timestep by finding the smallest quadrant */ flt = p4est->first_local_tree; llt = p4est->last_local_tree; max_level = 0; for (t = flt; t <= llt; t++) { tree = p4est_tree_array_index (p4est->trees, t); max_level = SC_MAX (max_level, tree->maxlevel); } mpiret = sc_MPI_Allreduce (&max_level, &global_max_level, 1, sc_MPI_INT, sc_MPI_MAX, p4est->mpicomm); SC_CHECK_MPI (mpiret); min_h = (double) P4EST_QUADRANT_LEN (global_max_level) / (double) P4EST_ROOT_LEN; vnorm = 0; for (i = 0; i < P4EST_DIM; i++) { vnorm += ctx->v[i] * ctx->v[i]; } vnorm = sqrt (vnorm); dt = min_h / 2. / vnorm; return dt; } /** Timestep the advection problem. * * Update the state, refine, repartition, and write the solution to file. * * \param [in,out] p4est the forest, whose state is updated * \param [in] time the end time */ static void step3_timestep (cuda4est_t *cuda4est, double time) { double ghost_allocation = 0; double p4est_reallocation = 0; double quadrants_reallocation = 0; double faces_reallocation = 0; double reset_derivatives_running = 0; double compute_max_running = 0; double flux_compute_running = 0; double timestep_update_running = 0; double downloading_quads = 0; bool quadrants_is_fresh = false; clock_t start = clock(); clock_t stop = clock(); double duration = (double)(stop - start) / CLOCKS_PER_SEC; p4est_t * p4est = cuda4est->p4est; double t = 0.; double dt = 0.; int i; step3_data_t *ghost_data; step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; int refine_period = ctx->refine_period; int repartition_period = ctx->repartition_period; int write_period = ctx->write_period; int recursive = 0; int allowed_level = P4EST_QMAXLEVEL; int allowcoarsening = 1; int callbackorphans = 0; int mpiret; double orig_max_err = ctx->max_err; double umax, global_umax; p4est_ghost_t *ghost; cuda_iter_volume_api_t *step3_cuda_compute_max_api = (cuda_iter_volume_api_t*)malloc(sizeof(cuda_iter_volume_api_t)); step3_cuda_compute_max_api->callback = step3_cuda_compute_max; step3_cuda_compute_max_api->setup_kernel = setup_step3_cuda_compute_max_kernel; user_data_for_cuda_t *step3_user_data_api_compute_max = (user_data_for_cuda_t*) malloc(sizeof(user_data_for_cuda_t)); step3_user_data_api_compute_max->user_data = &umax; step3_user_data_api_compute_max->alloc_cuda_memory = step3_compute_max_alloc_cuda_memory; step3_user_data_api_compute_max->free_cuda_memory = step3_compute_max_free_cuda_memory; step3_user_data_api_compute_max->get_cuda_allocated_user_data = step3_compute_max_get_cuda_allocated_user_data; step3_user_data_api_compute_max->copy_user_data_from_device = step3_compute_max_copy_user_data_from_device; cuda_iter_volume_api_t *step3_cuda_timestep_update_api = (cuda_iter_volume_api_t*)malloc(sizeof(cuda_iter_volume_api_t)); step3_cuda_timestep_update_api->callback = step3_cuda_timestep_update; step3_cuda_timestep_update_api->setup_kernel = setup_step3_cuda_timestep_update_kernel; user_data_for_cuda_t *step3_user_data_api_timestep_update = (user_data_for_cuda_t*) malloc(sizeof(user_data_for_cuda_t)); step3_user_data_api_timestep_update->user_data = &dt; step3_user_data_api_timestep_update->alloc_cuda_memory = step3_timestep_update_alloc_cuda_memory; step3_user_data_api_timestep_update->free_cuda_memory = 
step3_timestep_update_free_cuda_memory; step3_user_data_api_timestep_update->get_cuda_allocated_user_data = step3_timestep_update_get_cuda_allocated_user_data; step3_user_data_api_timestep_update->copy_user_data_from_device = step3_timestep_update_copy_user_data_from_device; cuda_iter_volume_api_t *step3_cuda_quad_divergence_api = (cuda_iter_volume_api_t*)malloc(sizeof(cuda_iter_volume_api_t)); step3_cuda_quad_divergence_api->callback= step3_cuda_quad_divergence; step3_cuda_quad_divergence_api->setup_kernel = setup_step3_cuda_quad_divergence_kernel; cuda_iter_face_api_t *step3_cuda_upwind_flux_api = (cuda_iter_face_api_t*)malloc(sizeof(cuda_iter_face_api_t)); step3_cuda_upwind_flux_api->callback = step3_cuda_upwind_flux; step3_cuda_upwind_flux_api->setup_kernel = setup_step3_cuda_upwind_flux_kernel; user_data_for_cuda_t *step3_user_data_api_ghost_data = (user_data_for_cuda_t*)malloc(sizeof(user_data_for_cuda_t)); step3_user_data_api_ghost_data->user_data_elem_count = 0; step3_user_data_api_ghost_data->alloc_cuda_memory = step3_ghost_data_alloc_cuda_memory; step3_user_data_api_ghost_data->free_cuda_memory = step3_ghost_data_free_cuda_memory; step3_user_data_api_ghost_data->get_cuda_allocated_user_data = step3_ghost_data_get_cuda_allocated_user_data; step3_user_data_api_ghost_data->copy_user_data_from_device = step3_ghost_data_copy_user_data_from_device; cuda_iter_volume_api_t *step3_cuda_reset_derivatives_api = (cuda_iter_volume_api_t*)malloc(sizeof(cuda_iter_volume_api_t)); step3_cuda_reset_derivatives_api->callback = step3_cuda_reset_derivatives; step3_cuda_reset_derivatives_api->setup_kernel = setup_step3_cuda_reset_derivatives_kernel; cuda_iter_face_api_t *step3_cuda_minmod_estimate_api = (cuda_iter_face_api_t*)malloc(sizeof(cuda_iter_face_api_t)); step3_cuda_minmod_estimate_api->callback = step3_cuda_minmod_estimate; step3_cuda_minmod_estimate_api->setup_kernel = setup_step3_cuda_minmod_estimate_kernel; /* create the ghost quadrants */ ghost = p4est_ghost_new (p4est, P4EST_CONNECT_FULL); /* create space for storing the ghost data */ ghost_data = P4EST_ALLOC (step3_data_t, ghost->ghosts.elem_count); /* synchronize the ghost data */ p4est_ghost_exchange_data (p4est, ghost, ghost_data); start = clock(); p4est_ghost_to_cuda_t* malloc_ghost = mallocForGhost(p4est, ghost); exchangeGhostDataToCuda(malloc_ghost, ghost); cuda4est->ghost_to_cuda = malloc_ghost; step3_user_data_api_ghost_data->user_data = ghost_data; step3_user_data_api_ghost_data->user_data_elem_count = ghost->ghosts.elem_count; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; ghost_allocation+=duration; // p4est memory allocation start start = clock(); p4est_cuda_memory_allocate_info_t *p4est_memory_allocate_info = p4est_memory_alloc(cuda4est); cuda4est->p4est_memory_allocate_info = p4est_memory_allocate_info; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; p4est_reallocation+=duration; // p4est memory allocation end // quadrants memory allocation start sc_array_t *trees = p4est->trees; p4est_tree_t *tree; sc_array_t *quadrants; start = clock(); tree = p4est_tree_array_index (trees, p4est->first_local_tree); quadrants = &(tree->quadrants); p4est_quadrants_to_cuda_t *quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); cuda4est->quads_to_cuda = quads_to_cuda; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; quadrants_reallocation+=duration; start = clock(); mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); stop = 
clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; faces_reallocation+=duration; // quadrants memory allocation end start= clock(); cuda_iterate (cuda4est, ghost, (void *) ghost_data, step3_user_data_api_ghost_data, step3_reset_derivatives, step3_cuda_reset_derivatives_api, step3_minmod_estimate, step3_cuda_minmod_estimate_api, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; reset_derivatives_running+=duration; /* initialize du/dx estimates */ /* p4est_iterate (p4est, ghost, (void *) ghost_data, step3_reset_derivatives, step3_minmod_estimate, #ifdef P4_TO_P8 NULL, #endif NULL); */ quadrants_is_fresh = false; for (t = 0., i = 0; t < time; t += dt, i++) { P4EST_GLOBAL_PRODUCTIONF ("time %f\n", t); /* refine */ if (!(i % refine_period)) { if (i) { start = clock(); if(!quadrants_is_fresh) { downloadQuadrantsFromCuda(quads_to_cuda, quadrants, cuda4est->quad_user_data_api); quadrants_is_fresh = true; } stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; downloading_quads+=duration; /* compute umax */ umax = 0.; /* initialize derivative estimates */ //start = clock(); start=clock(); p4est_iterate (p4est, NULL, (void *) &umax, step3_compute_max, NULL, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; compute_max_running+=duration; //stop = clock(); //duration = (double)(stop - start) / CLOCKS_PER_SEC; //cout << "Time taken by p4est_find_max: " //<< duration << " seconds" << endl; /* start = clock(); cuda_iterate (cuda4est, NULL, &umax, step3_user_data_api_compute_max, step3_compute_max, step3_cuda_compute_max_api, NULL, NULL, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; cout << "Time taken by cuda_find_max: " << duration << " seconds" << endl; */ mpiret = sc_MPI_Allreduce (&umax, &global_umax, 1, sc_MPI_DOUBLE, sc_MPI_MAX, p4est->mpicomm); SC_CHECK_MPI (mpiret); ctx->max_err = orig_max_err * global_umax; P4EST_GLOBAL_PRODUCTIONF ("u_max %f\n", global_umax); /* adapt */ p4est_refine_ext (p4est, recursive, allowed_level, step3_refine_err_estimate, NULL, step3_replace_quads); p4est_coarsen_ext (p4est, recursive, callbackorphans, step3_coarsen_err_estimate, NULL, step3_replace_quads); p4est_balance_ext (p4est, P4EST_CONNECT_FACE, NULL, step3_replace_quads); p4est_ghost_destroy (ghost); P4EST_FREE (ghost_data); ghost = NULL; ghost_data = NULL; step3_user_data_api_ghost_data->user_data = ghost_data; step3_user_data_api_ghost_data->user_data_elem_count = 0; // p4est memory reallocation start start = clock(); //p4est_memory_free(p4est_memory_allocate_info, cuda4est->quad_user_data_api); //p4est_memory_allocate_info = p4est_memory_alloc(cuda4est); //cuda4est->p4est_memory_allocate_info = p4est_memory_allocate_info; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; p4est_reallocation+=duration; //cout << "Time taken by p4est_reallocation: " //<< duration << " seconds" << endl; // p4est memory reallocation end // quadrants memory reallocation start start = clock(); freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); tree = p4est_tree_array_index (trees, p4est->first_local_tree); quadrants = &(tree->quadrants); quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); cuda4est->quads_to_cuda = quads_to_cuda; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; quadrants_reallocation+=duration; //cout << "Time taken by 
quadrants_reallocation: " //<< duration << " seconds" << endl; start = clock(); mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; faces_reallocation+=duration; //cout << "Time taken by faces_reallocation: " //<< duration << " seconds" << endl; // quadrants memory reallocation end } dt = step3_get_timestep (p4est); } long revision_before_exchange = p4est->revision; long revision_after_exchange = p4est->revision; /* repartition */ if (i && !(i % repartition_period)) { p4est_partition (p4est, allowcoarsening, NULL); revision_after_exchange = p4est->revision; if(revision_after_exchange != revision_before_exchange) { start=clock(); freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); tree = p4est_tree_array_index (trees, p4est->first_local_tree); quadrants = &(tree->quadrants); quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); cuda4est->quads_to_cuda = quads_to_cuda; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; quadrants_reallocation+=duration; } if (ghost) { p4est_ghost_destroy (ghost); P4EST_FREE (ghost_data); ghost = NULL; ghost_data = NULL; step3_user_data_api_ghost_data->user_data = ghost_data; step3_user_data_api_ghost_data->user_data_elem_count = 0; } } /* write out solution */ if (!(i % write_period)) { start = clock(); if(!quadrants_is_fresh) { downloadQuadrantsFromCuda(quads_to_cuda, quadrants, cuda4est->quad_user_data_api); quadrants_is_fresh = true; } stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; downloading_quads+=duration; step3_write_solution (cuda4est, i); } /* synchronize the ghost data */ if (!ghost) { ghost = p4est_ghost_new (p4est, P4EST_CONNECT_FULL); ghost_data = P4EST_ALLOC (step3_data_t, ghost->ghosts.elem_count); p4est_ghost_exchange_data (p4est, ghost, ghost_data); start=clock(); freeMemoryForGhost(malloc_ghost); malloc_ghost = mallocForGhost(p4est, ghost); exchangeGhostDataToCuda(malloc_ghost, ghost); cuda4est->ghost_to_cuda = malloc_ghost; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; ghost_allocation+=duration; start = clock(); if(revision_after_exchange == revision_before_exchange) { freeMemoryForFacesSides(quads_to_cuda); } mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); step3_user_data_api_ghost_data->user_data = ghost_data; step3_user_data_api_ghost_data->user_data_elem_count = ghost->ghosts.elem_count; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; faces_reallocation+=duration; } // quadrants memory reallocation start //freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); //tree = p4est_tree_array_index (trees, p4est->first_local_tree); //quadrants = &(tree->quadrants); //quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); //cuda4est->quads_to_cuda = quads_to_cuda; //mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); // quadrants memory reallocation end /* compute du/dt */ /* *INDENT-OFF* */ //downloadQuadrantsFromCuda(quads_to_cuda, quadrants, cuda4est->quad_user_data_api); start = clock(); cuda_iterate (cuda4est, ghost, (void *) ghost_data, step3_user_data_api_ghost_data, step3_quad_divergence, step3_cuda_quad_divergence_api, step3_upwind_flux, step3_cuda_upwind_flux_api, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; flux_compute_running+=duration; //cout << "Time taken by 
cuda_iterate: " // << duration << " seconds" << endl; //download cuda quadrants user data start //downloadQuadrantsFromCuda(quads_to_cuda, quadrants, cuda4est->quad_user_data_api); // download cuda quadrants user data end /* start = clock(); p4est_iterate (p4est, ghost, (void *) ghost_data, step3_quad_divergence, step3_upwind_flux, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; cout << "Time taken by p4est_iterate: " << duration << " seconds" << endl; */ /* *INDENT-ON* */ // quadrants memory reallocation start //freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); //tree = p4est_tree_array_index (trees, p4est->first_local_tree); //quadrants = &(tree->quadrants); //quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); //cuda4est->quads_to_cuda = quads_to_cuda; //mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); // quadrants memory reallocation end start = clock(); cuda_iterate (cuda4est, NULL, (void *) &dt, step3_user_data_api_timestep_update, step3_timestep_update, step3_cuda_timestep_update_api, NULL, NULL, #ifdef P4_TO_P8 NULL, #endif NULL ); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; timestep_update_running+=duration; //cout << "Time taken by cuda_timestep_update: " // << duration << " seconds" << endl; // download cuda quadrants user data start //downloadQuadrantsFromCuda(quads_to_cuda, quadrants, cuda4est->quad_user_data_api); // download cuda quadrants user data end /* update u */ /* start = clock(); p4est_iterate (p4est, NULL, (void *) &dt, step3_timestep_update, NULL, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; cout << "Time taken by p4est_timestep_update: " << duration << " seconds" << endl; */ // quadrants memory reallocation start //freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); //tree = p4est_tree_array_index (trees, p4est->first_local_tree); //quadrants = &(tree->quadrants); //quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); //cuda4est->quads_to_cuda = quads_to_cuda; //mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); // quadrants memory reallocation end /* synchronize the ghost data */ p4est_ghost_exchange_data (p4est, ghost, ghost_data); start=clock(); //freeMemoryForGhost(malloc_ghost); //malloc_ghost = mallocForGhost(p4est, ghost); freeGhostDataFromCuda(malloc_ghost); exchangeGhostDataToCuda(malloc_ghost, ghost); //cuda4est->ghost_to_cuda = malloc_ghost; //freeMemoryForFacesSides(quads_to_cuda); //mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); step3_user_data_api_ghost_data->user_data = ghost_data; step3_user_data_api_ghost_data->user_data_elem_count = ghost->ghosts.elem_count; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; ghost_allocation+=duration; start = clock(); cuda_iterate (cuda4est, ghost, (void *) ghost_data, step3_user_data_api_ghost_data, step3_reset_derivatives, step3_cuda_reset_derivatives_api, step3_minmod_estimate, step3_cuda_minmod_estimate_api, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; reset_derivatives_running+=duration; //cout << "Time taken by cuda_reset_derivatives: " // << duration << " seconds" << endl; start = clock(); //downloadQuadrantsFromCuda(quads_to_cuda, quadrants, cuda4est->quad_user_data_api); stop = clock(); duration = (double)(stop - 
start) / CLOCKS_PER_SEC; downloading_quads+=duration; //cout << "Time taken by cuda download_quads: " // << duration << " seconds" << endl; /* update du/dx estimate */ /* start = clock(); p4est_iterate (p4est, ghost, (void *) ghost_data, step3_reset_derivatives, step3_minmod_estimate, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; cout << "Time taken by p4est_reset_derivatives: " << duration << " seconds" << endl; */ // quadrants memory reallocation start start = clock(); //freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); //tree = p4est_tree_array_index (trees, p4est->first_local_tree); //quadrants = &(tree->quadrants); //quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); //cuda4est->quads_to_cuda = quads_to_cuda; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; quadrants_reallocation+=duration; start = clock(); //mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; faces_reallocation+=duration; // quadrants memory reallocation end quadrants_is_fresh = false; } P4EST_FREE (ghost_data); p4est_ghost_destroy (ghost); free(step3_cuda_compute_max_api); free(step3_cuda_timestep_update_api); start=clock(); p4est_memory_free(p4est_memory_allocate_info, cuda4est->quad_user_data_api); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; p4est_reallocation+=duration; start = clock(); freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; quadrants_reallocation+=duration; double summary_time = ghost_allocation + p4est_reallocation + quadrants_reallocation + faces_reallocation + reset_derivatives_running + compute_max_running + flux_compute_running + timestep_update_running + downloading_quads; printf("summary_time: %f\n", summary_time); printf("ghost_allocation: %f, in procent: %f\n", ghost_allocation, ghost_allocation/summary_time); printf("p4est_reallocation: %f, in procent: %f\n", p4est_reallocation, p4est_reallocation/summary_time); printf("quadrants_reallocation: %f, in procent: %f\n", quadrants_reallocation, quadrants_reallocation/summary_time); printf("faces_reallocation: %f, in procent: %f\n", faces_reallocation, faces_reallocation/summary_time); printf("reset_derivatives_running: %f, in procent: %f\n", reset_derivatives_running, reset_derivatives_running/summary_time); printf("compute_max_running: %f, in procent: %f\n", compute_max_running, compute_max_running/summary_time); printf("flux_compute_running: %f, in procent: %f\n", flux_compute_running, flux_compute_running/summary_time); printf("timestep_update_running: %f, in procent: %f\n", timestep_update_running, timestep_update_running/summary_time); printf("downloading_quads: %f, in procent: %f\n", downloading_quads, downloading_quads/summary_time); } /** The main step 3 program. * * Setup of the example parameters; create the forest, with the state variable * stored in the quadrant data; refine, balance, and partition the forest; * timestep; clean up, and exit. */ int main (int argc, char **argv) { auto start = std::chrono::high_resolution_clock::now(); int mpiret; int recursive, partforcoarsen; sc_MPI_Comm mpicomm; p4est_t *p4est; p4est_connectivity_t *conn; step3_ctx_t ctx; /* Initialize MPI; see sc_mpi.h. * If configure --enable-mpi is given these are true MPI calls. 
* Else these are dummy functions that simulate a single-processor run. */ mpiret = sc_MPI_Init (&argc, &argv); SC_CHECK_MPI (mpiret); mpicomm = sc_MPI_COMM_WORLD; /* These functions are optional. If called they store the MPI rank as a * static variable so subsequent global p4est log messages are only issued * from processor zero. Here we turn off most of the logging; see sc.h. */ sc_init (mpicomm, 1, 1, NULL, SC_LP_ESSENTIAL); p4est_init (NULL, SC_LP_PRODUCTION); P4EST_GLOBAL_PRODUCTIONF ("This is the p4est %dD demo example/steps/%s_step3\n", P4EST_DIM, P4EST_STRING); ctx.bump_width = 0.1; ctx.max_err = 2.e-2; ctx.center[0] = 0.5; ctx.center[1] = 0.5; #ifdef P4_TO_P8 ctx.center[2] = 0.5; #endif #ifndef P4_TO_P8 /* randomly chosen advection direction */ ctx.v[0] = -0.445868402501118; ctx.v[1] = -0.895098523991131; #else ctx.v[0] = 0.485191768970225; ctx.v[1] = -0.427996381877778; ctx.v[2] = 0.762501176669961; #endif ctx.refine_period = 2; ctx.repartition_period = 4; ctx.write_period = 8; /* Create a forest that consists of just one periodic quadtree/octree. */ #ifndef P4_TO_P8 conn = p4est_connectivity_new_periodic (); #else conn = p8est_connectivity_new_periodic (); #endif /* *INDENT-OFF* */ p4est = p4est_new_ext (mpicomm, /* communicator */ conn, /* connectivity */ 0, /* minimum quadrants per MPI process */ 4, /* minimum level of refinement */ 1, /* fill uniform */ sizeof (step3_data_t), /* data size */ step3_init_initial_condition, /* initializes data */ (void *) (&ctx)); /* context */ cuda4est_t *cuda4est = (cuda4est_t*) malloc(sizeof(cuda4est_t)); cuda4est->p4est = p4est; user_data_for_cuda_t *user_data_api = (user_data_for_cuda_t*) malloc(sizeof(user_data_for_cuda_t)); user_data_api->user_data = &ctx; user_data_api->alloc_cuda_memory = alloc_cuda_memory_step3_ctx; user_data_api->free_cuda_memory = free_cuda_memory_step3_ctx; user_data_api->get_cuda_allocated_user_data = get_cuda_allocated_user_data_step3_ctx; cuda4est->user_data_api = user_data_api; quad_user_data_api_t *quad_user_data_api = (quad_user_data_api_t*) malloc(sizeof(quad_user_data_api_t)); quad_user_data_api->alloc_cuda_memory = alloc_cuda_memory_step3_quad_user_data; quad_user_data_api->alloc_cuda_memory_for_all_quads = alloc_all_quads_cuda_memory_step3; quad_user_data_api->free_cuda_memory = free_cuda_memory_step3_quad_user_data; quad_user_data_api->free_cuda_memory_for_all_quads = free_all_quads_cuda_memory_step3; quad_user_data_api->get_cuda_allocated_user_data = get_cuda_allocated_user_data_step3_quad_user_data; quad_user_data_api->update_quad_cuda_user_data = update_quad_cuda_step3_user_data; quad_user_data_api->update_all_quads_cuda_user_data = update_all_quads_cuda_user_data_step3; quad_user_data_api->download_quad_cuda_user_data_to_host = download_quad_cuda_user_data_step3_to_host; quad_user_data_api->download_all_quads_cuda_user_data_to_host = download_all_quads_cuda_user_data_to_host_t_step3; cuda4est->quad_user_data_api = quad_user_data_api; /* *INDENT-ON* */ /* refine and coarsen based on an interpolation error estimate */ recursive = 1; p4est_refine (p4est, recursive, step3_refine_err_estimate, step3_init_initial_condition); p4est_coarsen (p4est, recursive, step3_coarsen_initial_condition, step3_init_initial_condition); /* Partition: The quadrants are redistributed for equal element count. The * partition can optionally be modified such that a family of octants, which * are possibly ready for coarsening, are never split between processors. 
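 * Setting partforcoarsen = 1 below requests exactly this grouping from
 * p4est_partition().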
*/ partforcoarsen = 1; /* If we call the 2:1 balance we ensure that neighbors do not differ in size * by more than a factor of 2. This can optionally include diagonal * neighbors across edges or corners as well; see p4est.h. */ p4est_balance (p4est, P4EST_CONNECT_FACE, step3_init_initial_condition); p4est_partition (p4est, partforcoarsen, NULL); /* time step */ //step3_timestep (cuda4est, 0.1); step3_timestep (cuda4est, 1); /* Destroy the p4est and the connectivity structure. */ p4est_destroy (p4est); p4est_connectivity_destroy (conn); /* Verify that allocations internal to p4est and sc do not leak memory. * This should be called if sc_init () has been called earlier. */ sc_finalize (); /* These are standard MPI calls. Without --enable-mpi, this is a dummy. */ mpiret = sc_MPI_Finalize (); SC_CHECK_MPI (mpiret); auto stop = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start); // To get the value of duration use the count() // member function on the duration object std::cout << "time duration: " << duration.count() << std::endl; return 0; }
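/* Illustrative sketch (not part of the original example, disabled by default):
 * the setup_*_kernel functions above exist because host code cannot take the
 * address of a __device__ function directly.  A one-thread kernel stores the
 * device-side pointer into device memory, the host copies it back, and then
 * passes it to a worker kernel as an ordinary argument.  The demo_* names
 * below are hypothetical and assume gpuErrchk and the HIP runtime headers
 * pulled in by main.h are available. */
#ifdef STEP3_FUNCTION_POINTER_DEMO
typedef void (*demo_callback_t) (double *value);

/* device callback whose address we want on the host */
__device__ void
demo_double_it (double *value)
{
  *value *= 2.;
}

/* store the device-side address of demo_double_it */
__global__ void
demo_setup_kernel (demo_callback_t * callback)
{
  *callback = demo_double_it;
}

/* invoke whatever callback the host handed in */
__global__ void
demo_apply_kernel (demo_callback_t callback, double *value)
{
  callback (value);
}

static void
demo_run ()
{
  demo_callback_t    *d_callback_slot, h_callback;
  double             *d_value;
  double              h_value = 21.;

  gpuErrchk (hipMalloc ((void **) &d_callback_slot, sizeof (demo_callback_t)));
  gpuErrchk (hipMalloc ((void **) &d_value, sizeof (double)));
  gpuErrchk (hipMemcpy (d_value, &h_value, sizeof (double),
                        hipMemcpyHostToDevice));

  /* capture the device function pointer */
  hipLaunchKernelGGL (demo_setup_kernel, dim3 (1), dim3 (1), 0, 0,
                      d_callback_slot);
  gpuErrchk (hipMemcpy (&h_callback, d_callback_slot,
                        sizeof (demo_callback_t), hipMemcpyDeviceToHost));

  /* hand the pointer back to the device and call through it */
  hipLaunchKernelGGL (demo_apply_kernel, dim3 (1), dim3 (1), 0, 0,
                      h_callback, d_value);
  gpuErrchk (hipMemcpy (&h_value, d_value, sizeof (double),
                        hipMemcpyDeviceToHost));
  printf ("doubled value: %f\n", h_value);  /* expect 42 */

  gpuErrchk (hipFree (d_callback_slot));
  gpuErrchk (hipFree (d_value));
}
#endif /* STEP3_FUNCTION_POINTER_DEMO */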
75b033ee4494eb52bfa872395785e57a0959e3f6.cu
/* This file is part of p4est. p4est is a C library to manage a collection (a forest) of multiple connected adaptive quadtrees or octrees in parallel. Copyright (C) 2010 The University of Texas System Additional copyright (C) 2011 individual authors Written by Carsten Burstedde, Lucas C. Wilcox, and Tobin Isaac p4est is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. p4est is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with p4est; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /** \file p4est_step3.c * * This 2D example program uses p4est to solve a simple advection problem. It * is numerically very simple, and intended to demonstrate several methods of * interacting with the p4est data after it has been refined and partitioned. * It demonstrates the construction of ghost layers (see p4est_ghost_t in * p4est_ghost.h) and communication of ghost-layer data, and it demonstrates * interacting with the quadrants and quadrant boundaries through the * p4est_iterate() routine (see p4est_iterate.h). */ /* p4est has two separate interfaces for 2D and 3D, p4est*.h and p8est*.h. * Most API functions are available for both dimensions. The header file * p4est_to_p8est.h #define's the 2D names to the 3D names such that most code * only needs to be written once. In this example, we rely on this. */ #include "main.h" #include <chrono> #include "time.h" using namespace std; using namespace std::chrono; /** We had 1. / 0. here to create a NaN but that is not portable. 
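 * The value -1. now simply flags derivative entries that have not yet
 * received an estimate.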
*/ static const double step3_invalid = -1.; void alloc_cuda_memory_step3_quad_user_data(quad_user_data_allocate_info_t* quad_user_data_allocate_info) { step3_quad_user_data_to_cuda_t *user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) malloc(sizeof(step3_quad_user_data_to_cuda_t)); step3_data_t *user_data = (step3_data_t*) quad_user_data_allocate_info->user_data; // to do what is 0x1 if(user_data != 0 && user_data != (void*)0x1) { step3_data_t *d_step3_user_data; gpuErrchk(cudaMalloc((void**)&d_step3_user_data, sizeof(step3_data_t))); gpuErrchk(cudaMemcpy(d_step3_user_data, user_data, sizeof(step3_data_t), cudaMemcpyHostToDevice)); user_data_to_cuda->d_step3_user_data = d_step3_user_data; double *d_du; size_t d_du_size = P4EST_DIM; arrayPropMemoryAllocate((void**)&d_du, d_du_size, &(d_step3_user_data->du), sizeof(double*), user_data->du); user_data_to_cuda->d_du = d_du; } else { user_data_to_cuda->d_step3_user_data = user_data; user_data_to_cuda->d_du = NULL; } quad_user_data_allocate_info->cuda_memory_allocating_info = user_data_to_cuda; } void alloc_all_quads_cuda_memory_step3(all_quads_user_data_allocate_info_t* all_quads_user_data_allocate_info, sc_array_t* quadrants) { size_t d_quadrants_array_size = quadrants->elem_count; all_quads_user_data_allocate_info->quads_count = d_quadrants_array_size; size_t d_all_quads_user_data_bytes_count = d_quadrants_array_size * sizeof(step3_data_t); step3_data_t *d_all_quads_user_data; void **all_quads_host_user_data = (void**) malloc(d_quadrants_array_size * sizeof(void*)); step3_data_t *quads_user_data_temp = (step3_data_t*) malloc(d_all_quads_user_data_bytes_count); for(size_t i = 0; i < d_quadrants_array_size; i++) { p4est_quadrant_t *temp_quad = p4est_quadrant_array_index (quadrants, i); memcpy(quads_user_data_temp + i, temp_quad->p.user_data, sizeof(step3_data_t)); all_quads_host_user_data[i] = temp_quad->p.user_data; } gpuErrchk(cudaMalloc((void**)&d_all_quads_user_data, d_all_quads_user_data_bytes_count)); gpuErrchk(cudaMemcpy(d_all_quads_user_data, quads_user_data_temp, d_all_quads_user_data_bytes_count, cudaMemcpyHostToDevice)); all_quads_user_data_allocate_info->d_all_quads_user_data = (void*)d_all_quads_user_data; all_quads_user_data_allocate_info->all_quads_user_data = all_quads_host_user_data; } void update_quad_cuda_step3_user_data(quad_user_data_allocate_info_t* old_user_data_allocate_info, quad_user_data_allocate_info_t* new_user_data_allocate_info) { step3_quad_user_data_to_cuda_t *user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) malloc(sizeof(step3_quad_user_data_to_cuda_t)); step3_quad_user_data_to_cuda_t *old_user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) old_user_data_allocate_info->cuda_memory_allocating_info; step3_data_t *old_user_data = (step3_data_t*) old_user_data_allocate_info->user_data; step3_data_t *new_user_data = (step3_data_t*) new_user_data_allocate_info->user_data; // to do what is 0x1 if(old_user_data != 0 && old_user_data != (void*)0x1 && new_user_data != 0 && new_user_data != (void*)0x1) { step3_data_t *d_step3_user_data = old_user_data_to_cuda->d_step3_user_data; gpuErrchk(cudaMemcpy(d_step3_user_data, new_user_data, sizeof(step3_data_t), cudaMemcpyHostToDevice)); user_data_to_cuda->d_step3_user_data = d_step3_user_data; double *d_du = old_user_data_to_cuda->d_du; size_t d_du_size = P4EST_DIM; arrayPropMemoryUpdate((void**)&d_du, d_du_size * sizeof(double), new_user_data->du); user_data_to_cuda->d_du = d_du; } else { user_data_to_cuda->d_step3_user_data = new_user_data; user_data_to_cuda->d_du = 
NULL; } new_user_data_allocate_info->cuda_memory_allocating_info = user_data_to_cuda; } void update_all_quads_cuda_user_data_step3(all_quads_user_data_allocate_info* old_user_data_allocate_info, all_quads_user_data_allocate_info* new_user_data_allocate_info) { size_t d_quadrants_array_size = old_user_data_allocate_info->quads_count; size_t d_all_quads_user_data_bytes_count = d_quadrants_array_size * sizeof(step3_data_t); void *d_all_quads_user_data = old_user_data_allocate_info->d_all_quads_user_data; step3_data_t **all_quads_host_user_data = (step3_data_t**)new_user_data_allocate_info->all_quads_user_data; step3_data_t *quads_user_data_temp = (step3_data_t*) malloc(d_all_quads_user_data_bytes_count); step3_data_t *quad_user_data_cursor = quads_user_data_temp; for(size_t i = 0; i < d_quadrants_array_size; i++, quad_user_data_cursor++) { step3_data_t *new_user_data = (step3_data_t*)(all_quads_host_user_data[i]); if(new_user_data != NULL) { memcpy(quad_user_data_cursor, new_user_data, sizeof(step3_data_t)); } } gpuErrchk(cudaMemcpy(d_all_quads_user_data, quads_user_data_temp, d_all_quads_user_data_bytes_count, cudaMemcpyHostToDevice)); new_user_data_allocate_info->d_all_quads_user_data = d_all_quads_user_data; new_user_data_allocate_info->quads_count = d_quadrants_array_size; } void free_cuda_memory_step3_quad_user_data(quad_user_data_allocate_info_t* quad_user_data_allocate_info) { step3_quad_user_data_to_cuda_t *user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) quad_user_data_allocate_info->cuda_memory_allocating_info; if(user_data_to_cuda->d_du != NULL) { gpuErrchk(cudaFree(user_data_to_cuda->d_du)); } if(user_data_to_cuda->d_step3_user_data != NULL && user_data_to_cuda->d_step3_user_data != (void*)0x1) { gpuErrchk(cudaFree(user_data_to_cuda->d_step3_user_data)); } } void free_all_quads_cuda_memory_step3(all_quads_user_data_allocate_info_t* all_quads_user_data_allocate_info) { gpuErrchk(cudaFree(all_quads_user_data_allocate_info->d_all_quads_user_data)); free(all_quads_user_data_allocate_info->all_quads_user_data); } void* get_cuda_allocated_user_data_step3_quad_user_data(quad_user_data_allocate_info_t* quad_user_data_allocate_info) { step3_quad_user_data_to_cuda_t *user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) quad_user_data_allocate_info->cuda_memory_allocating_info; return user_data_to_cuda != NULL ? 
(void*) user_data_to_cuda->d_step3_user_data : NULL; } void download_quad_cuda_user_data_step3_to_host (quad_user_data_allocate_info_t* user_data_allocate_info) { step3_data_t *user_data = (step3_data_t*) user_data_allocate_info->user_data; step3_quad_user_data_to_cuda_t *user_data_to_cuda = (step3_quad_user_data_to_cuda_t*) user_data_allocate_info->cuda_memory_allocating_info; gpuErrchk(cudaMemcpy(user_data, user_data_to_cuda->d_step3_user_data, sizeof(step3_data_t), cudaMemcpyDeviceToHost)); } void download_all_quads_cuda_user_data_to_host_t_step3(all_quads_user_data_allocate_info_t* all_quads_user_data_allocate_info, sc_array_t* quadrants) { size_t quads_count = all_quads_user_data_allocate_info->quads_count; size_t user_data_size = sizeof(step3_data_t); size_t user_data_bytes_alloc = quads_count * user_data_size; step3_data_t *copied_user_data = (step3_data_t*)malloc(user_data_bytes_alloc); gpuErrchk(cudaMemcpy(copied_user_data, all_quads_user_data_allocate_info->d_all_quads_user_data, user_data_bytes_alloc, cudaMemcpyDeviceToHost)); step3_data_t *copied_user_data_cursor = copied_user_data; for(size_t i = 0; i < quads_count; i++, copied_user_data_cursor++) { p4est_quadrant_t *quad = p4est_quadrant_array_index(quadrants, i); memcpy(quad->p.user_data, copied_user_data_cursor, user_data_size); } free(copied_user_data); } void alloc_cuda_memory_step3_ctx(user_data_for_cuda_t* user_data_api) { step3_ctx_to_cuda_t *ctx_to_cuda = (step3_ctx_to_cuda_t*) malloc(sizeof(step3_ctx_to_cuda_t)); step3_ctx_t *ctx = (step3_ctx*) user_data_api->user_data; step3_ctx_t *d_step3_ctx; gpuErrchk(cudaMalloc((void**)&d_step3_ctx, sizeof(step3_ctx_t))); gpuErrchk(cudaMemcpy(d_step3_ctx, ctx, sizeof(step3_ctx_t), cudaMemcpyHostToDevice)); ctx_to_cuda->d_step3_ctx = d_step3_ctx; user_data_api->cuda_memory_allocating_info = (void*) ctx_to_cuda; } void free_cuda_memory_step3_ctx(user_data_for_cuda_t* user_data_api) { step3_ctx_to_cuda *ctx_to_cuda = (step3_ctx_to_cuda*) user_data_api->cuda_memory_allocating_info; gpuErrchk(cudaFree(ctx_to_cuda->d_step3_ctx)); } void* get_cuda_allocated_user_data_step3_ctx(user_data_for_cuda_t* user_data_api) { step3_ctx_to_cuda *ctx_to_cuda = (step3_ctx_to_cuda*) user_data_api->cuda_memory_allocating_info; return (void*) ctx_to_cuda->d_step3_ctx; } /** Compute the value and derivatives of the initial condition. * * \param [in] x the coordinates * \param [out] du the derivative at \a x * \param [in] ctx the example parameters * * \return the initial condition at \a x */ static double step3_initial_condition (double x[], double du[], step3_ctx_t * ctx) { int i; double *c = ctx->center; double bump_width = ctx->bump_width; double r2, d[P4EST_DIM]; double arg, retval; r2 = 0.; for (i = 0; i < P4EST_DIM; i++) { d[i] = x[i] - c[i]; r2 += d[i] * d[i]; } arg = -(1. / 2.) * r2 / bump_width / bump_width; retval = exp (arg); if (du) { for (i = 0; i < P4EST_DIM; i++) { du[i] = -(1. / bump_width / bump_width) * d[i] * retval; } } return retval; } /** Get the coordinates of the midpoint of a quadrant. 
* * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a q * \param [in] q the quadrant * \param [out] xyz the coordinates of the midpoint of \a q */ static void step3_get_midpoint (p4est_t * p4est, p4est_topidx_t which_tree, p4est_quadrant_t * q, double xyz[3]) { p4est_qcoord_t half_length = P4EST_QUADRANT_LEN (q->level) / 2; p4est_qcoord_to_vertex (p4est->connectivity, which_tree, q->x + half_length, q->y + half_length, #ifdef P4_TO_P8 q->z + half_length, #endif xyz); } /** Initialize the initial condition data of a quadrant. * * This function matches the p4est_init_t prototype that is used by * p4est_new(), p4est_refine(), p4est_coarsen(), and p4est_balance(). * * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a q * \param [in,out] q the quadrant whose data gets initialized */ static void step3_init_initial_condition (p4est_t * p4est, p4est_topidx_t which_tree, p4est_quadrant_t * q) { /* the data associated with a forest is accessible by user_pointer */ step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; /* the data associated with a quadrant is accessible by p.user_data */ step3_data_t *data = (step3_data_t *) q->p.user_data; double midpoint[3]; step3_get_midpoint (p4est, which_tree, q, midpoint); /* initialize the data */ data->u = step3_initial_condition (midpoint, data->du, ctx); } /** Estimate the square of the approximation error on a quadrant. * * We compute our estimate by integrating the difference of a constant * approximation at the midpoint and a linear approximation that interpolates * at the midpoint. * * \param [in] q a quadrant * * \return the square of the error estimate for the state variables contained * in \a q's data. */ static double step3_error_sqr_estimate (p4est_quadrant_t * q) { step3_data_t *data = (step3_data_t *) q->p.user_data; int i; double diff2; double *du = data->du; double h = (double) P4EST_QUADRANT_LEN (q->level) / (double) P4EST_ROOT_LEN; double vol; #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif diff2 = 0.; /* use the approximate derivative to estimate the L2 error */ for (i = 0; i < P4EST_DIM; i++) { diff2 += du[i] * du[i] * (1. / 12.) * h * h * vol; } return diff2; } /** Refine by the L2 error estimate. * * Given the maximum global error, we enforce that each quadrant's portion of * the error must not exceed is fraction of the total volume of the domain * (which is 1). * * This function matches the p4est_refine_t prototype that is used by * p4est_refine() and p4est_refine_ext(). * * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a q * \param [in] q the quadrant * * \return 1 if \a q should be refined, 0 otherwise. */ static int step3_refine_err_estimate (p4est_t * p4est, p4est_topidx_t which_tree, p4est_quadrant_t * q) { step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; double global_err = ctx->max_err; double global_err2 = global_err * global_err; double h = (double) P4EST_QUADRANT_LEN (q->level) / (double) P4EST_ROOT_LEN; double vol, err2; /* the quadrant's volume is also its volume fraction */ #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif err2 = step3_error_sqr_estimate (q); if (err2 > (global_err2 * vol * 0.001)) { return 1; } else { return 0; } } /** Coarsen by the L2 error estimate of the initial condition. * * Given the maximum global error, we enforce that each quadrant's portion of * the error must not exceed is fraction of the total volume of the domain * (which is 1). 
* * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a children * \param [in] children a family of quadrants * * \return 1 if \a children should be coarsened, 0 otherwise. */ static int step3_coarsen_initial_condition (p4est_t * p4est, p4est_topidx_t which_tree, p4est_quadrant_t * children[]) { p4est_quadrant_t parent; step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; double global_err = ctx->max_err; double global_err2 = global_err * global_err; double h; step3_data_t parentdata; double parentmidpoint[3]; double vol, err2; /* get the parent of the first child (the parent of all children) */ p4est_quadrant_parent (children[0], &parent); step3_get_midpoint (p4est, which_tree, &parent, parentmidpoint); parentdata.u = step3_initial_condition (parentmidpoint, parentdata.du, ctx); h = (double) P4EST_QUADRANT_LEN (parent.level) / (double) P4EST_ROOT_LEN; /* the quadrant's volume is also its volume fraction */ #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif parent.p.user_data = (void *) (&parentdata); err2 = step3_error_sqr_estimate (&parent); if (err2 < global_err2 * vol) { return 1; } else { return 0; } } /** Coarsen by the L2 error estimate of the current state approximation. * * Given the maximum global error, we enforce that each quadrant's portion of * the error must not exceed its fraction of the total volume of the domain * (which is 1). * * This function matches the p4est_coarsen_t prototype that is used by * p4est_coarsen() and p4est_coarsen_ext(). * * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a children * \param [in] children a family of quadrants * * \return 1 if \a children should be coarsened, 0 otherwise. */ static int step3_coarsen_err_estimate (p4est_t * p4est, p4est_topidx_t which_tree, p4est_quadrant_t * children[]) { step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; double global_err = ctx->max_err; double global_err2 = global_err * global_err; double h; step3_data_t *data; double vol, err2, childerr2; double parentu; double diff; int i; h = (double) P4EST_QUADRANT_LEN (children[0]->level) / (double) P4EST_ROOT_LEN; /* the quadrant's volume is also its volume fraction */ #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif /* compute the average */ parentu = 0.; for (i = 0; i < P4EST_CHILDREN; i++) { data = (step3_data_t *) children[i]->p.user_data; parentu += data->u / P4EST_CHILDREN; } err2 = 0.; for (i = 0; i < P4EST_CHILDREN; i++) { childerr2 = step3_error_sqr_estimate (children[i]); if (childerr2 > global_err2 * vol) { return 0; } err2 += step3_error_sqr_estimate (children[i]); diff = (parentu - data->u) * (parentu - data->u); err2 += diff * vol; } if (err2 < global_err2 * (vol * P4EST_CHILDREN)) { return 1; } else { return 0; } } /** Initialize the state variables of incoming quadrants from outgoing * quadrants. * * The functions p4est_refine_ext(), p4est_coarsen_ext(), and * p4est_balance_ext() take as an argument a p4est_replace_t callback function, * which allows one to setup the quadrant data of incoming quadrants from the * data of outgoing quadrants, before the outgoing data is destroyed. This * function matches the p4est_replace_t prototype. * * In this example, we linearly interpolate the state variable of a quadrant * that is refined to its children, and we average the midpoints of children * that are being coarsened to the parent. 
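 * When coarsening, the children's derivative estimates are merged with the
 * same minmod rule that step3_minmod_estimate() applies across faces.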
* * \param [in] p4est the forest * \param [in] which_tree the tree in the forest containing \a children * \param [in] num_outgoing the number of quadrants that are being replaced: * either 1 if a quadrant is being refined, or * P4EST_CHILDREN if a family of children are being * coarsened. * \param [in] outgoing the outgoing quadrants * \param [in] num_incoming the number of quadrants that are being added: * either P4EST_CHILDREN if a quadrant is being refined, or * 1 if a family of children are being * coarsened. * \param [in,out] incoming quadrants whose data are initialized. */ static void step3_replace_quads (p4est_t * p4est, p4est_topidx_t which_tree, int num_outgoing, p4est_quadrant_t * outgoing[], int num_incoming, p4est_quadrant_t * incoming[]) { step3_data_t *parent_data, *child_data; int i, j; double h; double du_old, du_est; if (num_outgoing > 1) { /* this is coarsening */ parent_data = (step3_data_t *) incoming[0]->p.user_data; parent_data->u = 0.; for (j = 0; j < P4EST_DIM; j++) { parent_data->du[j] = step3_invalid; } for (i = 0; i < P4EST_CHILDREN; i++) { child_data = (step3_data_t *) outgoing[i]->p.user_data; parent_data->u += child_data->u / P4EST_CHILDREN; for (j = 0; j < P4EST_DIM; j++) { du_old = parent_data->du[j]; du_est = child_data->du[j]; if (du_old == du_old) { if (du_est * du_old >= 0.) { if (fabs (du_est) < fabs (du_old)) { parent_data->du[j] = du_est; } } else { parent_data->du[j] = 0.; } } else { parent_data->du[j] = du_est; } } } } else { /* this is refinement */ parent_data = (step3_data_t *) outgoing[0]->p.user_data; h = (double) P4EST_QUADRANT_LEN (outgoing[0]->level) / (double) P4EST_ROOT_LEN; for (i = 0; i < P4EST_CHILDREN; i++) { child_data = (step3_data_t *) incoming[i]->p.user_data; child_data->u = parent_data->u; for (j = 0; j < P4EST_DIM; j++) { child_data->du[j] = parent_data->du[j]; child_data->u += (h / 4.) * parent_data->du[j] * ((i & (1 << j)) ? 1. : -1); } } } } /** Callback function for interpolating the solution from quadrant midpoints to * corners. * * The function p4est_iterate() takes as an argument a p4est_iter_volume_t * callback function, which it executes at every local quadrant (see * p4est_iterate.h). This function matches the p4est_iter_volume_t prototype. * * In this example, we use the callback function to interpolate the state * variable to the corners, and write those corners into an array so that they * can be written out. * * \param [in] info the information about this quadrant that has been * populated by p4est_iterate() * \param [in,out] user_data the user_data that was given as an argument to * p4est_iterate: in this case, it points to the * array of corner values that we want to write. * The values for the corner of the quadrant * described by \a info are written during the * execution of the callback. */ static void step3_interpolate_solution (p4est_iter_volume_info_t * info, void *user_data) { sc_array_t *u_interp = (sc_array_t *) user_data; /* we passed the array of values to fill as the user_data in the call to p4est_iterate */ p4est_t *p4est = info->p4est; p4est_quadrant_t *q = info->quad; p4est_topidx_t which_tree = info->treeid; p4est_locidx_t local_id = info->quadid; /* this is the index of q *within its tree's numbering*. 
We want to convert it to its index for all the quadrants on this process, which we do below */ p4est_tree_t *tree; step3_data_t *data = (step3_data_t *) q->p.user_data; double h; p4est_locidx_t arrayoffset; double this_u; double *this_u_ptr; int i, j; tree = p4est_tree_array_index (p4est->trees, which_tree); local_id += tree->quadrants_offset; /* now the id is relative to the MPI process */ arrayoffset = P4EST_CHILDREN * local_id; /* each local quadrant has 2^d (P4EST_CHILDREN) values in u_interp */ h = (double) P4EST_QUADRANT_LEN (q->level) / (double) P4EST_ROOT_LEN; for (i = 0; i < P4EST_CHILDREN; i++) { this_u = data->u; /* loop over the derivative components and linearly interpolate from the * midpoint to the corners */ for (j = 0; j < P4EST_DIM; j++) { /* In order to know whether the direction from the midpoint to the corner is * negative or positive, we take advantage of the fact that the corners * are in z-order. If i is an odd number, it is on the +x side; if it * is even, it is on the -x side. If (i / 2) is an odd number, it is on * the +y side, etc. */ this_u += (h / 2) * data->du[j] * ((i & (1 << j)) ? 1. : -1.); } this_u_ptr = (double *) sc_array_index (u_interp, arrayoffset + i); this_u_ptr[0] = this_u; } } /** Write the state variable to vtk format, one file per process. * * \param [in] p4est the forest, whose quadrant data contains the state * \param [in] timestep the timestep number, used to name the output files */ static void step3_write_solution (cuda4est_t * cuda4est, int timestep) { p4est_t *p4est = cuda4est->p4est; char filename[BUFSIZ] = ""; int retval; sc_array_t *u_interp; p4est_locidx_t numquads; p4est_vtk_context_t *context; snprintf (filename, BUFSIZ, P4EST_STRING "_step3_%04d", timestep); numquads = p4est->local_num_quadrants; /* create a vector with one value for the corner of every local quadrant * (the number of children is always the same as the number of corners) */ u_interp = sc_array_new_size (sizeof (double), numquads * P4EST_CHILDREN); /* Use the iterator to visit every cell and fill in the solution values.
* Using the iterator is not absolutely necessary in this case: we could * also loop over every tree (there is only one tree in this case) and loop * over every quadrant within every tree, but we are trying to demonstrate * the usage of p4est_iterate in this example */ p4est_iterate (p4est, NULL, /* we don't need any ghost quadrants for this loop */ (void *) u_interp, /* pass in u_interp so that we can fill it */ step3_interpolate_solution, /* callback function that interpolates from the cell center to the cell corners, defined above */ NULL, /* there is no callback for the faces between quadrants */ #ifdef P4_TO_P8 NULL, /* there is no callback for the edges between quadrants */ #endif NULL); /* there is no callback for the corners between quadrants */ /* create VTK output context and set its parameters */ context = p4est_vtk_context_new (p4est, filename); p4est_vtk_context_set_scale (context, 0.99); /* quadrant at almost full scale */ /* begin writing the output files */ context = p4est_vtk_write_header (context); SC_CHECK_ABORT (context != NULL, P4EST_STRING "_vtk: Error writing vtk header"); /* do not write the tree id's of each quadrant * (there is only one tree in this example) */ context = p4est_vtk_write_cell_dataf (context, 0, 1, /* do write the refinement level of each quadrant */ 1, /* do write the mpi process id of each quadrant */ 0, /* do not wrap the mpi rank (if this were > 0, the modulus of the rank relative to this number would be written instead of the rank) */ 0, /* there is no custom cell scalar data. */ 0, /* there is no custom cell vector data. */ context); /* mark the end of the variable cell data. */ SC_CHECK_ABORT (context != NULL, P4EST_STRING "_vtk: Error writing cell data"); /* write one scalar field: the solution value */ context = p4est_vtk_write_point_dataf (context, 1, 0, /* write no vector fields */ "solution", u_interp, context); /* mark the end of the variable cell data. */ SC_CHECK_ABORT (context != NULL, P4EST_STRING "_vtk: Error writing cell data"); retval = p4est_vtk_write_footer (context); SC_CHECK_ABORT (!retval, P4EST_STRING "_vtk: Error writing footer"); sc_array_destroy (u_interp); } /** Approximate the divergence of (vu) on each quadrant * * We use piecewise constant approximations on each quadrant, so the value is * always 0. * * Like step3_interpolate_solution(), this function matches the * p4est_iter_volume_t prototype used by p4est_iterate(). * * \param [in] info the information about the quadrant populated by * p4est_iterate() * \param [in] user_data not used */ static void step3_quad_divergence (p4est_iter_volume_info_t * info, void *user_data) { p4est_quadrant_t *q = info->quad; step3_data_t *data = (step3_data_t *) q->p.user_data; data->dudt = 0.; } /** Approximate the flux across a boundary between quadrants. * * We use a very simple upwind numerical flux. * * This function matches the p4est_iter_face_t prototype used by * p4est_iterate(). 
* * \param [in] info the information about the quadrants on either side of the * interface, populated by p4est_iterate() * \param [in] user_data the user_data given to p4est_iterate(): in this case, * it points to the ghost_data array, which contains the * step3_data_t data for all of the ghost cells, which * was populated by p4est_ghost_exchange_data() */ static void step3_upwind_flux (p4est_iter_face_info_t * info, void *user_data) { int i, j; p4est_t *p4est = info->p4est; step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; step3_data_t *ghost_data = (step3_data_t *) user_data; step3_data_t *udata; p4est_quadrant_t *quad; double vdotn = 0.; double uavg; double q; double h, facearea; int which_face; int upwindside; p4est_iter_face_side_t *side[2]; sc_array_t *sides = &(info->sides); /* because there are no boundaries, every face has two sides */ P4EST_ASSERT (sides->elem_count == 2); side[0] = p4est_iter_fside_array_index_int (sides, 0); side[1] = p4est_iter_fside_array_index_int (sides, 1); /* which of the quadrant's faces the interface touches */ which_face = side[0]->face; switch (which_face) { case 0: /* -x side */ vdotn = -ctx->v[0]; break; case 1: /* +x side */ vdotn = ctx->v[0]; break; case 2: /* -y side */ vdotn = -ctx->v[1]; break; case 3: /* +y side */ vdotn = ctx->v[1]; break; #ifdef P4_TO_P8 case 4: /* -z side */ vdotn = -ctx->v[2]; break; case 5: /* +z side */ vdotn = ctx->v[2]; break; #endif } upwindside = vdotn >= 0. ? 0 : 1; /* Because we have non-conforming boundaries, one side of an interface can * either have one large ("full") quadrant or 2^(d-1) small ("hanging") * quadrants: we have to compute the average differently in each case. The * info populated by p4est_iterate() gives us the context we need to * proceed. */ uavg = 0; if (side[upwindside]->is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_HALF; j++) { if (side[upwindside]->is.hanging.is_ghost[j]) { /* *INDENT-OFF* */ udata = (step3_data_t *) &ghost_data[side[upwindside]->is.hanging.quadid[j]]; /* *INDENT-ON* */ } else { udata = (step3_data_t *) side[upwindside]->is.hanging.quad[j]->p.user_data; } uavg += udata->u; } uavg /= P4EST_HALF; } else { if (side[upwindside]->is.full.is_ghost) { udata = (step3_data_t *) & ghost_data[side[upwindside]->is.full.quadid]; } else { udata = (step3_data_t *) side[upwindside]->is.full.quad->p.user_data; } uavg = udata->u; } /* flux from side 0 to side 1 */ q = vdotn * uavg; for (i = 0; i < 2; i++) { if (side[i]->is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_HALF; j++) { quad = side[i]->is.hanging.quad[j]; h = (double) P4EST_QUADRANT_LEN (quad->level) / (double) P4EST_ROOT_LEN; #ifndef P4_TO_P8 facearea = h; #else facearea = h * h; #endif if (!side[i]->is.hanging.is_ghost[j]) { udata = (step3_data_t *) quad->p.user_data; if (i == upwindside) { udata->dudt += vdotn * udata->u * facearea * (i ? 1. : -1.); } else { udata->dudt += q * facearea * (i ? 1. : -1.); } } } } else { quad = side[i]->is.full.quad; h = (double) P4EST_QUADRANT_LEN (quad->level) / (double) P4EST_ROOT_LEN; #ifndef P4_TO_P8 facearea = h; #else facearea = h * h; #endif if (!side[i]->is.full.is_ghost) { udata = (step3_data_t *) quad->p.user_data; udata->dudt += q * facearea * (i ? 1. : -1.); } } } } /** Compute the new value of the state from the computed time derivative. * * We use a simple forward Euler scheme. * * The derivative was computed by a p4est_iterate() loop by the callbacks * step3_quad_divergence() and step3_upwind_flux(). 
Now we multiply this by * the timestep and add to the current solution. * * This function matches the p4est_iter_volume_t prototype used by * p4est_iterate(). * * \param [in] info the information about this quadrant that has been * populated by p4est_iterate() * \param [in] user_data the user_data given to p4est_iterate(): in this case, * it points to the timestep. */ static void step3_timestep_update (p4est_iter_volume_info_t * info, void *user_data) { p4est_quadrant_t *q = info->quad; step3_data_t *data = (step3_data_t *) q->p.user_data; double dt = *((double *) user_data); double vol; double h = (double) P4EST_QUADRANT_LEN (q->level) / (double) P4EST_ROOT_LEN; #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif data->u += dt * data->dudt / vol; } /** Reset the approximate derivatives. * * p4est_iterate() has an invariant to the order of callback execution: the * p4est_iter_volume_t callback will be executed on a quadrant before the * p4est_iter_face_t callbacks are executed on its faces. This function * resets the derivative stored in the quadrant's data before * step3_minmod_estimate() updates the derivative based on the face neighbors. * * This function matches the p4est_iter_volume_t prototype used by * p4est_iterate(). * * \param [in] info the information about this quadrant that has been * populated by p4est_iterate() * \param [in] user_data not used */ static void step3_reset_derivatives (p4est_iter_volume_info_t * info, void *user_data) { p4est_quadrant_t *q = info->quad; step3_data_t *data = (step3_data_t *) q->p.user_data; int j; for (j = 0; j < P4EST_DIM; j++) { data->du[j] = step3_invalid; } } // compute max __device__ void step3_cuda_reset_derivatives ( p4est_t *p4est, p4est_ghost_t *ghost_layer, p4est_quadrant_t *quad, p4est_locidx_t quadid, p4est_topidx_t treeid, void *user_data ) { p4est_quadrant_t *q = quad; step3_data_t *data = (step3_data_t *) q->p.user_data; int j; //printf("step3_invalid: %f\n", step3_invalid); for (j = 0; j < P4EST_DIM; j++) { data->du[j] = step3_invalid; } } __global__ void setup_step3_cuda_reset_derivatives_kernel(cuda_iter_volume_t *callback) { *callback = step3_cuda_reset_derivatives; } /** For two quadrants on either side of a face, estimate the derivative normal * to the face. * * This function matches the p4est_iter_face_t prototype used by * p4est_iterate(). 
* * \param [in] info the information about this quadrant that has been * populated by p4est_iterate() * \param [in] user_data the user_data given to p4est_iterate(): in this case, * it points to the ghost_data array, which contains the * step3_data_t data for all of the ghost cells, which * was populated by p4est_ghost_exchange_data() */ static void step3_minmod_estimate (p4est_iter_face_info_t * info, void *user_data) { int i, j; p4est_iter_face_side_t *side[2]; sc_array_t *sides = &(info->sides); step3_data_t *ghost_data = (step3_data_t *) user_data; step3_data_t *udata; p4est_quadrant_t *quad; double uavg[2]; double h[2]; double du_est, du_old; int which_dir; /* because there are no boundaries, every face has two sides */ P4EST_ASSERT (sides->elem_count == 2); side[0] = p4est_iter_fside_array_index_int (sides, 0); side[1] = p4est_iter_fside_array_index_int (sides, 1); which_dir = side[0]->face / 2; /* 0 == x, 1 == y, 2 == z */ for (i = 0; i < 2; i++) { uavg[i] = 0; if (side[i]->is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_HALF; j++) { quad = side[i]->is.hanging.quad[j]; h[i] = (double) P4EST_QUADRANT_LEN (quad->level) / (double) P4EST_ROOT_LEN; if (side[i]->is.hanging.is_ghost[j]) { udata = &ghost_data[side[i]->is.hanging.quadid[j]]; } else { udata = (step3_data_t *) side[i]->is.hanging.quad[j]->p.user_data; } uavg[i] += udata->u; } uavg[i] /= P4EST_HALF; } else { quad = side[i]->is.full.quad; h[i] = (double) P4EST_QUADRANT_LEN (quad->level) / (double) P4EST_ROOT_LEN; if (side[i]->is.full.is_ghost) { udata = &ghost_data[side[i]->is.full.quadid]; } else { udata = (step3_data_t *) side[i]->is.full.quad->p.user_data; } uavg[i] = udata->u; } } du_est = (uavg[1] - uavg[0]) / ((h[0] + h[1]) / 2.); for (i = 0; i < 2; i++) { if (side[i]->is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_HALF; j++) { quad = side[i]->is.hanging.quad[j]; if (!side[i]->is.hanging.is_ghost[j]) { udata = (step3_data_t *) quad->p.user_data; du_old = udata->du[which_dir]; if (du_old == du_old) { /* there has already been an update */ if (du_est * du_old >= 0.) { if (fabs (du_est) < fabs (du_old)) { udata->du[which_dir] = du_est; } } else { udata->du[which_dir] = 0.; } } else { udata->du[which_dir] = du_est; } } } } else { quad = side[i]->is.full.quad; if (!side[i]->is.full.is_ghost) { udata = (step3_data_t *) quad->p.user_data; du_old = udata->du[which_dir]; if (du_old == du_old) { /* there has already been an update */ if (du_est * du_old >= 0.) 
{ if (fabs (du_est) < fabs (du_old)) { udata->du[which_dir] = du_est; } } else { udata->du[which_dir] = 0.; } } else { udata->du[which_dir] = du_est; } } } } } __device__ void step3_cuda_minmod_estimate ( p4est_t* p4est, p4est_ghost_t* ghost_layer, p4est_iter_face_side_t* side, void *user_data) { int i, j; //p4est_iter_face_side_t *side[2]; //sc_array_t *sides = &(info->sides); step3_data_t *ghost_data = (step3_data_t *) user_data; step3_data_t *udata; p4est_quadrant_t *quad; double uavg[2]; double h[2]; double du_est, du_old; int which_dir; /* because there are no boundaries, every face has two sides */ //P4EST_ASSERT (sides->elem_count == 2); //side[0] = p4est_iter_fside_array_index_int (sides, 0); //side[1] = p4est_iter_fside_array_index_int (sides, 1); which_dir = side[0].face / 2; /* 0 == x, 1 == y, 2 == z */ for (i = 0; i < 2; i++) { uavg[i] = 0; if (side[i].is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_DEVICE_HALF; j++) { quad = side[i].is.hanging.quad[j]; h[i] = (double) P4EST_DEVICE_QUADRANT_LEN (quad->level) / (double) P4EST_DEVICE_ROOT_LEN; if (side[i].is.hanging.is_ghost[j]) { udata = &ghost_data[side[i].is.hanging.quadid[j]]; } else { udata = (step3_data_t *) side[i].is.hanging.quad[j]->p.user_data; } uavg[i] += udata->u; } uavg[i] /= P4EST_DEVICE_HALF; } else { quad = side[i].is.full.quad; h[i] = (double) P4EST_DEVICE_QUADRANT_LEN (quad->level) / (double) P4EST_DEVICE_ROOT_LEN; if (side[i].is.full.is_ghost) { udata = &ghost_data[side[i].is.full.quadid]; } else { udata = (step3_data_t *) side[i].is.full.quad->p.user_data; } uavg[i] = udata->u; } } du_est = (uavg[1] - uavg[0]) / ((h[0] + h[1]) / 2.); for (i = 0; i < 2; i++) { if (side[i].is_hanging) { /* there are 2^(d-1) (P4EST_DEVICE_HALF) subfaces */ for (j = 0; j < P4EST_DEVICE_HALF; j++) { quad = side[i].is.hanging.quad[j]; if (!side[i].is.hanging.is_ghost[j]) { udata = (step3_data_t *) quad->p.user_data; du_old = udata->du[which_dir]; if (du_old == du_old) { /* there has already been an update */ if (du_est * du_old >= 0.) { if (fabs (du_est) < fabs (du_old)) { udata->du[which_dir] = du_est; } } else { udata->du[which_dir] = 0.; } } else { udata->du[which_dir] = du_est; } } } } else { quad = side[i].is.full.quad; if (!side[i].is.full.is_ghost) { udata = (step3_data_t *) quad->p.user_data; du_old = udata->du[which_dir]; if (du_old == du_old) { /* there has already been an update */ if (du_est * du_old >= 0.) { if (fabs (du_est) < fabs (du_old)) { udata->du[which_dir] = du_est; } } else { udata->du[which_dir] = 0.; } } else { udata->du[which_dir] = du_est; } } } } } __global__ void setup_step3_cuda_minmod_estimate_kernel(cuda_iter_face_t *callback) { *callback = step3_cuda_minmod_estimate; } /** Compute the maximum state value. * * This function updates the maximum value from the value of a single cell. * * This function matches the p4est_iter_volume_t prototype used by * p4est_iterate(). 
* * \param [in] info the information about this quadrant that has been * populated by p4est_iterate() * \param [in,out] user_data the user_data given to p4est_iterate(): in this case, * it points to the maximum value that will be updated */ static void step3_compute_max (p4est_iter_volume_info_t * info, void *user_data) { p4est_quadrant_t *q = info->quad; step3_data_t *data = (step3_data_t *) q->p.user_data; double umax = *((double *) user_data); umax = SC_MAX (data->u, umax); *((double *) user_data) = umax; } __device__ static double atomicMax(double* address, double val) { unsigned long long int* address_as_i = (unsigned long long int*) address; unsigned long long int old = *address_as_i, assumed; do { assumed = old; old = ::atomicCAS(address_as_i, assumed, __double_as_longlong(::fmax(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } // compute max __device__ void step3_cuda_compute_max ( p4est_t *p4est, p4est_ghost_t *ghost_layer, p4est_quadrant_t *quad, p4est_locidx_t quadid, p4est_topidx_t treeid, void *user_data ) { p4est_quadrant_t *q = quad; step3_data_t *data = (step3_data_t *) q->p.user_data; // i don't know printf(""); atomicMax((double *)user_data, data->u); } __global__ void setup_step3_cuda_compute_max_kernel(cuda_iter_volume_t *callback) { *callback = step3_cuda_compute_max; } void step3_compute_max_alloc_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_compute_max_user_data_to_cuda_t *user_data_to_cuda = (step3_compute_max_user_data_to_cuda_t*) malloc(sizeof(step3_compute_max_user_data_to_cuda_t)); double *user_data = (double*) user_data_api->user_data; double *d_compute_max_user_data; gpuErrchk(cudaMalloc((void**)&d_compute_max_user_data, sizeof(double))); gpuErrchk(cudaMemcpy(d_compute_max_user_data, user_data, sizeof(double), cudaMemcpyHostToDevice)); user_data_to_cuda->d_user_data = d_compute_max_user_data; user_data_api->cuda_memory_allocating_info = user_data_to_cuda; } void step3_compute_max_free_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_compute_max_user_data_to_cuda_t *allocate_info = (step3_compute_max_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; gpuErrchk(cudaFree(allocate_info->d_user_data)); } void* step3_compute_max_get_cuda_allocated_user_data(user_data_for_cuda_t* user_data_api) { step3_compute_max_user_data_to_cuda_t *allocate_info = (step3_compute_max_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; return (void*) allocate_info->d_user_data; } void step3_compute_max_copy_user_data_from_device(user_data_for_cuda_t* user_data_api) { step3_compute_max_user_data_to_cuda_t *allocate_info = (step3_compute_max_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; gpuErrchk(cudaMemcpy(user_data_api->user_data, allocate_info->d_user_data, sizeof(double), cudaMemcpyDeviceToHost)); } // compute max // timestep update __device__ void step3_cuda_timestep_update ( p4est_t *p4est, p4est_ghost_t *ghost_layer, p4est_quadrant_t *quad, p4est_locidx_t quadid, p4est_topidx_t treeid, void *user_data ) { p4est_quadrant_t *q = quad; step3_data_t *data = (step3_data_t *) q->p.user_data; double dt = *((double *) user_data); double vol; double h = (double) P4EST_QUADRANT_LEN (q->level) / (double) P4EST_ROOT_LEN; #ifdef P4_TO_P8 vol = h * h * h; #else vol = h * h; #endif data->u += dt * data->dudt / vol; } __global__ void setup_step3_cuda_timestep_update_kernel(cuda_iter_volume_t *callback) { *callback = step3_cuda_timestep_update; } __device__ void 
step3_cuda_quad_divergence ( p4est_t *p4est, p4est_ghost_t *ghost_layer, p4est_quadrant_t *quad, p4est_locidx_t quadid, p4est_topidx_t treeid, void *user_data ) { p4est_quadrant_t *q = quad; step3_data_t *data = (step3_data_t *) q->p.user_data; data->dudt = 0.; } __global__ void setup_step3_cuda_quad_divergence_kernel(cuda_iter_volume_t *callback) { *callback = step3_cuda_quad_divergence; } __device__ void step3_cuda_upwind_flux ( p4est_t* p4est, p4est_ghost_t* ghost_layer, p4est_iter_face_side_t* side, void *user_data) { int i, j; step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; step3_data_t *ghost_data = (step3_data_t *) user_data; step3_data_t *udata; p4est_quadrant_t *quad; double vdotn = 0.; double uavg; double q; double h, facearea; int which_face; int upwindside; /* because there are no boundaries, every face has two sides */ //P4EST_ASSERT (sides->elem_count == 2); /* which of the quadrant's faces the interface touches */ which_face = side[0].face; switch (which_face) { case 0: /* -x side */ vdotn = -ctx->v[0]; break; case 1: /* +x side */ vdotn = ctx->v[0]; break; case 2: /* -y side */ vdotn = -ctx->v[1]; break; case 3: /* +y side */ vdotn = ctx->v[1]; break; #ifdef P4_TO_P8 case 4: /* -z side */ vdotn = -ctx->v[2]; break; case 5: /* +z side */ vdotn = ctx->v[2]; break; #endif } upwindside = vdotn >= 0. ? 0 : 1; /* Because we have non-conforming boundaries, one side of an interface can * either have one large ("full") quadrant or 2^(d-1) small ("hanging") * quadrants: we have to compute the average differently in each case. The * info populated by p4est_iterate() gives us the context we need to * proceed. */ uavg = 0; if (side[upwindside].is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_DEVICE_HALF; j++) { if (side[upwindside].is.hanging.is_ghost[j]) { /* *INDENT-OFF* */ udata = (step3_data_t *) &ghost_data[side[upwindside].is.hanging.quadid[j]]; /* *INDENT-ON* */ } else { udata = (step3_data_t *) side[upwindside].is.hanging.quad[j]->p.user_data; } uavg += udata->u; } uavg /= P4EST_DEVICE_HALF; } else { if (side[upwindside].is.full.is_ghost) { udata = (step3_data_t *) & ghost_data[side[upwindside].is.full.quadid]; } else { udata = (step3_data_t *) side[upwindside].is.full.quad->p.user_data; } uavg = udata->u; } /* flux from side 0 to side 1 */ q = vdotn * uavg; for (i = 0; i < 2; i++) { if (side[i].is_hanging) { /* there are 2^(d-1) (P4EST_HALF) subfaces */ for (j = 0; j < P4EST_DEVICE_HALF; j++) { quad = side[i].is.hanging.quad[j]; h = (double) P4EST_DEVICE_QUADRANT_LEN (quad->level) / (double) P4EST_DEVICE_ROOT_LEN; #ifndef P4_TO_P8 facearea = h; #else facearea = h * h; #endif if (!side[i].is.hanging.is_ghost[j]) { udata = (step3_data_t *) quad->p.user_data; if (i == upwindside) { udata->dudt += vdotn * udata->u * facearea * (i ? 1. : -1.); } else { udata->dudt += q * facearea * (i ? 1. : -1.); } } } } else { quad = side[i].is.full.quad; h = (double) P4EST_DEVICE_QUADRANT_LEN (quad->level) / (double) P4EST_DEVICE_ROOT_LEN; #ifndef P4_TO_P8 facearea = h; #else facearea = h * h; #endif if (!side[i].is.full.is_ghost) { udata = (step3_data_t *) quad->p.user_data; udata->dudt += q * facearea * (i ? 1. 
: -1.); } } } } __global__ void setup_step3_cuda_upwind_flux_kernel(cuda_iter_face_t *callback) { *callback = step3_cuda_upwind_flux; } void step3_timestep_update_alloc_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_timestep_update_user_data_to_cuda_t *user_data_to_cuda = (step3_timestep_update_user_data_to_cuda_t*) malloc(sizeof(step3_timestep_update_user_data_to_cuda_t)); double *user_data = (double*) user_data_api->user_data; double *d_timestep_update_user_data; gpuErrchk(cudaMalloc((void**)&d_timestep_update_user_data, sizeof(double))); gpuErrchk(cudaMemcpy(d_timestep_update_user_data, user_data, sizeof(double), cudaMemcpyHostToDevice)); user_data_to_cuda->d_user_data = d_timestep_update_user_data; user_data_api->cuda_memory_allocating_info = user_data_to_cuda; } void step3_timestep_update_free_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_timestep_update_user_data_to_cuda_t *allocate_info = (step3_timestep_update_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; gpuErrchk(cudaFree(allocate_info->d_user_data)); } void* step3_timestep_update_get_cuda_allocated_user_data(user_data_for_cuda_t* user_data_api) { step3_timestep_update_user_data_to_cuda_t *allocate_info = (step3_timestep_update_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; return (void*) allocate_info->d_user_data; } void step3_timestep_update_copy_user_data_from_device(user_data_for_cuda_t* user_data_api) { step3_timestep_update_user_data_to_cuda_t *allocate_info = (step3_timestep_update_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; gpuErrchk(cudaMemcpy(user_data_api->user_data, allocate_info->d_user_data, sizeof(double), cudaMemcpyDeviceToHost)); } void step3_ghost_data_alloc_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_ghost_data_user_data_to_cuda_t *user_data_to_cuda = (step3_ghost_data_user_data_to_cuda_t*) malloc(sizeof(step3_ghost_data_user_data_to_cuda_t)); step3_data_t *user_data = (step3_data_t*) user_data_api->user_data; step3_data_t *d_ghost_data_user_data; size_t alloc_memory_size = user_data_api->user_data_elem_count * sizeof(step3_data_t); gpuErrchk(cudaMalloc((void**)&d_ghost_data_user_data, alloc_memory_size)); gpuErrchk(cudaMemcpy(d_ghost_data_user_data, user_data, alloc_memory_size, cudaMemcpyHostToDevice)); user_data_to_cuda->d_user_data = d_ghost_data_user_data; user_data_api->cuda_memory_allocating_info = user_data_to_cuda; } void step3_ghost_data_free_cuda_memory(user_data_for_cuda_t* user_data_api) { step3_ghost_data_user_data_to_cuda_t *allocate_info = (step3_ghost_data_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; if(allocate_info->d_user_data) { gpuErrchk(cudaFree(allocate_info->d_user_data)); } } void* step3_ghost_data_get_cuda_allocated_user_data(user_data_for_cuda_t* user_data_api) { step3_ghost_data_user_data_to_cuda_t *allocate_info = (step3_ghost_data_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; return (void*) allocate_info->d_user_data; } void step3_ghost_data_copy_user_data_from_device(user_data_for_cuda_t* user_data_api) { if(user_data_api->user_data_elem_count) { step3_ghost_data_user_data_to_cuda_t *allocate_info = (step3_ghost_data_user_data_to_cuda_t*) user_data_api->cuda_memory_allocating_info; gpuErrchk(cudaMemcpy(user_data_api->user_data, allocate_info->d_user_data, sizeof(double), cudaMemcpyDeviceToHost)); } } // timestep update /** Compute the timestep. 
* * Find the smallest quadrant and scale the timestep based on that length and * the advection velocity. * * \param [in] p4est the forest * \return the timestep. */ static double step3_get_timestep (p4est_t * p4est) { step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; p4est_topidx_t t, flt, llt; p4est_tree_t *tree; int max_level, global_max_level; int mpiret, i; double min_h, vnorm; double dt; /* compute the timestep by finding the smallest quadrant */ flt = p4est->first_local_tree; llt = p4est->last_local_tree; max_level = 0; for (t = flt; t <= llt; t++) { tree = p4est_tree_array_index (p4est->trees, t); max_level = SC_MAX (max_level, tree->maxlevel); } mpiret = sc_MPI_Allreduce (&max_level, &global_max_level, 1, sc_MPI_INT, sc_MPI_MAX, p4est->mpicomm); SC_CHECK_MPI (mpiret); min_h = (double) P4EST_QUADRANT_LEN (global_max_level) / (double) P4EST_ROOT_LEN; vnorm = 0; for (i = 0; i < P4EST_DIM; i++) { vnorm += ctx->v[i] * ctx->v[i]; } vnorm = sqrt (vnorm); dt = min_h / 2. / vnorm; return dt; } /** Timestep the advection problem. * * Update the state, refine, repartition, and write the solution to file. * * \param [in,out] p4est the forest, whose state is updated * \param [in] time the end time */ static void step3_timestep (cuda4est_t *cuda4est, double time) { double ghost_allocation = 0; double p4est_reallocation = 0; double quadrants_reallocation = 0; double faces_reallocation = 0; double reset_derivatives_running = 0; double compute_max_running = 0; double flux_compute_running = 0; double timestep_update_running = 0; double downloading_quads = 0; bool quadrants_is_fresh = false; clock_t start = clock(); clock_t stop = clock(); double duration = (double)(stop - start) / CLOCKS_PER_SEC; p4est_t * p4est = cuda4est->p4est; double t = 0.; double dt = 0.; int i; step3_data_t *ghost_data; step3_ctx_t *ctx = (step3_ctx_t *) p4est->user_pointer; int refine_period = ctx->refine_period; int repartition_period = ctx->repartition_period; int write_period = ctx->write_period; int recursive = 0; int allowed_level = P4EST_QMAXLEVEL; int allowcoarsening = 1; int callbackorphans = 0; int mpiret; double orig_max_err = ctx->max_err; double umax, global_umax; p4est_ghost_t *ghost; cuda_iter_volume_api_t *step3_cuda_compute_max_api = (cuda_iter_volume_api_t*)malloc(sizeof(cuda_iter_volume_api_t)); step3_cuda_compute_max_api->callback = step3_cuda_compute_max; step3_cuda_compute_max_api->setup_kernel = setup_step3_cuda_compute_max_kernel; user_data_for_cuda_t *step3_user_data_api_compute_max = (user_data_for_cuda_t*) malloc(sizeof(user_data_for_cuda_t)); step3_user_data_api_compute_max->user_data = &umax; step3_user_data_api_compute_max->alloc_cuda_memory = step3_compute_max_alloc_cuda_memory; step3_user_data_api_compute_max->free_cuda_memory = step3_compute_max_free_cuda_memory; step3_user_data_api_compute_max->get_cuda_allocated_user_data = step3_compute_max_get_cuda_allocated_user_data; step3_user_data_api_compute_max->copy_user_data_from_device = step3_compute_max_copy_user_data_from_device; cuda_iter_volume_api_t *step3_cuda_timestep_update_api = (cuda_iter_volume_api_t*)malloc(sizeof(cuda_iter_volume_api_t)); step3_cuda_timestep_update_api->callback = step3_cuda_timestep_update; step3_cuda_timestep_update_api->setup_kernel = setup_step3_cuda_timestep_update_kernel; user_data_for_cuda_t *step3_user_data_api_timestep_update = (user_data_for_cuda_t*) malloc(sizeof(user_data_for_cuda_t)); step3_user_data_api_timestep_update->user_data = &dt; step3_user_data_api_timestep_update->alloc_cuda_memory = 
step3_timestep_update_alloc_cuda_memory; step3_user_data_api_timestep_update->free_cuda_memory = step3_timestep_update_free_cuda_memory; step3_user_data_api_timestep_update->get_cuda_allocated_user_data = step3_timestep_update_get_cuda_allocated_user_data; step3_user_data_api_timestep_update->copy_user_data_from_device = step3_timestep_update_copy_user_data_from_device; cuda_iter_volume_api_t *step3_cuda_quad_divergence_api = (cuda_iter_volume_api_t*)malloc(sizeof(cuda_iter_volume_api_t)); step3_cuda_quad_divergence_api->callback= step3_cuda_quad_divergence; step3_cuda_quad_divergence_api->setup_kernel = setup_step3_cuda_quad_divergence_kernel; cuda_iter_face_api_t *step3_cuda_upwind_flux_api = (cuda_iter_face_api_t*)malloc(sizeof(cuda_iter_face_api_t)); step3_cuda_upwind_flux_api->callback = step3_cuda_upwind_flux; step3_cuda_upwind_flux_api->setup_kernel = setup_step3_cuda_upwind_flux_kernel; user_data_for_cuda_t *step3_user_data_api_ghost_data = (user_data_for_cuda_t*)malloc(sizeof(user_data_for_cuda_t)); step3_user_data_api_ghost_data->user_data_elem_count = 0; step3_user_data_api_ghost_data->alloc_cuda_memory = step3_ghost_data_alloc_cuda_memory; step3_user_data_api_ghost_data->free_cuda_memory = step3_ghost_data_free_cuda_memory; step3_user_data_api_ghost_data->get_cuda_allocated_user_data = step3_ghost_data_get_cuda_allocated_user_data; step3_user_data_api_ghost_data->copy_user_data_from_device = step3_ghost_data_copy_user_data_from_device; cuda_iter_volume_api_t *step3_cuda_reset_derivatives_api = (cuda_iter_volume_api_t*)malloc(sizeof(cuda_iter_volume_api_t)); step3_cuda_reset_derivatives_api->callback = step3_cuda_reset_derivatives; step3_cuda_reset_derivatives_api->setup_kernel = setup_step3_cuda_reset_derivatives_kernel; cuda_iter_face_api_t *step3_cuda_minmod_estimate_api = (cuda_iter_face_api_t*)malloc(sizeof(cuda_iter_face_api_t)); step3_cuda_minmod_estimate_api->callback = step3_cuda_minmod_estimate; step3_cuda_minmod_estimate_api->setup_kernel = setup_step3_cuda_minmod_estimate_kernel; /* create the ghost quadrants */ ghost = p4est_ghost_new (p4est, P4EST_CONNECT_FULL); /* create space for storing the ghost data */ ghost_data = P4EST_ALLOC (step3_data_t, ghost->ghosts.elem_count); /* synchronize the ghost data */ p4est_ghost_exchange_data (p4est, ghost, ghost_data); start = clock(); p4est_ghost_to_cuda_t* malloc_ghost = mallocForGhost(p4est, ghost); exchangeGhostDataToCuda(malloc_ghost, ghost); cuda4est->ghost_to_cuda = malloc_ghost; step3_user_data_api_ghost_data->user_data = ghost_data; step3_user_data_api_ghost_data->user_data_elem_count = ghost->ghosts.elem_count; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; ghost_allocation+=duration; // p4est memory allocation start start = clock(); p4est_cuda_memory_allocate_info_t *p4est_memory_allocate_info = p4est_memory_alloc(cuda4est); cuda4est->p4est_memory_allocate_info = p4est_memory_allocate_info; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; p4est_reallocation+=duration; // p4est memory allocation end // quadrants memory allocation start sc_array_t *trees = p4est->trees; p4est_tree_t *tree; sc_array_t *quadrants; start = clock(); tree = p4est_tree_array_index (trees, p4est->first_local_tree); quadrants = &(tree->quadrants); p4est_quadrants_to_cuda_t *quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); cuda4est->quads_to_cuda = quads_to_cuda; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; quadrants_reallocation+=duration; 
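/* faces/sides allocation: mallocFacesSides() below presumably builds the
 * device-side face and side arrays that the cuda_iterate() face callbacks
 * (upwind flux, minmod estimate) read; its cost is accumulated into
 * faces_reallocation, mirroring how the block above accumulates
 * quadrants_reallocation. */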
start = clock(); mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; faces_reallocation+=duration; // quadrants memory allocation end start= clock(); cuda_iterate (cuda4est, ghost, (void *) ghost_data, step3_user_data_api_ghost_data, step3_reset_derivatives, step3_cuda_reset_derivatives_api, step3_minmod_estimate, step3_cuda_minmod_estimate_api, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; reset_derivatives_running+=duration; /* initialize du/dx estimates */ /* p4est_iterate (p4est, ghost, (void *) ghost_data, step3_reset_derivatives, step3_minmod_estimate, #ifdef P4_TO_P8 NULL, #endif NULL); */ quadrants_is_fresh = false; for (t = 0., i = 0; t < time; t += dt, i++) { P4EST_GLOBAL_PRODUCTIONF ("time %f\n", t); /* refine */ if (!(i % refine_period)) { if (i) { start = clock(); if(!quadrants_is_fresh) { downloadQuadrantsFromCuda(quads_to_cuda, quadrants, cuda4est->quad_user_data_api); quadrants_is_fresh = true; } stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; downloading_quads+=duration; /* compute umax */ umax = 0.; /* initialize derivative estimates */ //start = clock(); start=clock(); p4est_iterate (p4est, NULL, (void *) &umax, step3_compute_max, NULL, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; compute_max_running+=duration; //stop = clock(); //duration = (double)(stop - start) / CLOCKS_PER_SEC; //cout << "Time taken by p4est_find_max: " //<< duration << " seconds" << endl; /* start = clock(); cuda_iterate (cuda4est, NULL, &umax, step3_user_data_api_compute_max, step3_compute_max, step3_cuda_compute_max_api, NULL, NULL, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; cout << "Time taken by cuda_find_max: " << duration << " seconds" << endl; */ mpiret = sc_MPI_Allreduce (&umax, &global_umax, 1, sc_MPI_DOUBLE, sc_MPI_MAX, p4est->mpicomm); SC_CHECK_MPI (mpiret); ctx->max_err = orig_max_err * global_umax; P4EST_GLOBAL_PRODUCTIONF ("u_max %f\n", global_umax); /* adapt */ p4est_refine_ext (p4est, recursive, allowed_level, step3_refine_err_estimate, NULL, step3_replace_quads); p4est_coarsen_ext (p4est, recursive, callbackorphans, step3_coarsen_err_estimate, NULL, step3_replace_quads); p4est_balance_ext (p4est, P4EST_CONNECT_FACE, NULL, step3_replace_quads); p4est_ghost_destroy (ghost); P4EST_FREE (ghost_data); ghost = NULL; ghost_data = NULL; step3_user_data_api_ghost_data->user_data = ghost_data; step3_user_data_api_ghost_data->user_data_elem_count = 0; // p4est memory reallocation start start = clock(); //p4est_memory_free(p4est_memory_allocate_info, cuda4est->quad_user_data_api); //p4est_memory_allocate_info = p4est_memory_alloc(cuda4est); //cuda4est->p4est_memory_allocate_info = p4est_memory_allocate_info; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; p4est_reallocation+=duration; //cout << "Time taken by p4est_reallocation: " //<< duration << " seconds" << endl; // p4est memory reallocation end // quadrants memory reallocation start start = clock(); freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); tree = p4est_tree_array_index (trees, p4est->first_local_tree); quadrants = &(tree->quadrants); quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); cuda4est->quads_to_cuda = quads_to_cuda; stop = clock(); duration = (double)(stop - start) 
/ CLOCKS_PER_SEC; quadrants_reallocation+=duration; //cout << "Time taken by quadrants_reallocation: " //<< duration << " seconds" << endl; start = clock(); mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; faces_reallocation+=duration; //cout << "Time taken by faces_reallocation: " //<< duration << " seconds" << endl; // quadrants memory reallocation end } dt = step3_get_timestep (p4est); } long revision_before_exchange = p4est->revision; long revision_after_exchange = p4est->revision; /* repartition */ if (i && !(i % repartition_period)) { p4est_partition (p4est, allowcoarsening, NULL); revision_after_exchange = p4est->revision; if(revision_after_exchange != revision_before_exchange) { start=clock(); freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); tree = p4est_tree_array_index (trees, p4est->first_local_tree); quadrants = &(tree->quadrants); quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); cuda4est->quads_to_cuda = quads_to_cuda; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; quadrants_reallocation+=duration; } if (ghost) { p4est_ghost_destroy (ghost); P4EST_FREE (ghost_data); ghost = NULL; ghost_data = NULL; step3_user_data_api_ghost_data->user_data = ghost_data; step3_user_data_api_ghost_data->user_data_elem_count = 0; } } /* write out solution */ if (!(i % write_period)) { start = clock(); if(!quadrants_is_fresh) { downloadQuadrantsFromCuda(quads_to_cuda, quadrants, cuda4est->quad_user_data_api); quadrants_is_fresh = true; } stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; downloading_quads+=duration; step3_write_solution (cuda4est, i); } /* synchronize the ghost data */ if (!ghost) { ghost = p4est_ghost_new (p4est, P4EST_CONNECT_FULL); ghost_data = P4EST_ALLOC (step3_data_t, ghost->ghosts.elem_count); p4est_ghost_exchange_data (p4est, ghost, ghost_data); start=clock(); freeMemoryForGhost(malloc_ghost); malloc_ghost = mallocForGhost(p4est, ghost); exchangeGhostDataToCuda(malloc_ghost, ghost); cuda4est->ghost_to_cuda = malloc_ghost; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; ghost_allocation+=duration; start = clock(); if(revision_after_exchange == revision_before_exchange) { freeMemoryForFacesSides(quads_to_cuda); } mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); step3_user_data_api_ghost_data->user_data = ghost_data; step3_user_data_api_ghost_data->user_data_elem_count = ghost->ghosts.elem_count; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; faces_reallocation+=duration; } // quadrants memory reallocation start //freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); //tree = p4est_tree_array_index (trees, p4est->first_local_tree); //quadrants = &(tree->quadrants); //quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); //cuda4est->quads_to_cuda = quads_to_cuda; //mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); // quadrants memory reallocation end /* compute du/dt */ /* *INDENT-OFF* */ //downloadQuadrantsFromCuda(quads_to_cuda, quadrants, cuda4est->quad_user_data_api); start = clock(); cuda_iterate (cuda4est, ghost, (void *) ghost_data, step3_user_data_api_ghost_data, step3_quad_divergence, step3_cuda_quad_divergence_api, step3_upwind_flux, step3_cuda_upwind_flux_api, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - 
start) / CLOCKS_PER_SEC; flux_compute_running+=duration; //cout << "Time taken by cuda_iterate: " // << duration << " seconds" << endl; //download cuda quadrants user data start //downloadQuadrantsFromCuda(quads_to_cuda, quadrants, cuda4est->quad_user_data_api); // download cuda quadrants user data end /* start = clock(); p4est_iterate (p4est, ghost, (void *) ghost_data, step3_quad_divergence, step3_upwind_flux, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; cout << "Time taken by p4est_iterate: " << duration << " seconds" << endl; */ /* *INDENT-ON* */ // quadrants memory reallocation start //freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); //tree = p4est_tree_array_index (trees, p4est->first_local_tree); //quadrants = &(tree->quadrants); //quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); //cuda4est->quads_to_cuda = quads_to_cuda; //mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); // quadrants memory reallocation end start = clock(); cuda_iterate (cuda4est, NULL, (void *) &dt, step3_user_data_api_timestep_update, step3_timestep_update, step3_cuda_timestep_update_api, NULL, NULL, #ifdef P4_TO_P8 NULL, #endif NULL ); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; timestep_update_running+=duration; //cout << "Time taken by cuda_timestep_update: " // << duration << " seconds" << endl; // download cuda quadrants user data start //downloadQuadrantsFromCuda(quads_to_cuda, quadrants, cuda4est->quad_user_data_api); // download cuda quadrants user data end /* update u */ /* start = clock(); p4est_iterate (p4est, NULL, (void *) &dt, step3_timestep_update, NULL, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; cout << "Time taken by p4est_timestep_update: " << duration << " seconds" << endl; */ // quadrants memory reallocation start //freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); //tree = p4est_tree_array_index (trees, p4est->first_local_tree); //quadrants = &(tree->quadrants); //quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); //cuda4est->quads_to_cuda = quads_to_cuda; //mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); // quadrants memory reallocation end /* synchronize the ghost data */ p4est_ghost_exchange_data (p4est, ghost, ghost_data); start=clock(); //freeMemoryForGhost(malloc_ghost); //malloc_ghost = mallocForGhost(p4est, ghost); freeGhostDataFromCuda(malloc_ghost); exchangeGhostDataToCuda(malloc_ghost, ghost); //cuda4est->ghost_to_cuda = malloc_ghost; //freeMemoryForFacesSides(quads_to_cuda); //mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); step3_user_data_api_ghost_data->user_data = ghost_data; step3_user_data_api_ghost_data->user_data_elem_count = ghost->ghosts.elem_count; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; ghost_allocation+=duration; start = clock(); cuda_iterate (cuda4est, ghost, (void *) ghost_data, step3_user_data_api_ghost_data, step3_reset_derivatives, step3_cuda_reset_derivatives_api, step3_minmod_estimate, step3_cuda_minmod_estimate_api, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; reset_derivatives_running+=duration; //cout << "Time taken by cuda_reset_derivatives: " // << duration << " seconds" << endl; start = clock(); //downloadQuadrantsFromCuda(quads_to_cuda, 
quadrants, cuda4est->quad_user_data_api); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; downloading_quads+=duration; //cout << "Time taken by cuda download_quads: " // << duration << " seconds" << endl; /* update du/dx estimate */ /* start = clock(); p4est_iterate (p4est, ghost, (void *) ghost_data, step3_reset_derivatives, step3_minmod_estimate, #ifdef P4_TO_P8 NULL, #endif NULL); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; cout << "Time taken by p4est_reset_derivatives: " << duration << " seconds" << endl; */ // quadrants memory reallocation start start = clock(); //freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); //tree = p4est_tree_array_index (trees, p4est->first_local_tree); //quadrants = &(tree->quadrants); //quads_to_cuda = mallocForQuadrants(cuda4est, quadrants, cuda4est->quad_user_data_api); //cuda4est->quads_to_cuda = quads_to_cuda; stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; quadrants_reallocation+=duration; start = clock(); //mallocFacesSides(cuda4est, quadrants, quads_to_cuda, ghost, malloc_ghost); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; faces_reallocation+=duration; // quadrants memory reallocation end quadrants_is_fresh = false; } P4EST_FREE (ghost_data); p4est_ghost_destroy (ghost); free(step3_cuda_compute_max_api); free(step3_cuda_timestep_update_api); start=clock(); p4est_memory_free(p4est_memory_allocate_info, cuda4est->quad_user_data_api); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; p4est_reallocation+=duration; start = clock(); freeMemoryForQuadrants(quads_to_cuda, cuda4est->quad_user_data_api); stop = clock(); duration = (double)(stop - start) / CLOCKS_PER_SEC; quadrants_reallocation+=duration; double summary_time = ghost_allocation + p4est_reallocation + quadrants_reallocation + faces_reallocation + reset_derivatives_running + compute_max_running + flux_compute_running + timestep_update_running + downloading_quads; printf("summary_time: %f\n", summary_time); printf("ghost_allocation: %f, in procent: %f\n", ghost_allocation, ghost_allocation/summary_time); printf("p4est_reallocation: %f, in procent: %f\n", p4est_reallocation, p4est_reallocation/summary_time); printf("quadrants_reallocation: %f, in procent: %f\n", quadrants_reallocation, quadrants_reallocation/summary_time); printf("faces_reallocation: %f, in procent: %f\n", faces_reallocation, faces_reallocation/summary_time); printf("reset_derivatives_running: %f, in procent: %f\n", reset_derivatives_running, reset_derivatives_running/summary_time); printf("compute_max_running: %f, in procent: %f\n", compute_max_running, compute_max_running/summary_time); printf("flux_compute_running: %f, in procent: %f\n", flux_compute_running, flux_compute_running/summary_time); printf("timestep_update_running: %f, in procent: %f\n", timestep_update_running, timestep_update_running/summary_time); printf("downloading_quads: %f, in procent: %f\n", downloading_quads, downloading_quads/summary_time); } /** The main step 3 program. * * Setup of the example parameters; create the forest, with the state variable * stored in the quadrant data; refine, balance, and partition the forest; * timestep; clean up, and exit. */ int main (int argc, char **argv) { auto start = std::chrono::high_resolution_clock::now(); int mpiret; int recursive, partforcoarsen; sc_MPI_Comm mpicomm; p4est_t *p4est; p4est_connectivity_t *conn; step3_ctx_t ctx; /* Initialize MPI; see sc_mpi.h. 
* If configure --enable-mpi is given these are true MPI calls. * Else these are dummy functions that simulate a single-processor run. */ mpiret = sc_MPI_Init (&argc, &argv); SC_CHECK_MPI (mpiret); mpicomm = sc_MPI_COMM_WORLD; /* These functions are optional. If called they store the MPI rank as a * static variable so subsequent global p4est log messages are only issued * from processor zero. Here we turn off most of the logging; see sc.h. */ sc_init (mpicomm, 1, 1, NULL, SC_LP_ESSENTIAL); p4est_init (NULL, SC_LP_PRODUCTION); P4EST_GLOBAL_PRODUCTIONF ("This is the p4est %dD demo example/steps/%s_step3\n", P4EST_DIM, P4EST_STRING); ctx.bump_width = 0.1; ctx.max_err = 2.e-2; ctx.center[0] = 0.5; ctx.center[1] = 0.5; #ifdef P4_TO_P8 ctx.center[2] = 0.5; #endif #ifndef P4_TO_P8 /* randomly chosen advection direction */ ctx.v[0] = -0.445868402501118; ctx.v[1] = -0.895098523991131; #else ctx.v[0] = 0.485191768970225; ctx.v[1] = -0.427996381877778; ctx.v[2] = 0.762501176669961; #endif ctx.refine_period = 2; ctx.repartition_period = 4; ctx.write_period = 8; /* Create a forest that consists of just one periodic quadtree/octree. */ #ifndef P4_TO_P8 conn = p4est_connectivity_new_periodic (); #else conn = p8est_connectivity_new_periodic (); #endif /* *INDENT-OFF* */ p4est = p4est_new_ext (mpicomm, /* communicator */ conn, /* connectivity */ 0, /* minimum quadrants per MPI process */ 4, /* minimum level of refinement */ 1, /* fill uniform */ sizeof (step3_data_t), /* data size */ step3_init_initial_condition, /* initializes data */ (void *) (&ctx)); /* context */ cuda4est_t *cuda4est = (cuda4est_t*) malloc(sizeof(cuda4est_t)); cuda4est->p4est = p4est; user_data_for_cuda_t *user_data_api = (user_data_for_cuda_t*) malloc(sizeof(user_data_for_cuda_t)); user_data_api->user_data = &ctx; user_data_api->alloc_cuda_memory = alloc_cuda_memory_step3_ctx; user_data_api->free_cuda_memory = free_cuda_memory_step3_ctx; user_data_api->get_cuda_allocated_user_data = get_cuda_allocated_user_data_step3_ctx; cuda4est->user_data_api = user_data_api; quad_user_data_api_t *quad_user_data_api = (quad_user_data_api_t*) malloc(sizeof(quad_user_data_api_t)); quad_user_data_api->alloc_cuda_memory = alloc_cuda_memory_step3_quad_user_data; quad_user_data_api->alloc_cuda_memory_for_all_quads = alloc_all_quads_cuda_memory_step3; quad_user_data_api->free_cuda_memory = free_cuda_memory_step3_quad_user_data; quad_user_data_api->free_cuda_memory_for_all_quads = free_all_quads_cuda_memory_step3; quad_user_data_api->get_cuda_allocated_user_data = get_cuda_allocated_user_data_step3_quad_user_data; quad_user_data_api->update_quad_cuda_user_data = update_quad_cuda_step3_user_data; quad_user_data_api->update_all_quads_cuda_user_data = update_all_quads_cuda_user_data_step3; quad_user_data_api->download_quad_cuda_user_data_to_host = download_quad_cuda_user_data_step3_to_host; quad_user_data_api->download_all_quads_cuda_user_data_to_host = download_all_quads_cuda_user_data_to_host_t_step3; cuda4est->quad_user_data_api = quad_user_data_api; /* *INDENT-ON* */ /* refine and coarsen based on an interpolation error estimate */ recursive = 1; p4est_refine (p4est, recursive, step3_refine_err_estimate, step3_init_initial_condition); p4est_coarsen (p4est, recursive, step3_coarsen_initial_condition, step3_init_initial_condition); /* Partition: The quadrants are redistributed for equal element count. The * partition can optionally be modified such that a family of octants, which * are possibly ready for coarsening, are never split between processors. 
*/ partforcoarsen = 1; /* If we call the 2:1 balance we ensure that neighbors do not differ in size * by more than a factor of 2. This can optionally include diagonal * neighbors across edges or corners as well; see p4est.h. */ p4est_balance (p4est, P4EST_CONNECT_FACE, step3_init_initial_condition); p4est_partition (p4est, partforcoarsen, NULL); /* time step */ //step3_timestep (cuda4est, 0.1); step3_timestep (cuda4est, 1); /* Destroy the p4est and the connectivity structure. */ p4est_destroy (p4est); p4est_connectivity_destroy (conn); /* Verify that allocations internal to p4est and sc do not leak memory. * This should be called if sc_init () has been called earlier. */ sc_finalize (); /* This is a standard MPI call. Without --enable-mpi, it is a dummy. */ mpiret = sc_MPI_Finalize (); SC_CHECK_MPI (mpiret); auto stop = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start); // use the count() member function on the duration object to get the elapsed milliseconds std::cout << "time duration: " << duration.count() << " ms" << std::endl; return 0; }
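The double-precision atomicMax helper defined above (and used by step3_cuda_compute_max) is a common CUDA workaround worth a standalone illustration: CUDA has no native atomicMax for double, so the value is reinterpreted as a 64-bit integer and updated through an atomicCAS retry loop. Below is a minimal, self-contained sketch of that pattern; the names atomicMaxDouble and max_kernel and the test data are purely illustrative and not part of the example above.

#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cfloat>
#include <cuda_runtime.h>

/* reinterpret the double as a 64-bit integer and retry with atomicCAS
 * until no other thread has modified *address in between */
__device__ static double
atomicMaxDouble (double *address, double val)
{
  unsigned long long int *address_as_ull = (unsigned long long int *) address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS (address_as_ull, assumed,
                     __double_as_longlong (fmax (val,
                                                 __longlong_as_double (assumed))));
  }
  while (assumed != old);
  return __longlong_as_double (old);
}

/* every thread folds one array element into the global maximum */
__global__ void
max_kernel (const double *u, int n, double *result)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    atomicMaxDouble (result, u[i]);
  }
}

int
main ()
{
  const int n = 1 << 20;
  double *h_u = (double *) malloc (n * sizeof (double));
  for (int i = 0; i < n; i++) {
    h_u[i] = sin ((double) i);      /* arbitrary test data */
  }
  double h_max = -DBL_MAX;
  double *d_u, *d_max;
  cudaMalloc ((void **) &d_u, n * sizeof (double));
  cudaMalloc ((void **) &d_max, sizeof (double));
  cudaMemcpy (d_u, h_u, n * sizeof (double), cudaMemcpyHostToDevice);
  cudaMemcpy (d_max, &h_max, sizeof (double), cudaMemcpyHostToDevice);
  max_kernel<<<(n + 255) / 256, 256>>> (d_u, n, d_max);
  cudaMemcpy (&h_max, d_max, sizeof (double), cudaMemcpyDeviceToHost);
  printf ("max = %f\n", h_max);
  cudaFree (d_u);
  cudaFree (d_max);
  free (h_u);
  return 0;
}

The same reinterpret-and-CAS idea extends to other atomics that the hardware does not provide directly (for example a double atomicMin) by swapping fmax for the desired combiner.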
3c1c5ebe82b9776624ff338f45314406b9d2b07d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ #include "ppl/cv/cuda/dilate.h" #include "morphology.hpp" #include <cfloat> #include "utility.hpp" using namespace ppl::common; namespace ppl { namespace cv { namespace cuda { struct MaxSwap { __DEVICE__ void initialize(uchar &value0, uchar &value1, uchar &value2, uchar &value3) { value0 = 0; value1 = 0; value2 = 0; value3 = 0; } __DEVICE__ void initialize(uchar &value) { value = 0; } __DEVICE__ void initialize(uchar3 &value) { value.x = 0; value.y = 0; value.z = 0; } __DEVICE__ void initialize(uchar4 &value) { value.x = 0; value.y = 0; value.z = 0; value.w = 0; } __DEVICE__ void initialize(float &value) { value = -FLT_MAX; } __DEVICE__ void initialize(float3 &value) { value.x = -FLT_MAX; value.y = -FLT_MAX; value.z = -FLT_MAX; } __DEVICE__ void initialize(float4 &value) { value.x = -FLT_MAX; value.y = -FLT_MAX; value.z = -FLT_MAX; value.w = -FLT_MAX; } __DEVICE__ void operator()(uchar &value, uchar &target) { value = value < target ? target : value; } __DEVICE__ void operator()(uchar3 &value, uchar3 &target) { value.x = value.x < target.x ? target.x : value.x; value.y = value.y < target.y ? target.y : value.y; value.z = value.z < target.z ? target.z : value.z; } __DEVICE__ void operator()(uchar4 &value, uchar4 &target) { value.x = value.x < target.x ? target.x : value.x; value.y = value.y < target.y ? target.y : value.y; value.z = value.z < target.z ? target.z : value.z; value.w = value.w < target.w ? target.w : value.w; } __DEVICE__ void operator()(float &value, float &target) { value = value < target ? target : value; } __DEVICE__ void operator()(float3 &value, float3 &target) { value.x = value.x < target.x ? target.x : value.x; value.y = value.y < target.y ? target.y : value.y; value.z = value.z < target.z ? target.z : value.z; } __DEVICE__ void operator()(float4 &value, float4 &target) { value.x = value.x < target.x ? target.x : value.x; value.y = value.y < target.y ? target.y : value.y; value.z = value.z < target.z ? target.z : value.z; value.w = value.w < target.w ? target.w : value.w; } __DEVICE__ void checkConstantResult(uchar &result, uchar border_value) { result = result < border_value ? border_value : result; } __DEVICE__ void checkConstantResult(uchar3 &result, uchar border_value) { result.x = result.x < border_value ? border_value : result.x; } __DEVICE__ void checkConstantResult(uchar4 &result, uchar border_value) { result.x = result.x < border_value ? border_value : result.x; } __DEVICE__ void checkConstantResult(float &result, float border_value) { result = result < border_value ? border_value : result; } __DEVICE__ void checkConstantResult(float3 &result, float border_value) { result.x = result.x < border_value ? 
border_value : result.x; } __DEVICE__ void checkConstantResult(float4 &result, float border_value) { result.x = result.x < border_value ? border_value : result.x; } __DEVICE__ void checkU8C1ConstantResult(uchar4 &result, uchar border_value, bool constant_border0, bool constant_border1, bool constant_border2, bool constant_border3) { if (constant_border0) { result.x = result.x < border_value ? border_value : result.x; } if (constant_border1) { result.y = result.y < border_value ? border_value : result.y; } if (constant_border2) { result.z = result.z < border_value ? border_value : result.z; } if (constant_border3) { result.w = result.w < border_value ? border_value : result.w; } } }; RetCode dilate(const uchar* src, int rows, int cols, int channels, int src_stride, uchar* dst, int dst_stride, const uchar* kernel, int kernel_y, int kernel_x, BorderType border_type, const uchar border_value, hipStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(rows > 0 && cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(uchar)); PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar)); PPL_ASSERT(kernel_y > 0 && kernel_y < rows); PPL_ASSERT(kernel_x > 0 && kernel_x < cols); PPL_ASSERT(kernel_y & 1 == 1 && kernel_x & 1 == 1); PPL_ASSERT(border_type == BORDER_TYPE_CONSTANT || border_type == BORDER_TYPE_REPLICATE || border_type == BORDER_TYPE_REFLECT || border_type == BORDER_TYPE_WRAP || border_type == BORDER_TYPE_REFLECT_101); hipError_t code; if (kernel_x == 1 && kernel_y == 1 && src_stride == dst_stride) { if (src != dst) { code = hipMemcpyAsync(dst, src, src_stride * rows, hipMemcpyDeviceToDevice); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } int diameter_x = kernel_x >> 1; int diameter_y = kernel_y >> 1; dim3 block, grid; block.x = kBlockDimX0; block.y = kBlockDimY0; grid.x = divideUp(cols, kBlockDimX0, kBlockShiftX0); grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0); bool all_masked = true; if (kernel != nullptr) { int count = kernel_y * kernel_x; for (int index = 0; index < count; index++) { if (kernel[index] != 1) { all_masked = false; break; } } } MaxSwap morphology_swap; if (all_masked) { uchar* buffer; size_t pitch; if (channels == 1) { int left_threads = divideUp(diameter_x, 4, 2); int remainders = cols & 3; remainders = remainders > diameter_x ? 
remainders : diameter_x; int aligned_columns = (cols - remainders) >> 2; int right_threads = cols - (aligned_columns << 2); int columns = aligned_columns + right_threads; if ((left_threads << 2) + right_threads <= cols) { dim3 block0, grid0; block0.x = kBlockDimX0; block0.y = kBlockDimY0; grid0.x = divideUp(columns, kBlockDimX0, kBlockShiftX0); grid0.y = divideUp(rows, kBlockDimY0, kBlockShiftY0); if (rows >= 480 && cols >= 640 && kernel_y >= 7 && kernel_x >= 7) { code = hipMallocPitch(&buffer, &pitch, cols * channels * sizeof(uchar), rows); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } hipLaunchKernelGGL(( morphRowU8C1Kernel0<MaxSwap>), dim3(grid0), dim3(block0), 0, stream, src, rows, cols, columns, src_stride, left_threads, aligned_columns, diameter_x, buffer, pitch, morphology_swap); hipLaunchKernelGGL(( morphColKernel0<uchar, uchar, MaxSwap>), dim3(grid), dim3(block), 0, stream, buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); hipFree(buffer); } else { hipLaunchKernelGGL(( morph2DU8C1Kernel0<MaxSwap>), dim3(grid0), dim3(block0), 0, stream, src, rows, cols, columns, src_stride, left_threads, aligned_columns, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else { hipLaunchKernelGGL(( morph2DKernel0<uchar, uchar, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else if (channels == 3) { if (rows >= 480 && cols >= 640 && kernel_y >= 7 && kernel_x >= 7) { code = hipMallocPitch(&buffer, &pitch, cols * channels * sizeof(uchar), rows); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } hipLaunchKernelGGL(( morphRowKernel0<uchar3, uchar, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, diameter_x, buffer, pitch, morphology_swap); hipLaunchKernelGGL(( morphColKernel0<uchar3, uchar, MaxSwap>), dim3(grid), dim3(block), 0, stream, buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); hipFree(buffer); } else { hipLaunchKernelGGL(( morph2DKernel0<uchar3, uchar, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else { // channels == 4 if (rows >= 780 && cols >= 1024 && kernel_y >= 7 && kernel_x >= 7) { code = hipMallocPitch(&buffer, &pitch, cols * channels * sizeof(uchar), rows); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } hipLaunchKernelGGL(( morphRowKernel0<uchar4, uchar, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, diameter_x, buffer, pitch, morphology_swap); hipLaunchKernelGGL(( morphColKernel0<uchar4, uchar, MaxSwap>), dim3(grid), dim3(block), 0, stream, buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); hipFree(buffer); } else { hipLaunchKernelGGL(( morph2DKernel0<uchar4, uchar, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } } else { uchar* mask; int size = kernel_y * kernel_x * sizeof(uchar); code = hipMalloc(&mask, size); if (code != hipSuccess) { LOG(ERROR) << 
"CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } code = hipMemcpyAsync(mask, kernel, size, hipMemcpyHostToDevice); if (code != hipSuccess) { hipFree(mask); LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } if (channels == 1) { int left_threads = divideUp(diameter_x, 4, 2); int remainders = cols & 3; remainders = remainders > diameter_x ? remainders : diameter_x; int aligned_columns = (cols - remainders) >> 2; int right_threads = cols - (aligned_columns << 2); int columns = aligned_columns + right_threads; if ((left_threads << 2) + right_threads <= cols) { dim3 block0, grid0; block0.x = kBlockDimX0; block0.y = kBlockDimY0; grid0.x = divideUp(columns, kBlockDimX0, kBlockShiftX0); grid0.y = divideUp(rows, kBlockDimY0, kBlockShiftY0); hipLaunchKernelGGL(( morph2DU8C1Kernel1<MaxSwap>), dim3(grid0), dim3(block0), 0, stream, src, rows, cols, columns, src_stride, mask, left_threads, aligned_columns, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } else { hipLaunchKernelGGL(( morph2DKernel1<uchar, uchar, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else if (channels == 3) { hipLaunchKernelGGL(( morph2DKernel1<uchar3, uchar, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } else { hipLaunchKernelGGL(( morph2DKernel1<uchar4, uchar, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } hipFree(mask); } return RC_SUCCESS; } RetCode dilate(const float* src, int rows, int cols, int channels, int src_stride, float* dst, int dst_stride, const uchar* kernel, int kernel_y, int kernel_x, BorderType border_type, const float border_value, hipStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(rows > 0 && cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float)); PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float)); PPL_ASSERT(kernel_y > 0 && kernel_y < rows); PPL_ASSERT(kernel_x > 0 && kernel_x < cols); PPL_ASSERT(border_type == BORDER_TYPE_CONSTANT || border_type == BORDER_TYPE_REPLICATE || border_type == BORDER_TYPE_REFLECT || border_type == BORDER_TYPE_WRAP || border_type == BORDER_TYPE_REFLECT_101); hipError_t code; if (kernel_x == 1 && kernel_y == 1 && src_stride == dst_stride) { if (src != dst) { code = hipMemcpyAsync(dst, src, src_stride * rows, hipMemcpyDeviceToDevice); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } int diameter_x = kernel_x >> 1; int diameter_y = kernel_y >> 1; dim3 block, grid; block.x = kBlockDimX1; block.y = kBlockDimY1; grid.x = divideUp(cols, kBlockDimX1, kBlockShiftX1); grid.y = divideUp(rows, kBlockDimY1, kBlockShiftY1); bool all_masked = true; if (kernel != nullptr) { int count = kernel_y * kernel_x; for (int index = 0; index < count; index++) { if (kernel[index] != 1) { all_masked = false; break; } } } MaxSwap morphology_swap; if (all_masked) { float* buffer; size_t pitch; if (channels == 1) { if (rows >= 480 && cols >= 640 && 
kernel_y >= 7 && kernel_x >= 7) { code = hipMallocPitch(&buffer, &pitch, cols * channels * sizeof(float), rows); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } hipLaunchKernelGGL(( morphRowKernel0<float, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, diameter_x, buffer, pitch, morphology_swap); hipLaunchKernelGGL(( morphColKernel0<float, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); hipFree(buffer); } else { hipLaunchKernelGGL(( morph2DKernel0<float, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else if (channels == 3) { if (rows >= 480 && cols >= 640 && kernel_y >= 7 && kernel_x >= 7) { code = hipMallocPitch(&buffer, &pitch, cols * channels * sizeof(float), rows); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } hipLaunchKernelGGL(( morphRowKernel0<float3, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, diameter_x, buffer, pitch, morphology_swap); hipLaunchKernelGGL(( morphColKernel0<float3, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); hipFree(buffer); } else { hipLaunchKernelGGL(( morph2DKernel0<float3, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else { // channels == 4 if (rows >= 480 && cols >= 640 && kernel_y >= 7 && kernel_x >= 7) { code = hipMallocPitch(&buffer, &pitch, cols * channels * sizeof(float), rows); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } hipLaunchKernelGGL(( morphRowKernel0<float4, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, diameter_x, buffer, pitch, morphology_swap); hipLaunchKernelGGL(( morphColKernel0<float4, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); hipFree(buffer); } else { hipLaunchKernelGGL(( morph2DKernel0<float4, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } } else { uchar* mask; int size = kernel_y * kernel_x * sizeof(uchar); code = hipMalloc(&mask, size); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } code = hipMemcpyAsync(mask, kernel, size, hipMemcpyHostToDevice); if (code != hipSuccess) { hipFree(mask); LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } if (channels == 1) { hipLaunchKernelGGL(( morph2DKernel1<float, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } else if (channels == 3) { hipLaunchKernelGGL(( morph2DKernel1<float3, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, 
kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } else { hipLaunchKernelGGL(( morph2DKernel1<float4, float, MaxSwap>), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } hipFree(mask); } return RC_SUCCESS; } template <> RetCode Dilate<uchar, 1>(hipStream_t stream, int height, int width, int inWidthStride, const uchar* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, uchar* outData, BorderType border_type, const uchar border_value) { RetCode code = dilate(inData, height, width, 1, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, border_type, border_value, stream); return code; } template <> RetCode Dilate<uchar, 3>(hipStream_t stream, int height, int width, int inWidthStride, const uchar* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, uchar* outData, BorderType border_type, const uchar border_value) { RetCode code = dilate(inData, height, width, 3, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, border_type, border_value, stream); return code; } template <> RetCode Dilate<uchar, 4>(hipStream_t stream, int height, int width, int inWidthStride, const uchar* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, uchar* outData, BorderType border_type, const uchar border_value) { RetCode code = dilate(inData, height, width, 4, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, border_type, border_value, stream); return code; } template <> RetCode Dilate<float, 1>(hipStream_t stream, int height, int width, int inWidthStride, const float* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, float* outData, BorderType border_type, const float border_value) { inWidthStride *= sizeof(float); outWidthStride *= sizeof(float); RetCode code = dilate(inData, height, width, 1, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, border_type, border_value, stream); return code; } template <> RetCode Dilate<float, 3>(hipStream_t stream, int height, int width, int inWidthStride, const float* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, float* outData, BorderType border_type, const float border_value) { inWidthStride *= sizeof(float); outWidthStride *= sizeof(float); RetCode code = dilate(inData, height, width, 3, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, border_type, border_value, stream); return code; } template <> RetCode Dilate<float, 4>(hipStream_t stream, int height, int width, int inWidthStride, const float* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, float* outData, BorderType border_type, const float border_value) { inWidthStride *= sizeof(float); outWidthStride *= sizeof(float); RetCode code = dilate(inData, height, width, 4, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, border_type, border_value, stream); return code; } } // namespace cuda } // namespace cv } // namespace ppl
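Note: the fully-masked path above dispatches to morphRowKernel0 / morphColKernel0 / morph2DKernel0 from morphology.hpp, which is not part of this dump. As a rough, self-contained sketch of the per-pixel work such a pass performs (not the actual morphology.hpp kernels), a single-channel uchar dilation with replicate borders could look like the following; the kernel name and border handling are illustrative assumptions.

// Illustrative sketch only (assumed name naiveDilateU8C1); this is not the
// morphology.hpp implementation used above, just the per-pixel max filter a
// fully-masked single-channel dilation computes, with replicate borders.
__global__ void naiveDilateU8C1(const unsigned char* src, int rows, int cols,
                                int src_stride, int radius_x, int radius_y,
                                unsigned char* dst, int dst_stride) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= cols || y >= rows) return;

  unsigned char result = 0;                      // dilation identity for uchar
  for (int dy = -radius_y; dy <= radius_y; ++dy) {
    int sy = min(max(y + dy, 0), rows - 1);      // replicate border in y
    const unsigned char* row = src + sy * src_stride;
    for (int dx = -radius_x; dx <= radius_x; ++dx) {
      int sx = min(max(x + dx, 0), cols - 1);    // replicate border in x
      unsigned char v = row[sx];
      result = result < v ? v : result;          // same comparison MaxSwap applies
    }
  }
  dst[y * dst_stride + x] = result;
}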
3c1c5ebe82b9776624ff338f45314406b9d2b07d.cu
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ #include "ppl/cv/cuda/dilate.h" #include "morphology.hpp" #include <cfloat> #include "utility.hpp" using namespace ppl::common; namespace ppl { namespace cv { namespace cuda { struct MaxSwap { __DEVICE__ void initialize(uchar &value0, uchar &value1, uchar &value2, uchar &value3) { value0 = 0; value1 = 0; value2 = 0; value3 = 0; } __DEVICE__ void initialize(uchar &value) { value = 0; } __DEVICE__ void initialize(uchar3 &value) { value.x = 0; value.y = 0; value.z = 0; } __DEVICE__ void initialize(uchar4 &value) { value.x = 0; value.y = 0; value.z = 0; value.w = 0; } __DEVICE__ void initialize(float &value) { value = -FLT_MAX; } __DEVICE__ void initialize(float3 &value) { value.x = -FLT_MAX; value.y = -FLT_MAX; value.z = -FLT_MAX; } __DEVICE__ void initialize(float4 &value) { value.x = -FLT_MAX; value.y = -FLT_MAX; value.z = -FLT_MAX; value.w = -FLT_MAX; } __DEVICE__ void operator()(uchar &value, uchar &target) { value = value < target ? target : value; } __DEVICE__ void operator()(uchar3 &value, uchar3 &target) { value.x = value.x < target.x ? target.x : value.x; value.y = value.y < target.y ? target.y : value.y; value.z = value.z < target.z ? target.z : value.z; } __DEVICE__ void operator()(uchar4 &value, uchar4 &target) { value.x = value.x < target.x ? target.x : value.x; value.y = value.y < target.y ? target.y : value.y; value.z = value.z < target.z ? target.z : value.z; value.w = value.w < target.w ? target.w : value.w; } __DEVICE__ void operator()(float &value, float &target) { value = value < target ? target : value; } __DEVICE__ void operator()(float3 &value, float3 &target) { value.x = value.x < target.x ? target.x : value.x; value.y = value.y < target.y ? target.y : value.y; value.z = value.z < target.z ? target.z : value.z; } __DEVICE__ void operator()(float4 &value, float4 &target) { value.x = value.x < target.x ? target.x : value.x; value.y = value.y < target.y ? target.y : value.y; value.z = value.z < target.z ? target.z : value.z; value.w = value.w < target.w ? target.w : value.w; } __DEVICE__ void checkConstantResult(uchar &result, uchar border_value) { result = result < border_value ? border_value : result; } __DEVICE__ void checkConstantResult(uchar3 &result, uchar border_value) { result.x = result.x < border_value ? border_value : result.x; } __DEVICE__ void checkConstantResult(uchar4 &result, uchar border_value) { result.x = result.x < border_value ? border_value : result.x; } __DEVICE__ void checkConstantResult(float &result, float border_value) { result = result < border_value ? border_value : result; } __DEVICE__ void checkConstantResult(float3 &result, float border_value) { result.x = result.x < border_value ? 
border_value : result.x; } __DEVICE__ void checkConstantResult(float4 &result, float border_value) { result.x = result.x < border_value ? border_value : result.x; } __DEVICE__ void checkU8C1ConstantResult(uchar4 &result, uchar border_value, bool constant_border0, bool constant_border1, bool constant_border2, bool constant_border3) { if (constant_border0) { result.x = result.x < border_value ? border_value : result.x; } if (constant_border1) { result.y = result.y < border_value ? border_value : result.y; } if (constant_border2) { result.z = result.z < border_value ? border_value : result.z; } if (constant_border3) { result.w = result.w < border_value ? border_value : result.w; } } }; RetCode dilate(const uchar* src, int rows, int cols, int channels, int src_stride, uchar* dst, int dst_stride, const uchar* kernel, int kernel_y, int kernel_x, BorderType border_type, const uchar border_value, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(rows > 0 && cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(uchar)); PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar)); PPL_ASSERT(kernel_y > 0 && kernel_y < rows); PPL_ASSERT(kernel_x > 0 && kernel_x < cols); PPL_ASSERT(kernel_y & 1 == 1 && kernel_x & 1 == 1); PPL_ASSERT(border_type == BORDER_TYPE_CONSTANT || border_type == BORDER_TYPE_REPLICATE || border_type == BORDER_TYPE_REFLECT || border_type == BORDER_TYPE_WRAP || border_type == BORDER_TYPE_REFLECT_101); cudaError_t code; if (kernel_x == 1 && kernel_y == 1 && src_stride == dst_stride) { if (src != dst) { code = cudaMemcpyAsync(dst, src, src_stride * rows, cudaMemcpyDeviceToDevice); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } int diameter_x = kernel_x >> 1; int diameter_y = kernel_y >> 1; dim3 block, grid; block.x = kBlockDimX0; block.y = kBlockDimY0; grid.x = divideUp(cols, kBlockDimX0, kBlockShiftX0); grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0); bool all_masked = true; if (kernel != nullptr) { int count = kernel_y * kernel_x; for (int index = 0; index < count; index++) { if (kernel[index] != 1) { all_masked = false; break; } } } MaxSwap morphology_swap; if (all_masked) { uchar* buffer; size_t pitch; if (channels == 1) { int left_threads = divideUp(diameter_x, 4, 2); int remainders = cols & 3; remainders = remainders > diameter_x ? 
remainders : diameter_x; int aligned_columns = (cols - remainders) >> 2; int right_threads = cols - (aligned_columns << 2); int columns = aligned_columns + right_threads; if ((left_threads << 2) + right_threads <= cols) { dim3 block0, grid0; block0.x = kBlockDimX0; block0.y = kBlockDimY0; grid0.x = divideUp(columns, kBlockDimX0, kBlockShiftX0); grid0.y = divideUp(rows, kBlockDimY0, kBlockShiftY0); if (rows >= 480 && cols >= 640 && kernel_y >= 7 && kernel_x >= 7) { code = cudaMallocPitch(&buffer, &pitch, cols * channels * sizeof(uchar), rows); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } morphRowU8C1Kernel0<MaxSwap><<<grid0, block0, 0, stream>>>(src, rows, cols, columns, src_stride, left_threads, aligned_columns, diameter_x, buffer, pitch, morphology_swap); morphColKernel0<uchar, uchar, MaxSwap><<<grid, block, 0, stream>>>( buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); cudaFree(buffer); } else { morph2DU8C1Kernel0<MaxSwap><<<grid0, block0, 0, stream>>>(src, rows, cols, columns, src_stride, left_threads, aligned_columns, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else { morph2DKernel0<uchar, uchar, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else if (channels == 3) { if (rows >= 480 && cols >= 640 && kernel_y >= 7 && kernel_x >= 7) { code = cudaMallocPitch(&buffer, &pitch, cols * channels * sizeof(uchar), rows); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } morphRowKernel0<uchar3, uchar, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, diameter_x, buffer, pitch, morphology_swap); morphColKernel0<uchar3, uchar, MaxSwap><<<grid, block, 0, stream>>>( buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); cudaFree(buffer); } else { morph2DKernel0<uchar3, uchar, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else { // channels == 4 if (rows >= 780 && cols >= 1024 && kernel_y >= 7 && kernel_x >= 7) { code = cudaMallocPitch(&buffer, &pitch, cols * channels * sizeof(uchar), rows); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } morphRowKernel0<uchar4, uchar, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, diameter_x, buffer, pitch, morphology_swap); morphColKernel0<uchar4, uchar, MaxSwap><<<grid, block, 0, stream>>>( buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); cudaFree(buffer); } else { morph2DKernel0<uchar4, uchar, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } } else { uchar* mask; int size = kernel_y * kernel_x * sizeof(uchar); code = cudaMalloc(&mask, size); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } code = cudaMemcpyAsync(mask, kernel, size, cudaMemcpyHostToDevice); if (code != cudaSuccess) { cudaFree(mask); LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } if (channels == 1) 
{ int left_threads = divideUp(diameter_x, 4, 2); int remainders = cols & 3; remainders = remainders > diameter_x ? remainders : diameter_x; int aligned_columns = (cols - remainders) >> 2; int right_threads = cols - (aligned_columns << 2); int columns = aligned_columns + right_threads; if ((left_threads << 2) + right_threads <= cols) { dim3 block0, grid0; block0.x = kBlockDimX0; block0.y = kBlockDimY0; grid0.x = divideUp(columns, kBlockDimX0, kBlockShiftX0); grid0.y = divideUp(rows, kBlockDimY0, kBlockShiftY0); morph2DU8C1Kernel1<MaxSwap><<<grid0, block0, 0, stream>>>(src, rows, cols, columns, src_stride, mask, left_threads, aligned_columns, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } else { morph2DKernel1<uchar, uchar, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else if (channels == 3) { morph2DKernel1<uchar3, uchar, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } else { morph2DKernel1<uchar4, uchar, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } cudaFree(mask); } return RC_SUCCESS; } RetCode dilate(const float* src, int rows, int cols, int channels, int src_stride, float* dst, int dst_stride, const uchar* kernel, int kernel_y, int kernel_x, BorderType border_type, const float border_value, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(rows > 0 && cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float)); PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float)); PPL_ASSERT(kernel_y > 0 && kernel_y < rows); PPL_ASSERT(kernel_x > 0 && kernel_x < cols); PPL_ASSERT(border_type == BORDER_TYPE_CONSTANT || border_type == BORDER_TYPE_REPLICATE || border_type == BORDER_TYPE_REFLECT || border_type == BORDER_TYPE_WRAP || border_type == BORDER_TYPE_REFLECT_101); cudaError_t code; if (kernel_x == 1 && kernel_y == 1 && src_stride == dst_stride) { if (src != dst) { code = cudaMemcpyAsync(dst, src, src_stride * rows, cudaMemcpyDeviceToDevice); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } int diameter_x = kernel_x >> 1; int diameter_y = kernel_y >> 1; dim3 block, grid; block.x = kBlockDimX1; block.y = kBlockDimY1; grid.x = divideUp(cols, kBlockDimX1, kBlockShiftX1); grid.y = divideUp(rows, kBlockDimY1, kBlockShiftY1); bool all_masked = true; if (kernel != nullptr) { int count = kernel_y * kernel_x; for (int index = 0; index < count; index++) { if (kernel[index] != 1) { all_masked = false; break; } } } MaxSwap morphology_swap; if (all_masked) { float* buffer; size_t pitch; if (channels == 1) { if (rows >= 480 && cols >= 640 && kernel_y >= 7 && kernel_x >= 7) { code = cudaMallocPitch(&buffer, &pitch, cols * channels * sizeof(float), rows); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } morphRowKernel0<float, float, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, diameter_x, buffer, pitch, morphology_swap); morphColKernel0<float, float, 
MaxSwap><<<grid, block, 0, stream>>>( buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); cudaFree(buffer); } else { morph2DKernel0<float, float, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else if (channels == 3) { if (rows >= 480 && cols >= 640 && kernel_y >= 7 && kernel_x >= 7) { code = cudaMallocPitch(&buffer, &pitch, cols * channels * sizeof(float), rows); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } morphRowKernel0<float3, float, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, diameter_x, buffer, pitch, morphology_swap); morphColKernel0<float3, float, MaxSwap><<<grid, block, 0, stream>>>( buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); cudaFree(buffer); } else { morph2DKernel0<float3, float, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } else { // channels == 4 if (rows >= 480 && cols >= 640 && kernel_y >= 7 && kernel_x >= 7) { code = cudaMallocPitch(&buffer, &pitch, cols * channels * sizeof(float), rows); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } morphRowKernel0<float4, float, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, diameter_x, buffer, pitch, morphology_swap); morphColKernel0<float4, float, MaxSwap><<<grid, block, 0, stream>>>( buffer, rows, cols, pitch, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); cudaFree(buffer); } else { morph2DKernel0<float4, float, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, diameter_x, diameter_y, dst, dst_stride, border_type, border_value, morphology_swap); } } } else { uchar* mask; int size = kernel_y * kernel_x * sizeof(uchar); code = cudaMalloc(&mask, size); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } code = cudaMemcpyAsync(mask, kernel, size, cudaMemcpyHostToDevice); if (code != cudaSuccess) { cudaFree(mask); LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } if (channels == 1) { morph2DKernel1<float, float, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } else if (channels == 3) { morph2DKernel1<float3, float, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } else { morph2DKernel1<float4, float, MaxSwap><<<grid, block, 0, stream>>>(src, rows, cols, src_stride, mask, diameter_x, diameter_y, kernel_x, kernel_y, dst, dst_stride, border_type, border_value, morphology_swap); } cudaFree(mask); } return RC_SUCCESS; } template <> RetCode Dilate<uchar, 1>(cudaStream_t stream, int height, int width, int inWidthStride, const uchar* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, uchar* outData, BorderType border_type, const uchar border_value) { RetCode code = dilate(inData, height, width, 1, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, 
border_type, border_value, stream); return code; } template <> RetCode Dilate<uchar, 3>(cudaStream_t stream, int height, int width, int inWidthStride, const uchar* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, uchar* outData, BorderType border_type, const uchar border_value) { RetCode code = dilate(inData, height, width, 3, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, border_type, border_value, stream); return code; } template <> RetCode Dilate<uchar, 4>(cudaStream_t stream, int height, int width, int inWidthStride, const uchar* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, uchar* outData, BorderType border_type, const uchar border_value) { RetCode code = dilate(inData, height, width, 4, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, border_type, border_value, stream); return code; } template <> RetCode Dilate<float, 1>(cudaStream_t stream, int height, int width, int inWidthStride, const float* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, float* outData, BorderType border_type, const float border_value) { inWidthStride *= sizeof(float); outWidthStride *= sizeof(float); RetCode code = dilate(inData, height, width, 1, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, border_type, border_value, stream); return code; } template <> RetCode Dilate<float, 3>(cudaStream_t stream, int height, int width, int inWidthStride, const float* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, float* outData, BorderType border_type, const float border_value) { inWidthStride *= sizeof(float); outWidthStride *= sizeof(float); RetCode code = dilate(inData, height, width, 3, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, border_type, border_value, stream); return code; } template <> RetCode Dilate<float, 4>(cudaStream_t stream, int height, int width, int inWidthStride, const float* inData, int kernelx_len, int kernely_len, const uchar* kernel, int outWidthStride, float* outData, BorderType border_type, const float border_value) { inWidthStride *= sizeof(float); outWidthStride *= sizeof(float); RetCode code = dilate(inData, height, width, 4, inWidthStride, outData, outWidthStride, kernel, kernely_len, kernelx_len, border_type, border_value, stream); return code; } } // namespace cuda } // namespace cv } // namespace ppl
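For context, a host-side call into the uchar single-channel specialization above might look like the sketch below. The image size, the all-ones 3x3 structuring element, the ppl::cv::BORDER_TYPE_REPLICATE qualification, and the use of unsigned char for the library's uchar typedef are assumptions made for illustration, not taken from this file.

// Hypothetical usage sketch of the Dilate<uchar, 1> specialization; names and
// values marked in comments are assumptions.
#include <cuda_runtime.h>
#include <vector>
#include "ppl/cv/cuda/dilate.h"

int runDilateExample() {
  const int height = 480, width = 640;          // assumed image size
  const int stride = width;                     // single channel, uchar: bytes == pixels

  unsigned char *d_src = nullptr, *d_dst = nullptr;
  cudaMalloc(&d_src, height * stride);
  cudaMalloc(&d_dst, height * stride);
  // ... upload the input image into d_src here ...

  std::vector<unsigned char> mask(3 * 3, 1);    // fully-masked 3x3 element; a host
                                                // pointer is expected, as above

  cudaStream_t stream;
  cudaStreamCreate(&stream);
  ppl::cv::cuda::Dilate<unsigned char, 1>(
      stream, height, width, stride, d_src,
      3 /* kernelx_len */, 3 /* kernely_len */, mask.data(),
      stride, d_dst, ppl::cv::BORDER_TYPE_REPLICATE /* assumed enum path */,
      0 /* border_value */);
  cudaStreamSynchronize(stream);

  cudaStreamDestroy(stream);
  cudaFree(d_src);
  cudaFree(d_dst);
  return 0;
}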
40d9a575c2a4327e4d35ca7f3fd9e1594a50030f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ __global__ void getPredicates(unsigned int* d_in, size_t size, int bit, unsigned int* d_false_predicate, unsigned int* d_true_predicate) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; unsigned int zero = !(d_in[index] & (1 << bit)); d_false_predicate[index] = zero; d_true_predicate[index] = !zero; if(index < 16) printf(" %d", d_false_predicate[index]); } __global__ void sumReduce(unsigned int* sum, unsigned int* input, size_t size) { //indexing int gid = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; if (gid >= size) return; //copy input array to shared extern __shared__ unsigned int s_sums[]; s_sums[tid] = input[gid]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { s_sums[tid] += s_sums[tid + s]; } __syncthreads(); } if (tid == 0) { sum[blockIdx.x] = s_sums[tid]; } } __global__ void prefixSum(unsigned int* out, unsigned int* in, size_t size) { //indexing int gid = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; if (gid >= size) return; //copy input array to intermediate array to start extern __shared__ int s_intermediate[]; s_intermediate[tid] = in[gid]; __syncthreads(); //copy input array to intermediate array to start extern __shared__ int s_out[]; s_out[tid] = s_intermediate[tid]; __syncthreads(); //0th value in the intermediate does not change if (tid == 0) out[tid] = in[tid]; //Copy in[] to shared memory extern __shared__ int s_in[]; s_in[tid] = in[gid]; __syncthreads(); //reduce for(int s = 1; s < blockDim.x; s *= 2) { int pos = (tid + 1) * s * 2 - 1; if (pos == blockDim.x - 1) //last element is 0 { s_intermediate[pos] = 0; //out becomes the next in for next stride s_in[pos] = s_intermediate[pos]; } else if (pos < blockDim.x) { s_intermediate[pos] = s_in[pos] + s_in[pos - s]; //out becomes the next in for next stride s_in[pos] = s_intermediate[pos]; } 
__syncthreads(); } //downsweep for (int s = blockDim.x; s > 0; s /= 2) { //get new position int pos = (tid + 1) * s * 2 - 1; if(pos < blockDim.x) { //swap and sum values int x = s_in[pos]; int y = s_in[pos - s]; s_out[pos] = y + x; //sum s_out[pos - s] = x; //swap //out becomce next in s_intermediate[pos] = s_out[pos]; s_intermediate[pos - s] = s_out[pos - s]; } __syncthreads(); } //copy back to device out[gid] = s_out[tid]; //or s_out[tid] with downsweep __syncthreads(); //print if (gid < 16) printf("%d ", out[gid]); } __global__ void prefixSumFix(unsigned int* scan, unsigned int* sums, size_t numElems) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= numElems) return; scan[index] += sums[blockIdx.x]; //print if (index > 1023 && index < 1040) printf("%d ", scan[index]); } __global__ void scatterElements(unsigned int* d_inputVals, unsigned int* d_outputVals, unsigned int* true_scan, unsigned int* false_scan, unsigned int* d_inputPos, unsigned int* d_outputPos, size_t numElems, unsigned int bit) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= numElems) return; unsigned int isLastElemZero = ((d_inputVals[numElems - 1] & (1 << bit)) == 0); unsigned int oneStartPos = false_scan[numElems - 1] + isLastElemZero; unsigned int pos = d_inputPos[index]; unsigned int value = d_inputVals[index]; int new_loc; if (value & (1 << bit)) { new_loc = oneStartPos + true_scan[index]; d_outputVals[new_loc] = value; d_outputPos[new_loc] = pos; } else { new_loc = false_scan[index]; d_outputVals[new_loc] = value; d_outputPos[new_loc] = pos; } //print example if (index == 0) printf("isLastElemZero = %d oneStartPos = %d NewLoc = %d Value = %d\n", isLastElemZero, oneStartPos, new_loc, value); } __global__ void copyBuffers(unsigned int* d_inVals, unsigned int* d_inPos, unsigned int* d_outVals, unsigned int* d_outPos, size_t numElems) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numElems) return; d_outVals[index] = d_inVals[index]; d_outPos[index] = d_inPos[index]; } void your_sort(unsigned int* d_inputVals, unsigned int* d_inputPos, unsigned int* d_outputVals, unsigned int* d_outputPos, const size_t numElems) { //kernal threads and blocks int threads = 1024; int blocks = (numElems + threads - 1) / threads; printf("numElems = %d\nBlocks = %d\n\n", numElems, blocks); //initialize buffers unsigned int* d_true_predicate; checkCudaErrors(hipMalloc((void **) &d_true_predicate, numElems * sizeof(unsigned int))); unsigned int* d_true_sums; checkCudaErrors(hipMalloc((void **) &d_true_sums, blocks * sizeof(unsigned int))); unsigned int* d_true_sums_scan; checkCudaErrors(hipMalloc((void **) &d_true_sums_scan, blocks * sizeof(unsigned int))); unsigned int* d_true_scan; checkCudaErrors(hipMalloc((void **) &d_true_scan, numElems * sizeof(unsigned int))); unsigned int* d_false_predicate; checkCudaErrors(hipMalloc((void **) &d_false_predicate, numElems * sizeof(unsigned int))); unsigned int* d_false_sums; checkCudaErrors(hipMalloc((void **) &d_false_sums, blocks * sizeof(unsigned int))); unsigned int* d_false_sums_scan; checkCudaErrors(hipMalloc((void **) &d_false_sums_scan, blocks * sizeof(unsigned int))); unsigned int* d_false_scan; checkCudaErrors(hipMalloc((void **) &d_false_scan, numElems * sizeof(unsigned int))); //loop thru bits for(unsigned int bit = 0; bit < 8 * sizeof(unsigned int); bit++) { printf("\nBit position = %d\n", bit); //0 all buffers everytime hipMemset(d_true_predicate, 0, numElems * sizeof(unsigned int)); hipMemset(d_true_scan, 0, numElems * sizeof(unsigned 
int)); hipMemset(d_true_sums, 0, blocks * sizeof(unsigned int)); hipMemset(d_true_sums_scan, 0, blocks * sizeof(unsigned int)); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipMemset(d_false_predicate, 0, numElems * sizeof(unsigned int)); hipMemset(d_false_scan, 0, numElems * sizeof(unsigned int)); hipMemset(d_false_sums, 0, blocks * sizeof(unsigned int)); hipMemset(d_false_sums_scan, 0, blocks * sizeof(unsigned int)); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //get predicates printf("False predicate(first 16): "); hipLaunchKernelGGL(( getPredicates), dim3(blocks), dim3(threads), 0, 0, d_inputVals, numElems, bit, d_false_predicate, d_true_predicate); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); printf("\n"); //get block sums printf("False sums(first 16): "); hipLaunchKernelGGL(( sumReduce), dim3(blocks), dim3(threads), threads * sizeof(unsigned int), 0, d_false_sums, d_false_predicate, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); printf("\nTrue sums(first 16): "); hipLaunchKernelGGL(( sumReduce), dim3(blocks), dim3(threads), threads * sizeof(unsigned int), 0, d_true_sums, d_true_predicate, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); printf("\n"); //scan blockwise printf("False scan(first 16): "); hipLaunchKernelGGL(( prefixSum), dim3(blocks), dim3(threads), threads * sizeof(unsigned int), 0, d_false_scan, d_false_predicate, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); printf("\nTrue scan(first 16): "); hipLaunchKernelGGL(( prefixSum), dim3(blocks), dim3(threads), threads * sizeof(unsigned int), 0, d_true_scan, d_true_predicate, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); printf("\n"); //scan block sums printf("False sums scan(first 16): "); hipLaunchKernelGGL(( prefixSum), dim3(1), dim3(threads), threads * sizeof(unsigned int), 0, d_false_sums_scan, d_false_sums, blocks); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); printf("\nTrue sums scan(first 16): "); hipLaunchKernelGGL(( prefixSum), dim3(1), dim3(threads), threads * sizeof(unsigned int), 0, d_true_sums_scan, d_true_sums, blocks); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); printf("\n"); //fix scans printf("False scan fixed(After element 1023): "); hipLaunchKernelGGL(( prefixSumFix), dim3(blocks), dim3(threads), 0, 0, d_false_scan, d_false_sums_scan, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); printf("\nTrue scan fixed(After element 1023): "); hipLaunchKernelGGL(( prefixSumFix), dim3(blocks), dim3(threads), 0, 0, d_true_scan, d_true_sums_scan, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); printf("\n"); //scatter printf("Scatter Element Example:\n"); hipLaunchKernelGGL(( scatterElements), dim3(blocks), dim3(threads), 0, 0, d_inputVals, d_outputVals, d_true_scan, d_false_scan, d_inputPos, d_outputPos, numElems, bit); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //swap buffers (pointers) std::swap(d_outputVals, d_inputVals); std::swap(d_outputPos, d_inputPos); printf("\n_________________________________________________________________________________________\n"); //return; } //copy from input buffer to output hipLaunchKernelGGL(( copyBuffers), dim3(blocks), dim3(threads), 0, 0, d_inputVals, d_inputPos, d_outputVals, d_outputPos, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //delete buffers hipFree(d_true_predicate); hipFree(d_true_sums); hipFree(d_true_sums_scan); 
hipFree(d_true_scan); hipFree(d_false_predicate); hipFree(d_false_sums); hipFree(d_false_sums_scan); hipFree(d_false_scan); }
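One detail worth flagging in the prefixSum kernel above: in CUDA/HIP, every extern __shared__ declaration in a kernel refers to the same dynamically allocated buffer, so s_intermediate, s_out, and s_in all alias one array. A minimal single-block exclusive (Blelloch) scan that uses a single shared buffer is sketched below; it illustrates the technique and is not a drop-in replacement for the assignment code.

// Minimal per-block exclusive (Blelloch) scan sketch using ONE dynamic shared
// array. Launch with dynamic shared memory of blockDim.x * sizeof(unsigned int)
// and a power-of-two blockDim.x; out-of-range lanes are padded with 0.
__global__ void exclusiveScanBlock(unsigned int* out, const unsigned int* in,
                                   size_t size) {
  extern __shared__ unsigned int temp[];
  unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int tid = threadIdx.x;

  temp[tid] = (gid < size) ? in[gid] : 0u;
  __syncthreads();

  // Up-sweep (reduce) phase: build partial sums in a binary tree.
  for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1) {
    unsigned int idx = (tid + 1) * stride * 2 - 1;
    if (idx < blockDim.x) temp[idx] += temp[idx - stride];
    __syncthreads();
  }

  // Clear the root, then down-sweep to turn the tree into an exclusive scan.
  if (tid == 0) temp[blockDim.x - 1] = 0u;
  __syncthreads();
  for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
    unsigned int idx = (tid + 1) * stride * 2 - 1;
    if (idx < blockDim.x) {
      unsigned int t = temp[idx - stride];
      temp[idx - stride] = temp[idx];
      temp[idx] += t;
    }
    __syncthreads();
  }

  if (gid < size) out[gid] = temp[tid];
}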
40d9a575c2a4327e4d35ca7f3fd9e1594a50030f.cu
//Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ __global__ void getPredicates(unsigned int* d_in, size_t size, int bit, unsigned int* d_false_predicate, unsigned int* d_true_predicate) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; unsigned int zero = !(d_in[index] & (1 << bit)); d_false_predicate[index] = zero; d_true_predicate[index] = !zero; if(index < 16) printf(" %d", d_false_predicate[index]); } __global__ void sumReduce(unsigned int* sum, unsigned int* input, size_t size) { //indexing int gid = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; if (gid >= size) return; //copy input array to shared extern __shared__ unsigned int s_sums[]; s_sums[tid] = input[gid]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { s_sums[tid] += s_sums[tid + s]; } __syncthreads(); } if (tid == 0) { sum[blockIdx.x] = s_sums[tid]; } } __global__ void prefixSum(unsigned int* out, unsigned int* in, size_t size) { //indexing int gid = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; if (gid >= size) return; //copy input array to intermediate array to start extern __shared__ int s_intermediate[]; s_intermediate[tid] = in[gid]; __syncthreads(); //copy input array to intermediate array to start extern __shared__ int s_out[]; s_out[tid] = s_intermediate[tid]; __syncthreads(); //0th value in the intermediate does not change if (tid == 0) out[tid] = in[tid]; //Copy in[] to shared memory extern __shared__ int s_in[]; s_in[tid] = in[gid]; __syncthreads(); //reduce for(int s = 1; s < blockDim.x; s *= 2) { int pos = (tid + 1) * s * 2 - 1; if (pos == blockDim.x - 1) //last element is 0 { s_intermediate[pos] = 0; //out becomes the next in for next stride s_in[pos] = s_intermediate[pos]; } else if (pos < blockDim.x) { s_intermediate[pos] = s_in[pos] + s_in[pos - s]; //out becomes the next in for next stride s_in[pos] = s_intermediate[pos]; } __syncthreads(); } //downsweep for (int s = blockDim.x; s > 0; s /= 2) { //get new position 
int pos = (tid + 1) * s * 2 - 1; if(pos < blockDim.x) { //swap and sum values int x = s_in[pos]; int y = s_in[pos - s]; s_out[pos] = y + x; //sum s_out[pos - s] = x; //swap //out becomce next in s_intermediate[pos] = s_out[pos]; s_intermediate[pos - s] = s_out[pos - s]; } __syncthreads(); } //copy back to device out[gid] = s_out[tid]; //or s_out[tid] with downsweep __syncthreads(); //print if (gid < 16) printf("%d ", out[gid]); } __global__ void prefixSumFix(unsigned int* scan, unsigned int* sums, size_t numElems) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= numElems) return; scan[index] += sums[blockIdx.x]; //print if (index > 1023 && index < 1040) printf("%d ", scan[index]); } __global__ void scatterElements(unsigned int* d_inputVals, unsigned int* d_outputVals, unsigned int* true_scan, unsigned int* false_scan, unsigned int* d_inputPos, unsigned int* d_outputPos, size_t numElems, unsigned int bit) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= numElems) return; unsigned int isLastElemZero = ((d_inputVals[numElems - 1] & (1 << bit)) == 0); unsigned int oneStartPos = false_scan[numElems - 1] + isLastElemZero; unsigned int pos = d_inputPos[index]; unsigned int value = d_inputVals[index]; int new_loc; if (value & (1 << bit)) { new_loc = oneStartPos + true_scan[index]; d_outputVals[new_loc] = value; d_outputPos[new_loc] = pos; } else { new_loc = false_scan[index]; d_outputVals[new_loc] = value; d_outputPos[new_loc] = pos; } //print example if (index == 0) printf("isLastElemZero = %d oneStartPos = %d NewLoc = %d Value = %d\n", isLastElemZero, oneStartPos, new_loc, value); } __global__ void copyBuffers(unsigned int* d_inVals, unsigned int* d_inPos, unsigned int* d_outVals, unsigned int* d_outPos, size_t numElems) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numElems) return; d_outVals[index] = d_inVals[index]; d_outPos[index] = d_inPos[index]; } void your_sort(unsigned int* d_inputVals, unsigned int* d_inputPos, unsigned int* d_outputVals, unsigned int* d_outputPos, const size_t numElems) { //kernal threads and blocks int threads = 1024; int blocks = (numElems + threads - 1) / threads; printf("numElems = %d\nBlocks = %d\n\n", numElems, blocks); //initialize buffers unsigned int* d_true_predicate; checkCudaErrors(cudaMalloc((void **) &d_true_predicate, numElems * sizeof(unsigned int))); unsigned int* d_true_sums; checkCudaErrors(cudaMalloc((void **) &d_true_sums, blocks * sizeof(unsigned int))); unsigned int* d_true_sums_scan; checkCudaErrors(cudaMalloc((void **) &d_true_sums_scan, blocks * sizeof(unsigned int))); unsigned int* d_true_scan; checkCudaErrors(cudaMalloc((void **) &d_true_scan, numElems * sizeof(unsigned int))); unsigned int* d_false_predicate; checkCudaErrors(cudaMalloc((void **) &d_false_predicate, numElems * sizeof(unsigned int))); unsigned int* d_false_sums; checkCudaErrors(cudaMalloc((void **) &d_false_sums, blocks * sizeof(unsigned int))); unsigned int* d_false_sums_scan; checkCudaErrors(cudaMalloc((void **) &d_false_sums_scan, blocks * sizeof(unsigned int))); unsigned int* d_false_scan; checkCudaErrors(cudaMalloc((void **) &d_false_scan, numElems * sizeof(unsigned int))); //loop thru bits for(unsigned int bit = 0; bit < 8 * sizeof(unsigned int); bit++) { printf("\nBit position = %d\n", bit); //0 all buffers everytime cudaMemset(d_true_predicate, 0, numElems * sizeof(unsigned int)); cudaMemset(d_true_scan, 0, numElems * sizeof(unsigned int)); cudaMemset(d_true_sums, 0, blocks * sizeof(unsigned int)); 
cudaMemset(d_true_sums_scan, 0, blocks * sizeof(unsigned int)); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); cudaMemset(d_false_predicate, 0, numElems * sizeof(unsigned int)); cudaMemset(d_false_scan, 0, numElems * sizeof(unsigned int)); cudaMemset(d_false_sums, 0, blocks * sizeof(unsigned int)); cudaMemset(d_false_sums_scan, 0, blocks * sizeof(unsigned int)); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //get predicates printf("False predicate(first 16): "); getPredicates<<<blocks, threads>>>(d_inputVals, numElems, bit, d_false_predicate, d_true_predicate); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); printf("\n"); //get block sums printf("False sums(first 16): "); sumReduce<<<blocks, threads, threads * sizeof(unsigned int)>>>(d_false_sums, d_false_predicate, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); printf("\nTrue sums(first 16): "); sumReduce<<<blocks, threads, threads * sizeof(unsigned int)>>>(d_true_sums, d_true_predicate, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); printf("\n"); //scan blockwise printf("False scan(first 16): "); prefixSum<<<blocks, threads, threads * sizeof(unsigned int)>>>(d_false_scan, d_false_predicate, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); printf("\nTrue scan(first 16): "); prefixSum<<<blocks, threads, threads * sizeof(unsigned int)>>>(d_true_scan, d_true_predicate, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); printf("\n"); //scan block sums printf("False sums scan(first 16): "); prefixSum<<<1, threads, threads * sizeof(unsigned int)>>>(d_false_sums_scan, d_false_sums, blocks); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); printf("\nTrue sums scan(first 16): "); prefixSum<<<1, threads, threads * sizeof(unsigned int)>>>(d_true_sums_scan, d_true_sums, blocks); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); printf("\n"); //fix scans printf("False scan fixed(After element 1023): "); prefixSumFix<<<blocks, threads>>>(d_false_scan, d_false_sums_scan, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); printf("\nTrue scan fixed(After element 1023): "); prefixSumFix<<<blocks, threads>>>(d_true_scan, d_true_sums_scan, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); printf("\n"); //scatter printf("Scatter Element Example:\n"); scatterElements<<<blocks, threads>>>(d_inputVals, d_outputVals, d_true_scan, d_false_scan, d_inputPos, d_outputPos, numElems, bit); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //swap buffers (pointers) std::swap(d_outputVals, d_inputVals); std::swap(d_outputPos, d_inputPos); printf("\n_________________________________________________________________________________________\n"); //return; } //copy from input buffer to output copyBuffers<<<blocks, threads>>>(d_inputVals, d_inputPos, d_outputVals, d_outputPos, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //delete buffers cudaFree(d_true_predicate); cudaFree(d_true_sums); cudaFree(d_true_sums_scan); cudaFree(d_true_scan); cudaFree(d_false_predicate); cudaFree(d_false_sums); cudaFree(d_false_sums_scan); cudaFree(d_false_scan); }
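The hand-written histogram/scan/scatter passes above are the point of the exercise, but since the file already pulls in Thrust headers, the same key/value sort can be expressed in a few lines for reference. The extra includes, the function name, and the device_ptr wrapping below are assumptions; this is a comparison sketch, not the assignment's intended solution.

// Reference-only sketch: sorting the same (value, position) pairs with Thrust
// instead of the hand-rolled LSB radix sort.
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/copy.h>

void thrust_reference_sort(unsigned int* d_inputVals, unsigned int* d_inputPos,
                           unsigned int* d_outputVals, unsigned int* d_outputPos,
                           const size_t numElems) {
  thrust::device_ptr<unsigned int> vals(d_inputVals);
  thrust::device_ptr<unsigned int> pos(d_inputPos);

  // Sort keys ascending in place and apply the same permutation to the positions.
  thrust::sort_by_key(vals, vals + numElems, pos);

  // The assignment expects the sorted result in the output buffers.
  thrust::copy(vals, vals + numElems,
               thrust::device_ptr<unsigned int>(d_outputVals));
  thrust::copy(pos, pos + numElems,
               thrust::device_ptr<unsigned int>(d_outputPos));
}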
de7c457db60b92e53255a0042cc644416ccca58e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zbajac_csr_overlap.cu, normal z -> c, Sun Nov 20 20:20:42 2016 */ #include "magmasparse_internal.h" #define PRECISION_c #define BLOCKSIZE 256 __global__ void magma_ck_testLocking(unsigned int* locks, int n) { int id = threadIdx.x % n; bool leaveLoop = false; while (!leaveLoop) { if (atomicExch(&(locks[id]), 1u) == 0u) { //critical section leaveLoop = true; atomicExch(&(locks[id]),0u); } } } /* __global__ void magma_cbajac_csr_o_ls_kernel(int localiters, int n, int matrices, int overlap, magma_c_matrix *D, magma_c_matrix *R, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { // int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; // int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; //valR = R[ (1+blockIdx.x-1)%matrices ].dval; //colR = R[ (1+blockIdx.x-1)%matrices ].dcol; //rowR = R[ (1+blockIdx.x-1)%matrices ].drow; //valD = D[ (1+blockIdx.x-1)%matrices ].dval; //colD = D[ (1+blockIdx.x-1)%matrices ].dcol; //rowD = D[ (1+blockIdx.x-1)%matrices ].drow; if (blockIdx.x%2 == 1) { valR = R[0].dval; valD = D[0].dval; colR = R[0].dcol; rowR = R[0].drow; colD = D[0].dcol; rowD = D[0].drow; } else { valR = R[1].dval; valD = D[1].dval; colR = R[1].dcol; rowR = R[1].drow; colD = D[1].dcol; rowD = D[1].drow; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end); #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_cbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD, magma_index_t * rowD, magma_index_t * colD, magmaFloatComplex * valR, magma_index_t * rowR, magma_index_t * colR, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += 
valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel2(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaFloatComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaFloatComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaFloatComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaFloatComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = 
zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaFloatComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaFloatComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaFloatComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaFloatComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, magmaFloatComplex * valD4, magma_index_t * rowD4, magma_index_t * colD4, magmaFloatComplex * valR4, magma_index_t * rowR4, magma_index_t * colR4, magmaFloatComplex * valD5, magma_index_t * rowD5, magma_index_t * colD5, magmaFloatComplex * valR5, magma_index_t * rowR5, magma_index_t * colR5, magmaFloatComplex * valD6, magma_index_t * rowD6, magma_index_t * colD6, magmaFloatComplex * valR6, magma_index_t * rowR6, magma_index_t * colR6, magmaFloatComplex * valD7, magma_index_t * rowD7, magma_index_t * colD7, magmaFloatComplex * valR7, magma_index_t * rowR7, magma_index_t * colR7, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { valR = valR4; valD = valD4; colR = colR4; rowR = 
rowR4; colD = colD4; rowD = rowD4; }else if ( blockIdx.x%matrices==4 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t 
*colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] 
= local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel32(int localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaFloatComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaFloatComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaFloatComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaFloatComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaFloatComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaFloatComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaFloatComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaFloatComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaFloatComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaFloatComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaFloatComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaFloatComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaFloatComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaFloatComplex *valR22, magma_index_t *rowR22, magma_index_t *colR22, magmaFloatComplex *valD23, magma_index_t *rowD23, magma_index_t 
*colD23, magmaFloatComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, magmaFloatComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaFloatComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaFloatComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaFloatComplex *valR25, magma_index_t *rowR25, magma_index_t *colR25, magmaFloatComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, magmaFloatComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, magmaFloatComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, magmaFloatComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaFloatComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaFloatComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaFloatComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaFloatComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaFloatComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaFloatComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaFloatComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaFloatComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; 
rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , 
magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaFloatComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaFloatComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaFloatComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaFloatComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaFloatComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaFloatComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaFloatComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaFloatComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaFloatComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaFloatComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaFloatComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaFloatComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaFloatComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaFloatComplex *valR22, magma_index_t *rowR22, magma_index_t *colR22, magmaFloatComplex *valD23, magma_index_t *rowD23, magma_index_t *colD23, magmaFloatComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, magmaFloatComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaFloatComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaFloatComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaFloatComplex *valR25, magma_index_t *rowR25, magma_index_t *colR25, magmaFloatComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, magmaFloatComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, 
magmaFloatComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, magmaFloatComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaFloatComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaFloatComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaFloatComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaFloatComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaFloatComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaFloatComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaFloatComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaFloatComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, magmaFloatComplex *valD32, magma_index_t *rowD32, magma_index_t *colD32, magmaFloatComplex *valR32, magma_index_t *rowR32, magma_index_t *colR32, magmaFloatComplex *valD33, magma_index_t *rowD33, magma_index_t *colD33, magmaFloatComplex *valR33, magma_index_t *rowR33, magma_index_t *colR33, magmaFloatComplex *valD34, magma_index_t *rowD34, magma_index_t *colD34, magmaFloatComplex *valR34, magma_index_t *rowR34, magma_index_t *colR34, magmaFloatComplex *valD35, magma_index_t *rowD35, magma_index_t *colD35, magmaFloatComplex *valR35, magma_index_t *rowR35, magma_index_t *colR35, magmaFloatComplex *valD36, magma_index_t *rowD36, magma_index_t *colD36, magmaFloatComplex *valR36, magma_index_t *rowR36, magma_index_t *colR36, magmaFloatComplex *valD37, magma_index_t *rowD37, magma_index_t *colD37, magmaFloatComplex *valR37, magma_index_t *rowR37, magma_index_t *colR37, magmaFloatComplex *valD38, magma_index_t *rowD38, magma_index_t *colD38, magmaFloatComplex *valR38, magma_index_t *rowR38, magma_index_t *colR38, magmaFloatComplex *valD39, magma_index_t *rowD39, magma_index_t *colD39, magmaFloatComplex *valR39, magma_index_t *rowR39, magma_index_t *colR39, magmaFloatComplex *valD40, magma_index_t *rowD40, magma_index_t *colD40, magmaFloatComplex *valR40, magma_index_t *rowR40, magma_index_t *colR40, magmaFloatComplex *valD41, magma_index_t *rowD41, magma_index_t *colD41, magmaFloatComplex *valR41, magma_index_t *rowR41, magma_index_t *colR41, magmaFloatComplex *valD42, magma_index_t *rowD42, magma_index_t *colD42, magmaFloatComplex *valR42, magma_index_t *rowR42, magma_index_t *colR42, magmaFloatComplex *valD43, magma_index_t *rowD43, magma_index_t *colD43, magmaFloatComplex *valR43, magma_index_t *rowR43, magma_index_t *colR43, magmaFloatComplex *valD44, magma_index_t *rowD44, magma_index_t *colD44, magmaFloatComplex *valR44, magma_index_t *rowR44, magma_index_t *colR44, magmaFloatComplex *valD45, magma_index_t *rowD45, magma_index_t *colD45, magmaFloatComplex *valR45, magma_index_t *rowR45, magma_index_t *colR45, magmaFloatComplex *valD46, magma_index_t *rowD46, magma_index_t *colD46, magmaFloatComplex *valR46, magma_index_t *rowR46, magma_index_t *colR46, magmaFloatComplex *valD47, magma_index_t *rowD47, magma_index_t *colD47, magmaFloatComplex *valR47, magma_index_t *rowR47, magma_index_t *colR47, magmaFloatComplex *valD48, magma_index_t *rowD48, magma_index_t *colD48, magmaFloatComplex *valR48, magma_index_t *rowR48, magma_index_t *colR48, magmaFloatComplex *valD49, magma_index_t *rowD49, magma_index_t *colD49, magmaFloatComplex *valR49, magma_index_t *rowR49, magma_index_t *colR49, magmaFloatComplex *valD50, magma_index_t *rowD50, magma_index_t *colD50, magmaFloatComplex *valR50, magma_index_t *rowR50, magma_index_t *colR50, magmaFloatComplex *valD51, magma_index_t *rowD51, 
magma_index_t *colD51, magmaFloatComplex *valR51, magma_index_t *rowR51, magma_index_t *colR51, magmaFloatComplex *valD52, magma_index_t *rowD52, magma_index_t *colD52, magmaFloatComplex *valR52, magma_index_t *rowR52, magma_index_t *colR52, magmaFloatComplex *valD53, magma_index_t *rowD53, magma_index_t *colD53, magmaFloatComplex *valR53, magma_index_t *rowR53, magma_index_t *colR53, magmaFloatComplex *valD54, magma_index_t *rowD54, magma_index_t *colD54, magmaFloatComplex *valR54, magma_index_t *rowR54, magma_index_t *colR54, magmaFloatComplex *valD55, magma_index_t *rowD55, magma_index_t *colD55, magmaFloatComplex *valR55, magma_index_t *rowR55, magma_index_t *colR55, magmaFloatComplex *valD56, magma_index_t *rowD56, magma_index_t *colD56, magmaFloatComplex *valR56, magma_index_t *rowR56, magma_index_t *colR56, magmaFloatComplex *valD57, magma_index_t *rowD57, magma_index_t *colD57, magmaFloatComplex *valR57, magma_index_t *rowR57, magma_index_t *colR57, magmaFloatComplex *valD58, magma_index_t *rowD58, magma_index_t *colD58, magmaFloatComplex *valR58, magma_index_t *rowR58, magma_index_t *colR58, magmaFloatComplex *valD59, magma_index_t *rowD59, magma_index_t *colD59, magmaFloatComplex *valR59, magma_index_t *rowR59, magma_index_t *colR59, magmaFloatComplex *valD60, magma_index_t *rowD60, magma_index_t *colD60, magmaFloatComplex *valR60, magma_index_t *rowR60, magma_index_t *colR60, magmaFloatComplex *valD61, magma_index_t *rowD61, magma_index_t *colD61, magmaFloatComplex *valR61, magma_index_t *rowR61, magma_index_t *colR61, magmaFloatComplex *valD62, magma_index_t *rowD62, magma_index_t *colD62, magmaFloatComplex *valR62, magma_index_t *rowR62, magma_index_t *colR62, magmaFloatComplex *valD63, magma_index_t *rowD63, magma_index_t *colD63, magmaFloatComplex *valR63, magma_index_t *rowR63, magma_index_t *colR63, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; } else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 
) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = colD53; rowD = rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = 
colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else 
bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] matrices magma_int_t number of sub-matrices @param[in] overlap magma_int_t size of the overlap @param[in] D magma_c_matrix* set of matrices with diagonal blocks @param[in] R magma_c_matrix* set of matrices with non-diagonal parts @param[in] b magma_c_matrix RHS @param[in] x magma_c_matrix* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbajac_csr_overlap( magma_int_t localiters, magma_int_t matrices, magma_int_t overlap, magma_c_matrix *D, magma_c_matrix *R, magma_c_matrix b, magma_c_matrix *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int size = D[0].num_rows; int min_nnz=100; for(int i=0; i<matrices; i++){ min_nnz = min(min_nnz, R[i].nnz); } if ( min_nnz > -1 ){ if ( matrices == 1 ){ int dimgrid1 = magma_ceildiv( size , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel1), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, b.dval, x->dval ); } else if (matrices == 2) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel2), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 4){ int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel4), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, 
D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 8) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel8), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol, D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol, D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol, D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 16) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel16), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, b.dval, x->dval ); } else if (matrices == 32) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel32), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 
3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, b.dval, x->dval ); } else if (matrices == 64) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_cbajac_csr_o_ls_kernel64), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, 
R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol, D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol, D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol, D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol, D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol, D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol, D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol, D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol, D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol, D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol, D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol, D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol, D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol, D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol, D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol, D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol, D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol, D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol, D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol, D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol, D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol, D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol, D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol, D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol, D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol, D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol, D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol, D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol, D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol, D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol, D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol, D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, 
queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else { printf("error: invalid matrix count.\n"); } } else { printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
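/*
   Editor's illustrative sketch -- NOT part of the original MAGMA source above.
   It condenses the pattern shared by the magma_cbajac_csr_o_ls_kernel* family
   (the matrices >= 4 variants) into one self-contained CUDA kernel: each thread
   block owns blockDim.x rows but advances only blockDim.x - overlap rows, so
   neighbouring blocks share `overlap` rows; every row does one Jacobi update
   against the full iterate, then localiters-1 further sweeps against a
   block-local copy in shared memory, and only the non-overlapping (lower) part
   of the block writes back (top-down restricted additive Schwarz).
   Assumptions: real float data instead of magmaFloatComplex, plain int indices
   instead of magma_index_t, and -- as in the kernels above -- every row of the
   diagonal block D is non-empty and stores its diagonal entry first.
   All names (bajac_csr_overlap_sketch, SKETCH_BLOCKSIZE) are illustrative.
*/
#define SKETCH_BLOCKSIZE 256

__global__ void
bajac_csr_overlap_sketch(int localiters, int n, int overlap,
                         const float *valD, const int *rowD, const int *colD,  // diagonal blocks (CSR)
                         const float *valR, const int *rowR, const int *colR,  // off-block coupling (CSR)
                         const float *b, float *x)
{
    // first row owned by this block, and the global row handled by this thread
    int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap;
    int index   = inddiag + threadIdx.x;

    __shared__ float local_x[ SKETCH_BLOCKSIZE ];

    bool active = ( index > -1 && index < n );
    float rhs = 0.0f;
    int start = 0, end = 0;

    if ( active ) {
        // v = (R*x)_index : coupling to rows outside this diagonal block,
        // kept frozen during the local sweeps
        float v = 0.0f;
        for (int i = rowR[index]; i < rowR[index+1]; i++)
            v += valR[i] * x[ colR[i] ];

        // tmp = (D*x)_index : coupling inside the diagonal block
        start = rowD[index];
        end   = rowD[index+1];
        float tmp = 0.0f;
        for (int i = start; i < end; i++)
            tmp += valD[i] * x[ colD[i] ];

        // first Jacobi update; valD[start] is the first stored entry of the row,
        // used as the diagonal exactly as in the kernels above
        rhs = b[index] - v;
        local_x[threadIdx.x] = x[index] + ( rhs - tmp ) / valD[start];
    }
    __syncthreads();

    if ( active ) {
        // localiters-1 additional sweeps against the block-local iterate;
        // no barrier between sweeps -- the scheme is deliberately asynchronous,
        // matching the original kernels
        for (int j = 0; j < localiters-1; j++) {
            float tmp = 0.0f;
            for (int i = start; i < end; i++)
                tmp += valD[i] * local_x[ colD[i] - inddiag ];
            local_x[threadIdx.x] += ( rhs - tmp ) / valD[start];
        }
        // restricted additive Schwarz: only the lower, non-overlapping part of
        // the block updates the global iterate
        if ( threadIdx.x >= overlap )
            x[index] = local_x[threadIdx.x];
    }
}

/*
   Host-side launch sketch (assumed plain CUDA stream, no MAGMA queue).  The grid
   is oversized by blocksize/(blocksize-overlap), mirroring the dimgrid1
   computation in magma_cbajac_csr_overlap above, so the overlapping blocks still
   cover all n rows:

       int blocksize = SKETCH_BLOCKSIZE;
       int grid = ( n * blocksize/(blocksize - overlap) + blocksize - 1 ) / blocksize;
       bajac_csr_overlap_sketch<<< grid, blocksize >>>( localiters, n, overlap,
                                                        valD, rowD, colD,
                                                        valR, rowR, colR, b, x );
*/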
de7c457db60b92e53255a0042cc644416ccca58e.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zbajac_csr_overlap.cu, normal z -> c, Sun Nov 20 20:20:42 2016 */ #include "magmasparse_internal.h" #define PRECISION_c #define BLOCKSIZE 256 __global__ void magma_ck_testLocking(unsigned int* locks, int n) { int id = threadIdx.x % n; bool leaveLoop = false; while (!leaveLoop) { if (atomicExch(&(locks[id]), 1u) == 0u) { //critical section leaveLoop = true; atomicExch(&(locks[id]),0u); } } } /* __global__ void magma_cbajac_csr_o_ls_kernel(int localiters, int n, int matrices, int overlap, magma_c_matrix *D, magma_c_matrix *R, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { // int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; // int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; //valR = R[ (1+blockIdx.x-1)%matrices ].dval; //colR = R[ (1+blockIdx.x-1)%matrices ].dcol; //rowR = R[ (1+blockIdx.x-1)%matrices ].drow; //valD = D[ (1+blockIdx.x-1)%matrices ].dval; //colD = D[ (1+blockIdx.x-1)%matrices ].dcol; //rowD = D[ (1+blockIdx.x-1)%matrices ].drow; if (blockIdx.x%2 == 1) { valR = R[0].dval; valD = D[0].dval; colR = R[0].dcol; rowR = R[0].drow; colD = D[0].dcol; rowD = D[0].drow; } else { valR = R[1].dval; valD = D[1].dval; colR = R[1].dcol; rowR = R[1].drow; colD = D[1].dcol; rowD = D[1].drow; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end); #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_cbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD, magma_index_t * rowD, magma_index_t * colD, magmaFloatComplex * valR, magma_index_t * rowR, magma_index_t * colR, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( 
i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel2(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaFloatComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaFloatComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaFloatComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaFloatComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( 
blockIdx.x%matrices==0 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, magmaFloatComplex * valD0, magma_index_t * rowD0, magma_index_t * colD0, magmaFloatComplex * valR0, magma_index_t * rowR0, magma_index_t * colR0, magmaFloatComplex * valD1, magma_index_t * rowD1, magma_index_t * colD1, magmaFloatComplex * valR1, magma_index_t * rowR1, magma_index_t * colR1, magmaFloatComplex * valD2, magma_index_t * rowD2, magma_index_t * colD2, magmaFloatComplex * valR2, magma_index_t * rowR2, magma_index_t * colR2, magmaFloatComplex * valD3, magma_index_t * rowD3, magma_index_t * colD3, magmaFloatComplex * valR3, magma_index_t * rowR3, magma_index_t * colR3, magmaFloatComplex * valD4, magma_index_t * rowD4, magma_index_t * colD4, magmaFloatComplex * valR4, magma_index_t * rowR4, magma_index_t * colR4, magmaFloatComplex * valD5, magma_index_t * rowD5, magma_index_t * colD5, magmaFloatComplex * valR5, magma_index_t * rowR5, magma_index_t * colR5, magmaFloatComplex * valD6, magma_index_t * rowD6, magma_index_t * colD6, magmaFloatComplex * valR6, magma_index_t * rowR6, magma_index_t * colR6, magmaFloatComplex * valD7, magma_index_t * rowD7, magma_index_t * colD7, magmaFloatComplex * valR7, magma_index_t * rowR7, magma_index_t * colR7, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }else if ( blockIdx.x%matrices==4 ) { valR = valR3; 
valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, 
magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel32(int 
localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaFloatComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaFloatComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaFloatComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaFloatComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaFloatComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaFloatComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaFloatComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaFloatComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaFloatComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaFloatComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaFloatComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaFloatComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaFloatComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaFloatComplex *valR22, magma_index_t *rowR22, magma_index_t *colR22, magmaFloatComplex *valD23, magma_index_t *rowD23, magma_index_t *colD23, magmaFloatComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, 
magmaFloatComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaFloatComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaFloatComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaFloatComplex *valR25, magma_index_t *rowR25, magma_index_t *colR25, magmaFloatComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, magmaFloatComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, magmaFloatComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, magmaFloatComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaFloatComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaFloatComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaFloatComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaFloatComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaFloatComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaFloatComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaFloatComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaFloatComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = 
valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_cbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, magmaFloatComplex *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , magmaFloatComplex *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , magmaFloatComplex *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , magmaFloatComplex *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , magmaFloatComplex *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , magmaFloatComplex *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , magmaFloatComplex *valD3 , 
magma_index_t *rowD3 , magma_index_t *colD3 , magmaFloatComplex *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , magmaFloatComplex *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , magmaFloatComplex *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , magmaFloatComplex *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , magmaFloatComplex *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , magmaFloatComplex *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , magmaFloatComplex *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , magmaFloatComplex *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , magmaFloatComplex *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , magmaFloatComplex *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , magmaFloatComplex *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , magmaFloatComplex *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , magmaFloatComplex *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , magmaFloatComplex *valD10, magma_index_t *rowD10, magma_index_t *colD10, magmaFloatComplex *valR10, magma_index_t *rowR10, magma_index_t *colR10, magmaFloatComplex *valD11, magma_index_t *rowD11, magma_index_t *colD11, magmaFloatComplex *valR11, magma_index_t *rowR11, magma_index_t *colR11, magmaFloatComplex *valD12, magma_index_t *rowD12, magma_index_t *colD12, magmaFloatComplex *valR12, magma_index_t *rowR12, magma_index_t *colR12, magmaFloatComplex *valD13, magma_index_t *rowD13, magma_index_t *colD13, magmaFloatComplex *valR13, magma_index_t *rowR13, magma_index_t *colR13, magmaFloatComplex *valD14, magma_index_t *rowD14, magma_index_t *colD14, magmaFloatComplex *valR14, magma_index_t *rowR14, magma_index_t *colR14, magmaFloatComplex *valD15, magma_index_t *rowD15, magma_index_t *colD15, magmaFloatComplex *valR15, magma_index_t *rowR15, magma_index_t *colR15, magmaFloatComplex *valD16, magma_index_t *rowD16, magma_index_t *colD16, magmaFloatComplex *valR16, magma_index_t *rowR16, magma_index_t *colR16, magmaFloatComplex *valD17, magma_index_t *rowD17, magma_index_t *colD17, magmaFloatComplex *valR17, magma_index_t *rowR17, magma_index_t *colR17, magmaFloatComplex *valD18, magma_index_t *rowD18, magma_index_t *colD18, magmaFloatComplex *valR18, magma_index_t *rowR18, magma_index_t *colR18, magmaFloatComplex *valD19, magma_index_t *rowD19, magma_index_t *colD19, magmaFloatComplex *valR19, magma_index_t *rowR19, magma_index_t *colR19, magmaFloatComplex *valD20, magma_index_t *rowD20, magma_index_t *colD20, magmaFloatComplex *valR20, magma_index_t *rowR20, magma_index_t *colR20, magmaFloatComplex *valD21, magma_index_t *rowD21, magma_index_t *colD21, magmaFloatComplex *valR21, magma_index_t *rowR21, magma_index_t *colR21, magmaFloatComplex *valD22, magma_index_t *rowD22, magma_index_t *colD22, magmaFloatComplex *valR22, magma_index_t *rowR22, magma_index_t *colR22, magmaFloatComplex *valD23, magma_index_t *rowD23, magma_index_t *colD23, magmaFloatComplex *valR23, magma_index_t *rowR23, magma_index_t *colR23, magmaFloatComplex *valD24, magma_index_t *rowD24, magma_index_t *colD24, magmaFloatComplex *valR24, magma_index_t *rowR24, magma_index_t *colR24, magmaFloatComplex *valD25, magma_index_t *rowD25, magma_index_t *colD25, magmaFloatComplex *valR25, magma_index_t *rowR25, magma_index_t *colR25, magmaFloatComplex *valD26, magma_index_t *rowD26, magma_index_t *colD26, magmaFloatComplex *valR26, magma_index_t *rowR26, magma_index_t *colR26, magmaFloatComplex *valD27, magma_index_t *rowD27, magma_index_t *colD27, 
magmaFloatComplex *valR27, magma_index_t *rowR27, magma_index_t *colR27, magmaFloatComplex *valD28, magma_index_t *rowD28, magma_index_t *colD28, magmaFloatComplex *valR28, magma_index_t *rowR28, magma_index_t *colR28, magmaFloatComplex *valD29, magma_index_t *rowD29, magma_index_t *colD29, magmaFloatComplex *valR29, magma_index_t *rowR29, magma_index_t *colR29, magmaFloatComplex *valD30, magma_index_t *rowD30, magma_index_t *colD30, magmaFloatComplex *valR30, magma_index_t *rowR30, magma_index_t *colR30, magmaFloatComplex *valD31, magma_index_t *rowD31, magma_index_t *colD31, magmaFloatComplex *valR31, magma_index_t *rowR31, magma_index_t *colR31, magmaFloatComplex *valD32, magma_index_t *rowD32, magma_index_t *colD32, magmaFloatComplex *valR32, magma_index_t *rowR32, magma_index_t *colR32, magmaFloatComplex *valD33, magma_index_t *rowD33, magma_index_t *colD33, magmaFloatComplex *valR33, magma_index_t *rowR33, magma_index_t *colR33, magmaFloatComplex *valD34, magma_index_t *rowD34, magma_index_t *colD34, magmaFloatComplex *valR34, magma_index_t *rowR34, magma_index_t *colR34, magmaFloatComplex *valD35, magma_index_t *rowD35, magma_index_t *colD35, magmaFloatComplex *valR35, magma_index_t *rowR35, magma_index_t *colR35, magmaFloatComplex *valD36, magma_index_t *rowD36, magma_index_t *colD36, magmaFloatComplex *valR36, magma_index_t *rowR36, magma_index_t *colR36, magmaFloatComplex *valD37, magma_index_t *rowD37, magma_index_t *colD37, magmaFloatComplex *valR37, magma_index_t *rowR37, magma_index_t *colR37, magmaFloatComplex *valD38, magma_index_t *rowD38, magma_index_t *colD38, magmaFloatComplex *valR38, magma_index_t *rowR38, magma_index_t *colR38, magmaFloatComplex *valD39, magma_index_t *rowD39, magma_index_t *colD39, magmaFloatComplex *valR39, magma_index_t *rowR39, magma_index_t *colR39, magmaFloatComplex *valD40, magma_index_t *rowD40, magma_index_t *colD40, magmaFloatComplex *valR40, magma_index_t *rowR40, magma_index_t *colR40, magmaFloatComplex *valD41, magma_index_t *rowD41, magma_index_t *colD41, magmaFloatComplex *valR41, magma_index_t *rowR41, magma_index_t *colR41, magmaFloatComplex *valD42, magma_index_t *rowD42, magma_index_t *colD42, magmaFloatComplex *valR42, magma_index_t *rowR42, magma_index_t *colR42, magmaFloatComplex *valD43, magma_index_t *rowD43, magma_index_t *colD43, magmaFloatComplex *valR43, magma_index_t *rowR43, magma_index_t *colR43, magmaFloatComplex *valD44, magma_index_t *rowD44, magma_index_t *colD44, magmaFloatComplex *valR44, magma_index_t *rowR44, magma_index_t *colR44, magmaFloatComplex *valD45, magma_index_t *rowD45, magma_index_t *colD45, magmaFloatComplex *valR45, magma_index_t *rowR45, magma_index_t *colR45, magmaFloatComplex *valD46, magma_index_t *rowD46, magma_index_t *colD46, magmaFloatComplex *valR46, magma_index_t *rowR46, magma_index_t *colR46, magmaFloatComplex *valD47, magma_index_t *rowD47, magma_index_t *colD47, magmaFloatComplex *valR47, magma_index_t *rowR47, magma_index_t *colR47, magmaFloatComplex *valD48, magma_index_t *rowD48, magma_index_t *colD48, magmaFloatComplex *valR48, magma_index_t *rowR48, magma_index_t *colR48, magmaFloatComplex *valD49, magma_index_t *rowD49, magma_index_t *colD49, magmaFloatComplex *valR49, magma_index_t *rowR49, magma_index_t *colR49, magmaFloatComplex *valD50, magma_index_t *rowD50, magma_index_t *colD50, magmaFloatComplex *valR50, magma_index_t *rowR50, magma_index_t *colR50, magmaFloatComplex *valD51, magma_index_t *rowD51, magma_index_t *colD51, magmaFloatComplex *valR51, magma_index_t *rowR51, 
magma_index_t *colR51, magmaFloatComplex *valD52, magma_index_t *rowD52, magma_index_t *colD52, magmaFloatComplex *valR52, magma_index_t *rowR52, magma_index_t *colR52, magmaFloatComplex *valD53, magma_index_t *rowD53, magma_index_t *colD53, magmaFloatComplex *valR53, magma_index_t *rowR53, magma_index_t *colR53, magmaFloatComplex *valD54, magma_index_t *rowD54, magma_index_t *colD54, magmaFloatComplex *valR54, magma_index_t *rowR54, magma_index_t *colR54, magmaFloatComplex *valD55, magma_index_t *rowD55, magma_index_t *colD55, magmaFloatComplex *valR55, magma_index_t *rowR55, magma_index_t *colR55, magmaFloatComplex *valD56, magma_index_t *rowD56, magma_index_t *colD56, magmaFloatComplex *valR56, magma_index_t *rowR56, magma_index_t *colR56, magmaFloatComplex *valD57, magma_index_t *rowD57, magma_index_t *colD57, magmaFloatComplex *valR57, magma_index_t *rowR57, magma_index_t *colR57, magmaFloatComplex *valD58, magma_index_t *rowD58, magma_index_t *colD58, magmaFloatComplex *valR58, magma_index_t *rowR58, magma_index_t *colR58, magmaFloatComplex *valD59, magma_index_t *rowD59, magma_index_t *colD59, magmaFloatComplex *valR59, magma_index_t *rowR59, magma_index_t *colR59, magmaFloatComplex *valD60, magma_index_t *rowD60, magma_index_t *colD60, magmaFloatComplex *valR60, magma_index_t *rowR60, magma_index_t *colR60, magmaFloatComplex *valD61, magma_index_t *rowD61, magma_index_t *colD61, magmaFloatComplex *valR61, magma_index_t *rowR61, magma_index_t *colR61, magmaFloatComplex *valD62, magma_index_t *rowD62, magma_index_t *colD62, magmaFloatComplex *valR62, magma_index_t *rowR62, magma_index_t *colR62, magmaFloatComplex *valD63, magma_index_t *rowD63, magma_index_t *colD63, magmaFloatComplex *valR63, magma_index_t *rowR63, magma_index_t *colR63, const magmaFloatComplex * __restrict__ b, magmaFloatComplex * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0); magmaFloatComplex bl, tmp = zero, v = zero; magmaFloatComplex *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; } else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = 
colD53; rowD = rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( 
blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( 
i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ magmaFloatComplex local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] matrices magma_int_t number of sub-matrices @param[in] overlap magma_int_t size of the overlap @param[in] D magma_c_matrix* set of matrices with diagonal blocks @param[in] R magma_c_matrix* set of matrices with non-diagonal parts @param[in] b magma_c_matrix RHS @param[in] x magma_c_matrix* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cgegpuk ********************************************************************/ extern "C" magma_int_t magma_cbajac_csr_overlap( magma_int_t localiters, magma_int_t matrices, magma_int_t overlap, magma_c_matrix *D, magma_c_matrix *R, magma_c_matrix b, magma_c_matrix *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int size = D[0].num_rows; int min_nnz=100; for(int i=0; i<matrices; i++){ min_nnz = min(min_nnz, R[i].nnz); } if ( min_nnz > -1 ){ if ( matrices == 1 ){ int dimgrid1 = magma_ceildiv( size , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel1<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, b.dval, x->dval ); } else if (matrices == 2) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel2<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 4){ int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel4<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, b.dval, x->dval ); 
//magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 8) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel8<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol, D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol, D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol, D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 16) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel16<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, b.dval, x->dval ); } else if (matrices == 32) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel32<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 
7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, b.dval, x->dval ); } else if (matrices == 64) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_cbajac_csr_o_ls_kernel64<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, 
D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol, D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol, D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol, D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol, D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol, D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol, D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol, D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol, D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol, D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol, D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol, D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol, D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol, D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol, D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol, D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol, D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol, D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol, D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol, D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol, D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol, D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol, D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol, D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol, D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol, D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol, D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol, D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol, D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol, D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol, D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol, D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol, b.dval, x->dval ); //magma_cbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else { printf("error: invalid matrix count.\n"); } } else { printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
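/* Added, hedged usage sketch (not part of the original MAGMA sources): the routine above
   performs one block-asynchronous Jacobi sweep with `localiters` local updates per
   diagonal block, so a caller typically invokes it repeatedly to refine x. The wrapper
   below only illustrates that calling pattern; it assumes D and R have already been split
   into `matrices` diagonal/off-diagonal CSR blocks by a setup routine not shown here, and
   the function name and the `nsweeps` parameter are hypothetical. */
extern "C" magma_int_t
example_cbajac_overlap_sweeps(
    magma_int_t nsweeps,     // number of global sweeps over all blocks
    magma_int_t localiters,  // local Jacobi-like updates inside each block per sweep
    magma_int_t matrices,
    magma_int_t overlap,
    magma_c_matrix *D,
    magma_c_matrix *R,
    magma_c_matrix b,
    magma_c_matrix *x,
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_SUCCESS;
    for (magma_int_t s = 0; s < nsweeps && info == MAGMA_SUCCESS; ++s) {
        // each call updates x in place; only the lower (top-down RAS) part of each
        // overlap region is written back, as in the kernels above
        info = magma_cbajac_csr_overlap( localiters, matrices, overlap,
                                         D, R, b, x, queue );
    }
    return info;
}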
225789e76437ae79b7dbf43b3004ba66b4d14476.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/primitive/include/softmax_backward.h" #include "oneflow/core/primitive/include/log_softmax_backward.h" #include "oneflow/core/primitive/cuda/type_seq.h" #include "oneflow/core/cuda/softmax.cuh" #include "oneflow/core/stream/cuda/cuda_stream_context.h" namespace oneflow { namespace primitive { namespace { enum class Algorithm { kSoftmax, kLogSoftmax, }; template<Algorithm algorithm, typename T> void SoftmaxBackwardGpu(hipStream_t cuda_stream, size_t rows, size_t cols, const T* y, const T* dy, T* dx) { using ComputeType = typename cuda::softmax::DefaultComputeType<T>::type; cuda::softmax::DirectLoad<T, ComputeType> load_y(y, cols); cuda::softmax::DirectLoad<T, ComputeType> load_dy(dy, cols); cuda::softmax::DirectStore<ComputeType, T> store(dx, cols); if (algorithm == Algorithm::kSoftmax) { OF_CUDA_CHECK((cuda::softmax::DispatchSoftmaxGrad<decltype(load_y), decltype(load_dy), decltype(store), ComputeType>( cuda_stream, load_y, load_dy, store, rows, cols))); } else if (algorithm == Algorithm::kLogSoftmax) { OF_CUDA_CHECK((cuda::softmax::DispatchLogSoftmaxGrad<decltype(load_y), decltype(load_dy), decltype(store), ComputeType>( cuda_stream, load_y, load_dy, store, rows, cols))); } else { UNIMPLEMENTED(); } } template<typename SoftmaxBackwardBase, Algorithm algorithm, typename T> class SoftmaxBackwardImpl : public SoftmaxBackwardBase { public: OF_DISALLOW_COPY_AND_MOVE(SoftmaxBackwardImpl); SoftmaxBackwardImpl() = default; ~SoftmaxBackwardImpl() override = default; void Launch(StreamContext* stream_ctx, size_t rows, size_t cols, const void* y, const void* dy, void* dx) override { hipStream_t cuda_stream = CHECK_NOTNULL(dynamic_cast<CudaStreamContext*>(stream_ctx))->cuda_stream(); SoftmaxBackwardGpu<algorithm, T>(cuda_stream, rows, cols, reinterpret_cast<const T*>(y), reinterpret_cast<const T*>(dy), reinterpret_cast<T*>(dx)); } }; template<typename SoftmaxBackwardBase, Algorithm algorithm, typename T> std::unique_ptr<SoftmaxBackwardBase> NewSoftmaxBackward() { return std::unique_ptr<SoftmaxBackwardBase>( new SoftmaxBackwardImpl<SoftmaxBackwardBase, algorithm, T>()); } template<typename BackwardFactoryBase, typename SoftmaxBackwardBase, Algorithm algorithm> class GenericSoftmaxBackwardFactoryImpl : public BackwardFactoryBase { public: OF_DISALLOW_COPY_AND_MOVE(GenericSoftmaxBackwardFactoryImpl); GenericSoftmaxBackwardFactoryImpl() = default; ~GenericSoftmaxBackwardFactoryImpl() override = default; std::unique_ptr<SoftmaxBackwardBase> New(DataType data_type) override { #define MAKE_NEW_SOFTMAX_ENTRY(type_cpp, type_proto) \ {type_proto, NewSoftmaxBackward<SoftmaxBackwardBase, algorithm, type_cpp>}, static const std::map<DataType, std::function<std::unique_ptr<SoftmaxBackwardBase>()>> new_softmax_backward_handle{ OF_PP_FOR_EACH_TUPLE(MAKE_NEW_SOFTMAX_ENTRY, CUDA_PRIMITIVE_FLOATING_TYPE_SEQ)}; #undef MAKE_NEW_SOFTMAX_ENTRY const auto it = 
new_softmax_backward_handle.find(data_type); if (it != new_softmax_backward_handle.end()) { return it->second(); } else { return nullptr; } } }; using SoftmaxBackwardFactoryImpl = GenericSoftmaxBackwardFactoryImpl<SoftmaxBackwardFactory, SoftmaxBackward, Algorithm::kSoftmax>; using LogSoftmaxBackwardFactoryImpl = GenericSoftmaxBackwardFactoryImpl<LogSoftmaxBackwardFactory, LogSoftmaxBackward, Algorithm::kLogSoftmax>; REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, SoftmaxBackwardFactory, SoftmaxBackwardFactoryImpl); REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, LogSoftmaxBackwardFactory, LogSoftmaxBackwardFactoryImpl); } // namespace } // namespace primitive } // namespace oneflow
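For reference, the per-row formulas that DispatchSoftmaxGrad and DispatchLogSoftmaxGrad evaluate on the GPU can be summarized with a plain CPU sketch. This is illustrative code, not part of OneFlow; the double-precision accumulator simply mirrors the ComputeType idea used above.

#include <cmath>
#include <cstddef>

// Softmax backward: with y = softmax(x), dx_i = y_i * (dy_i - sum_j dy_j * y_j).
void softmax_backward_row(const float* y, const float* dy, float* dx, size_t cols) {
  double dot = 0.0;
  for (size_t j = 0; j < cols; ++j) dot += static_cast<double>(dy[j]) * y[j];
  for (size_t j = 0; j < cols; ++j) dx[j] = y[j] * (dy[j] - static_cast<float>(dot));
}

// Log-softmax backward: with y = log_softmax(x), dx_i = dy_i - exp(y_i) * sum_j dy_j.
void log_softmax_backward_row(const float* y, const float* dy, float* dx, size_t cols) {
  double sum = 0.0;
  for (size_t j = 0; j < cols; ++j) sum += dy[j];
  for (size_t j = 0; j < cols; ++j) dx[j] = dy[j] - std::exp(y[j]) * static_cast<float>(sum);
}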
225789e76437ae79b7dbf43b3004ba66b4d14476.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/primitive/include/softmax_backward.h" #include "oneflow/core/primitive/include/log_softmax_backward.h" #include "oneflow/core/primitive/cuda/type_seq.h" #include "oneflow/core/cuda/softmax.cuh" #include "oneflow/core/stream/cuda/cuda_stream_context.h" namespace oneflow { namespace primitive { namespace { enum class Algorithm { kSoftmax, kLogSoftmax, }; template<Algorithm algorithm, typename T> void SoftmaxBackwardGpu(cudaStream_t cuda_stream, size_t rows, size_t cols, const T* y, const T* dy, T* dx) { using ComputeType = typename cuda::softmax::DefaultComputeType<T>::type; cuda::softmax::DirectLoad<T, ComputeType> load_y(y, cols); cuda::softmax::DirectLoad<T, ComputeType> load_dy(dy, cols); cuda::softmax::DirectStore<ComputeType, T> store(dx, cols); if (algorithm == Algorithm::kSoftmax) { OF_CUDA_CHECK((cuda::softmax::DispatchSoftmaxGrad<decltype(load_y), decltype(load_dy), decltype(store), ComputeType>( cuda_stream, load_y, load_dy, store, rows, cols))); } else if (algorithm == Algorithm::kLogSoftmax) { OF_CUDA_CHECK((cuda::softmax::DispatchLogSoftmaxGrad<decltype(load_y), decltype(load_dy), decltype(store), ComputeType>( cuda_stream, load_y, load_dy, store, rows, cols))); } else { UNIMPLEMENTED(); } } template<typename SoftmaxBackwardBase, Algorithm algorithm, typename T> class SoftmaxBackwardImpl : public SoftmaxBackwardBase { public: OF_DISALLOW_COPY_AND_MOVE(SoftmaxBackwardImpl); SoftmaxBackwardImpl() = default; ~SoftmaxBackwardImpl() override = default; void Launch(StreamContext* stream_ctx, size_t rows, size_t cols, const void* y, const void* dy, void* dx) override { cudaStream_t cuda_stream = CHECK_NOTNULL(dynamic_cast<CudaStreamContext*>(stream_ctx))->cuda_stream(); SoftmaxBackwardGpu<algorithm, T>(cuda_stream, rows, cols, reinterpret_cast<const T*>(y), reinterpret_cast<const T*>(dy), reinterpret_cast<T*>(dx)); } }; template<typename SoftmaxBackwardBase, Algorithm algorithm, typename T> std::unique_ptr<SoftmaxBackwardBase> NewSoftmaxBackward() { return std::unique_ptr<SoftmaxBackwardBase>( new SoftmaxBackwardImpl<SoftmaxBackwardBase, algorithm, T>()); } template<typename BackwardFactoryBase, typename SoftmaxBackwardBase, Algorithm algorithm> class GenericSoftmaxBackwardFactoryImpl : public BackwardFactoryBase { public: OF_DISALLOW_COPY_AND_MOVE(GenericSoftmaxBackwardFactoryImpl); GenericSoftmaxBackwardFactoryImpl() = default; ~GenericSoftmaxBackwardFactoryImpl() override = default; std::unique_ptr<SoftmaxBackwardBase> New(DataType data_type) override { #define MAKE_NEW_SOFTMAX_ENTRY(type_cpp, type_proto) \ {type_proto, NewSoftmaxBackward<SoftmaxBackwardBase, algorithm, type_cpp>}, static const std::map<DataType, std::function<std::unique_ptr<SoftmaxBackwardBase>()>> new_softmax_backward_handle{ OF_PP_FOR_EACH_TUPLE(MAKE_NEW_SOFTMAX_ENTRY, CUDA_PRIMITIVE_FLOATING_TYPE_SEQ)}; #undef MAKE_NEW_SOFTMAX_ENTRY const auto it = new_softmax_backward_handle.find(data_type); if (it != 
new_softmax_backward_handle.end()) { return it->second(); } else { return nullptr; } } }; using SoftmaxBackwardFactoryImpl = GenericSoftmaxBackwardFactoryImpl<SoftmaxBackwardFactory, SoftmaxBackward, Algorithm::kSoftmax>; using LogSoftmaxBackwardFactoryImpl = GenericSoftmaxBackwardFactoryImpl<LogSoftmaxBackwardFactory, LogSoftmaxBackward, Algorithm::kLogSoftmax>; REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, SoftmaxBackwardFactory, SoftmaxBackwardFactoryImpl); REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, LogSoftmaxBackwardFactory, LogSoftmaxBackwardFactoryImpl); } // namespace } // namespace primitive } // namespace oneflow
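The factory machinery in both versions above boils down to a registry keyed by DataType that returns a creator functor. The stripped-down sketch below shows that pattern in isolation; all names here are illustrative and deliberately much simpler than the OneFlow types.

#include <functional>
#include <map>
#include <memory>

enum class DataType { kFloat, kDouble };

struct Backward { virtual ~Backward() = default; };
template<typename T> struct BackwardImpl : Backward {};

template<typename T>
std::unique_ptr<Backward> NewBackward() { return std::make_unique<BackwardImpl<T>>(); }

// Analogue of GenericSoftmaxBackwardFactoryImpl::New: look up the creator for the
// requested data type and return nullptr when the type is unsupported.
std::unique_ptr<Backward> New(DataType data_type) {
  static const std::map<DataType, std::function<std::unique_ptr<Backward>()>> registry{
      {DataType::kFloat, NewBackward<float>},
      {DataType::kDouble, NewBackward<double>},
  };
  const auto it = registry.find(data_type);
  return it != registry.end() ? it->second() : nullptr;
}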
64ad415f2e7137c1173f6c619801f4ac100b6f7f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated s Wed Nov 14 22:53:46 2012 */ #include "common_magma.h" #define BLOCK_SIZE 32 //#define num_threads 64 #define dgemv_bs 32 #define slansy_bs 64 #define PRECISION_s #if (!defined(PRECISION_z)) || (GPUSHMEM >= 200) __global__ void l_slansy_special (int n, const float* A, int lda, float *y){ int tx = threadIdx.x ; int ty = threadIdx.y ; int ind = blockIdx.x* dgemv_bs + tx ; float res = 0.; __shared__ float la[dgemv_bs][dgemv_bs+1]; A += ind; A+= ty * lda ; int break_d = blockIdx.x* dgemv_bs ; for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf( la[tx][j+ty*8]) ; } A+=lda* dgemv_bs ; __syncthreads(); } #pragma unroll 8 for(int j =0; j<dgemv_bs; j+=4) la[ty+j][tx] = A[ j * lda]; A+= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){ if ( i < tx ) { la[tx][i] = la[i][tx] ; } else la[tx][i] = la[tx][i] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4 ; j++){ res+=fabsf(la[tx][j+ty*8]); } break_d += dgemv_bs ; __syncthreads(); for(int i=break_d; i<n; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j<dgemv_bs; j+=4) la[ty+j][tx] = A[ j * lda]; A+= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4;j++){ res+= fabsf(la[tx][j+ty*8]); } __syncthreads(); } la[tx][ty] = MAGMA_S_MAKE( res, 0. ); __syncthreads(); if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); y[ind] = res; } } __global__ void l_slansy_generic(int n, const float* A, int lda, float *y, int m_full_block, int m_mod_32) { int tx = threadIdx.x ; int ty = threadIdx.y ; int ind = blockIdx.x* dgemv_bs + tx ; float res = 0.; __shared__ float la [dgemv_bs][dgemv_bs+1]; if( blockIdx.x == m_full_block ) { /************************************************************************ -- Last block -- -- We will do something unusual here -- For sufficiently large matrix the overhead will be very low *************************************************************************/ if ( tx < m_mod_32 ){ A+= ( blockIdx.x * dgemv_bs + tx ) ; } else{ A+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ; } A+= ty * lda ; int break_d = blockIdx.x* dgemv_bs ; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf( la[tx][j+ty*8]); } A+=lda* dgemv_bs ; __syncthreads(); } /* we don't need to make zero, as those computation will be discarded. */ if( ty==0 ) { /*-------------------------------------------- he will compute the triangular parts others will be waiting with values. -----------------------------------------------*/ int j ; int count = 1 ; if( tx < m_mod_32 ) count = tx ; else count = m_mod_32 ; for(j =0;j<=count;j++){ res+= fabsf( A[j*lda]) ; } A+=(tx)*lda; count = 1 ; for(;j<m_mod_32;j++){ res+=fabsf( A[count]) ; count++; } } else{ } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0. ) ; __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. 
----------------------------------------------------------*/ if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); if( tx < m_mod_32) y[ind] = res; } } else{ /*************************************** ----------------------------------- -- All the blocks but the last one -- **************************************** -------------------------------------*/ A += ind; A+= ty * lda ; int break_d = blockIdx.x* dgemv_bs ; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf(la[tx][j+ty*8]); } A+=lda* dgemv_bs ; __syncthreads(); } /*------------------------------------ Diagonal Copy + Transpose lower triangle --------------------------------------*/ #pragma unroll 8 for(int j =0; j<dgemv_bs; j+=4) la[ty+j][tx] = A[ j * lda]; A+= dgemv_bs ; __syncthreads(); /*-------------------------------------------- Mirror Upper Triangle to Lower triangle ---------------------------------------------*/ #pragma unroll 8 for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){ if ( i < tx ) { la[tx][i] = la[i][tx] ; } else la[tx][i] = la[tx][i] ; } __syncthreads(); /*-------------------------------- Do diagonal Computation -----------------------------------*/ #pragma unroll 8 for(int j=0; j < dgemv_bs/4 ; j++){ res+= fabsf(la[tx][j+ty*8]); } break_d += dgemv_bs ; __syncthreads(); n -= m_mod_32 ; // @ /*----------------------------- Go Down -------------------------------*/ for(int i=break_d; i<n; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j<dgemv_bs; j+=4) la[ty+j][tx] = A[ j * lda]; A+= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4;j++){ res+=fabsf(la[tx][j+ty*8]); } __syncthreads(); } /*--------------------------------------------- doing m_mod_32 stuffs here. Symmetric is giving us benefit .. true -----------------------------------------------*/ A-=tx; if( tx < m_mod_32){ A+=tx; } else{ A+=(m_mod_32-1); /* Same as above*/ } #pragma unroll 8 for(int j=0; j<dgemv_bs; j+=4){ if( tx < m_mod_32 ) la[ty+j][tx] = MAGMA_S_MUL( MAGMA_S_ONE, A[ j * lda] ); else la[ty+j][tx] = MAGMA_S_MUL( MAGMA_S_ZERO, A[ j * lda] ); } __syncthreads(); /*---------------------------------------- What about doing some Zeroing here? instead of zeroing before? -----------------------------------------*/ #pragma unroll 8 for(int j=0; j < dgemv_bs/4;j++){ res+=fabsf(la[tx][j+ty*8]); } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. 
----------------------------------------------------------*/ if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); y[ind] = res; } } } __global__ void u_slansy_generic (int n, const float* A, int lda, float *y, int m_full_block, int m_mod_32){ int tx = threadIdx.x ; int ty = threadIdx.y ; int ind = blockIdx.x* dgemv_bs + tx ; float res = 0.; __shared__ float la [dgemv_bs][dgemv_bs+1]; int blockIdxx = blockIdx.x ; if( blockIdx.x == m_full_block ) { /************************************************************************ -- Last block -- -- We will do something unusual here -- For sufficiently large matrix the overhead will be very low *************************************************************************/ ind = tx ; A+= lda*(n-1) ; if ( tx < m_mod_32 ){ A+= ( tx ) ; } else{ A+= ( m_mod_32 -1) ; } A-= ty * lda ; int break_d = (blockIdx.x)* dgemv_bs ; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[-j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf(la[tx][j+ty*8]); } A-=lda* dgemv_bs ; __syncthreads(); } /* we don't need to make zero, as those computation will be discarded. */ if( ty==0 ) { /*-------------------------------------------- he will compute the triangular parts others will be waiting with values. -----------------------------------------------*/ int j ; int count = 1 ; if( tx < m_mod_32 ) count =m_mod_32- tx ; else count = m_mod_32 ; for(j =0;j<count;j++){ res+= fabsf( A[-j*lda] ); } A-=(count-1)*lda; count = 1 ; for(;j<m_mod_32;j++){ res+= fabsf( A[-count] ); count++; } } else{ } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); if( tx < m_mod_32) y[ind] = res; } } else{ /*************************************** ----------------------------------- -- All the blocks but the last one -- -- By the way this code can be optimized more. 
**************************************** -------------------------------------*/ ind = blockIdx.x * dgemv_bs + tx + m_mod_32 ; const float *A1 = A ; A+= lda*(n-1) ; A += ind; A-= ty * lda ; int break_d = (n / dgemv_bs - blockIdxx-1 )* dgemv_bs ; /*---------------------------- Go Left -------------------------------*/ for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[-j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf( la[tx][j+ty*8]); } A-=lda* dgemv_bs ; __syncthreads(); } /*------------------------------------ Diagonal Copy + Transpose lower triangle --------------------------------------*/ #pragma unroll 8 for(int j =0; j<dgemv_bs; j+=4){ la[tx][31-ty-j] = A[ -j * lda]; } A-= dgemv_bs ; __syncthreads(); /*-------------------------------------------- Mirror Upper Triangle to Lower triangle ---------------------------------------------*/ #pragma unroll 8 for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){ if ( i <tx ){ la[tx][i] = la[i][tx]; } else{ la[tx][i] = la[tx][i] ; } } __syncthreads(); /*-------------------------------- Do diagonal Computation -----------------------------------*/ #pragma unroll 8 for(int j=0; j < dgemv_bs/4 ; j++){ res+=fabsf( la[tx][j+ty*8] ) ; } break_d += dgemv_bs ; __syncthreads(); n -= m_mod_32 ; // @ /*----------------------------- Go Up -------------------------------*/ int i ; for( i=break_d; i<n; i+= dgemv_bs ){ #pragma unroll 8 for(int j=0; j<dgemv_bs; j+=4){ la[ty+j][tx] = A[- j * lda]; } A-= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4;j++){ res+=fabsf ( la[31-tx][j+ty*8] ); } __syncthreads(); } /*--------------------------------------------- doing m_mod_32 stuffs here. Symmetric is giving us benefit .. true Do the other way please...... -----------------------------------------------*/ A1 = A1 + m_mod_32 * lda + tx *lda ; if( ty == 0 ) { for( int j = 0 ; j < m_mod_32 ; j++){ res+= fabsf ( A1[ j + lda * (blockIdx.x) * 32 ] ) ; } } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); y[ind] = res; } } } __global__ void u_slansy_special (int n, const float* A, int lda, float *y ){ int tx = threadIdx.x ; int ty = threadIdx.y ; int ind = blockIdx.x* dgemv_bs + tx ; float res = 0.; /* Reverse Computation ... 
- Left - Triangle - Up */ A+= lda*(n-1) ; __shared__ float la [dgemv_bs][dgemv_bs+1]; A += ind; A-= ty * lda ; int break_d = (n / dgemv_bs - blockIdx.x-1 )* dgemv_bs ; for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[-j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf(la[tx][j+ty*8]); } A-=lda* dgemv_bs ; __syncthreads(); } #pragma unroll 8 for(int j =0; j<dgemv_bs; j+=4) la[tx][31-ty-j] = A[ -j * lda]; /* Look at the indexing changes */ A-= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){ if ( i <tx ){ la[tx][i] = la[i][tx]; } else{ la[tx][i] = la[tx][i] ; } } __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4 ; j++){ res+= fabsf(la[tx][j+ty*8]); } break_d += dgemv_bs ; __syncthreads(); for(int i=break_d; i<n; i+= dgemv_bs ){ #pragma unroll 8 for(int j=0; j<dgemv_bs; j+=4) la[ty+j][tx] = A[ -j * lda]; A-= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4;j++){ res+=fabsf( la[31-tx][j+ty*8]); } __syncthreads(); } la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); y[ind] = res; } } extern "C" void mslansy (char uplo, int m, const float *A, int lda, float *Y ) { /* Note: The UPLO = 'U' Version can be optimized more. side is not needed........................... */ int blocks; if (m % dgemv_bs==0) blocks = m/ dgemv_bs; else blocks = m/ dgemv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(32, 4, 1); if( m % dgemv_bs == 0 ) { if( uplo == 'L' || uplo == 'l'){ hipLaunchKernelGGL(( l_slansy_special) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, Y); } else{ hipLaunchKernelGGL(( u_slansy_special) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, Y); } } else{ int m_full_block = (m - m % 32 ) /32 ; int m_mod_32 = m%32 ; if( uplo == 'L' || uplo == 'l'){ hipLaunchKernelGGL(( l_slansy_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, Y, m_full_block, m_mod_32); } else{ hipLaunchKernelGGL(( u_slansy_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, Y, m_full_block, m_mod_32); } } } #endif /* (!defined(PRECISION_z)) || (GPUSHMEM >= 200) */ __global__ void l_slansy_max (int m, const float* A, int lda, float *y){ int tx = threadIdx.x ; int ind = blockIdx.x * slansy_bs + tx ; float res = 0., res1; int break_d = blockIdx.x* slansy_bs; if (ind < m) { A += ind; for(int i=0; i<break_d; i += slansy_bs ){ #pragma unroll 8 for(int j=0; j< slansy_bs; j++){ res1 = fabsf(A[j*lda]); res = fmax(res,res1); } A += lda*slansy_bs; } for(int j=0; j<=tx; j++){ res1 = fabsf(A[j*lda]); res = fmax(res,res1); } y[ind] = res; } } __global__ void u_slansy_max (int m, const float* A, int lda, float *y){ int ind = blockIdx.x * slansy_bs + threadIdx.x ; float res = 0.; A += ind; if (ind < m){ for(int j=m-1; j>= ind; j--) res = fmax(res, fabsf(A[j*lda])); y[ind] = res; } } extern "C" void slansy_max (char uplo, int m, const float *A, int lda, float *y){ int blocks; if (m % slansy_bs==0) blocks = m/ slansy_bs; else blocks = m/ slansy_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(slansy_bs, 1, 1); if( uplo == 'L' || uplo == 'l'){ hipLaunchKernelGGL(( l_slansy_max) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, y); } else{ hipLaunchKernelGGL(( u_slansy_max) , dim3(grid), dim3(threads), 0, magma_stream , m, A, lda, y); } } extern "C" float magmablas_slansy(char norm, char uplo, magma_int_t n, const 
float *A, magma_int_t lda, float *WORK ) { if (norm == 'I' || norm =='i') { #if (GPUSHMEM >= 200) mslansy ( uplo, n, A, lda, WORK); int val = hipblasIsamax(n,WORK,1); float retVal[1]; hipblasGetMatrix( 1, 1, sizeof( float ), WORK+val-1, 1, retVal, 1 ) ; return retVal[0]; #else printf("Only normM is available. Exit.\n"); exit(1); #endif } else if (norm == 'M' || norm =='m') { slansy_max ( uplo, n, A, lda, WORK); int val = hipblasIsamax(n,WORK,1); float retVal[1]; hipblasGetMatrix( 1, 1, sizeof( float ), WORK+val-1, 1, retVal, 1 ) ; return retVal[0]; } else { printf("Only normI and normM are available. Exit.\n"); exit(1); } }
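As a correctness reference for magmablas_slansy above: for norm 'I' the kernels accumulate per-row absolute sums of the symmetric matrix into WORK, and the final value is the maximum of those sums (selected via hipblasIsamax); for norm 'M' it is simply the largest absolute entry. The plain CPU sketch below covers the 'I' case for lower-triangular storage; it is illustrative only and ignores the blocking used by the kernels.

#include <cmath>
#include <cstddef>

// Infinity norm of a symmetric n x n matrix, column-major with leading dimension
// lda, referencing only the lower triangle (uplo = 'L'). Row sums of |A| are formed
// by mirroring the stored triangle; the result is their maximum.
float slansy_inf_lower_ref(int n, const float* A, int lda) {
  float norm = 0.0f;
  for (int i = 0; i < n; ++i) {
    float row_sum = 0.0f;
    for (int j = 0; j < n; ++j) {
      const float v = (j <= i) ? A[i + (size_t)j * lda]   // stored entry A(i,j)
                               : A[j + (size_t)i * lda];  // mirrored from A(j,i)
      row_sum += std::fabs(v);
    }
    if (row_sum > norm) norm = row_sum;
  }
  return norm;
}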
64ad415f2e7137c1173f6c619801f4ac100b6f7f.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated s Wed Nov 14 22:53:46 2012 */ #include "common_magma.h" #define BLOCK_SIZE 32 //#define num_threads 64 #define dgemv_bs 32 #define slansy_bs 64 #define PRECISION_s #if (!defined(PRECISION_z)) || (GPUSHMEM >= 200) __global__ void l_slansy_special (int n, const float* A, int lda, float *y){ int tx = threadIdx.x ; int ty = threadIdx.y ; int ind = blockIdx.x* dgemv_bs + tx ; float res = 0.; __shared__ float la[dgemv_bs][dgemv_bs+1]; A += ind; A+= ty * lda ; int break_d = blockIdx.x* dgemv_bs ; for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf( la[tx][j+ty*8]) ; } A+=lda* dgemv_bs ; __syncthreads(); } #pragma unroll 8 for(int j =0; j<dgemv_bs; j+=4) la[ty+j][tx] = A[ j * lda]; A+= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){ if ( i < tx ) { la[tx][i] = la[i][tx] ; } else la[tx][i] = la[tx][i] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4 ; j++){ res+=fabsf(la[tx][j+ty*8]); } break_d += dgemv_bs ; __syncthreads(); for(int i=break_d; i<n; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j<dgemv_bs; j+=4) la[ty+j][tx] = A[ j * lda]; A+= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4;j++){ res+= fabsf(la[tx][j+ty*8]); } __syncthreads(); } la[tx][ty] = MAGMA_S_MAKE( res, 0. ); __syncthreads(); if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); y[ind] = res; } } __global__ void l_slansy_generic(int n, const float* A, int lda, float *y, int m_full_block, int m_mod_32) { int tx = threadIdx.x ; int ty = threadIdx.y ; int ind = blockIdx.x* dgemv_bs + tx ; float res = 0.; __shared__ float la [dgemv_bs][dgemv_bs+1]; if( blockIdx.x == m_full_block ) { /************************************************************************ -- Last block -- -- We will do something unusual here -- For sufficiently large matrix the overhead will be very low *************************************************************************/ if ( tx < m_mod_32 ){ A+= ( blockIdx.x * dgemv_bs + tx ) ; } else{ A+= ( blockIdx.x * dgemv_bs + m_mod_32 -1) ; } A+= ty * lda ; int break_d = blockIdx.x* dgemv_bs ; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf( la[tx][j+ty*8]); } A+=lda* dgemv_bs ; __syncthreads(); } /* we don't need to make zero, as those computation will be discarded. */ if( ty==0 ) { /*-------------------------------------------- he will compute the triangular parts others will be waiting with values. -----------------------------------------------*/ int j ; int count = 1 ; if( tx < m_mod_32 ) count = tx ; else count = m_mod_32 ; for(j =0;j<=count;j++){ res+= fabsf( A[j*lda]) ; } A+=(tx)*lda; count = 1 ; for(;j<m_mod_32;j++){ res+=fabsf( A[count]) ; count++; } } else{ } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0. ) ; __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. 
----------------------------------------------------------*/ if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); if( tx < m_mod_32) y[ind] = res; } } else{ /*************************************** ----------------------------------- -- All the blocks but the last one -- **************************************** -------------------------------------*/ A += ind; A+= ty * lda ; int break_d = blockIdx.x* dgemv_bs ; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf(la[tx][j+ty*8]); } A+=lda* dgemv_bs ; __syncthreads(); } /*------------------------------------ Diagonal Copy + Transpose lower triangle --------------------------------------*/ #pragma unroll 8 for(int j =0; j<dgemv_bs; j+=4) la[ty+j][tx] = A[ j * lda]; A+= dgemv_bs ; __syncthreads(); /*-------------------------------------------- Mirror Upper Triangle to Lower triangle ---------------------------------------------*/ #pragma unroll 8 for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){ if ( i < tx ) { la[tx][i] = la[i][tx] ; } else la[tx][i] = la[tx][i] ; } __syncthreads(); /*-------------------------------- Do diagonal Computation -----------------------------------*/ #pragma unroll 8 for(int j=0; j < dgemv_bs/4 ; j++){ res+= fabsf(la[tx][j+ty*8]); } break_d += dgemv_bs ; __syncthreads(); n -= m_mod_32 ; // @ /*----------------------------- Go Down -------------------------------*/ for(int i=break_d; i<n; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j<dgemv_bs; j+=4) la[ty+j][tx] = A[ j * lda]; A+= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4;j++){ res+=fabsf(la[tx][j+ty*8]); } __syncthreads(); } /*--------------------------------------------- doing m_mod_32 stuffs here. Symmetric is giving us benefit .. true -----------------------------------------------*/ A-=tx; if( tx < m_mod_32){ A+=tx; } else{ A+=(m_mod_32-1); /* Same as above*/ } #pragma unroll 8 for(int j=0; j<dgemv_bs; j+=4){ if( tx < m_mod_32 ) la[ty+j][tx] = MAGMA_S_MUL( MAGMA_S_ONE, A[ j * lda] ); else la[ty+j][tx] = MAGMA_S_MUL( MAGMA_S_ZERO, A[ j * lda] ); } __syncthreads(); /*---------------------------------------- What about doing some Zeroing here? instead of zeroing before? -----------------------------------------*/ #pragma unroll 8 for(int j=0; j < dgemv_bs/4;j++){ res+=fabsf(la[tx][j+ty*8]); } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. 
----------------------------------------------------------*/ if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); y[ind] = res; } } } __global__ void u_slansy_generic (int n, const float* A, int lda, float *y, int m_full_block, int m_mod_32){ int tx = threadIdx.x ; int ty = threadIdx.y ; int ind = blockIdx.x* dgemv_bs + tx ; float res = 0.; __shared__ float la [dgemv_bs][dgemv_bs+1]; int blockIdxx = blockIdx.x ; if( blockIdx.x == m_full_block ) { /************************************************************************ -- Last block -- -- We will do something unusual here -- For sufficiently large matrix the overhead will be very low *************************************************************************/ ind = tx ; A+= lda*(n-1) ; if ( tx < m_mod_32 ){ A+= ( tx ) ; } else{ A+= ( m_mod_32 -1) ; } A-= ty * lda ; int break_d = (blockIdx.x)* dgemv_bs ; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[-j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf(la[tx][j+ty*8]); } A-=lda* dgemv_bs ; __syncthreads(); } /* we don't need to make zero, as those computation will be discarded. */ if( ty==0 ) { /*-------------------------------------------- he will compute the triangular parts others will be waiting with values. -----------------------------------------------*/ int j ; int count = 1 ; if( tx < m_mod_32 ) count =m_mod_32- tx ; else count = m_mod_32 ; for(j =0;j<count;j++){ res+= fabsf( A[-j*lda] ); } A-=(count-1)*lda; count = 1 ; for(;j<m_mod_32;j++){ res+= fabsf( A[-count] ); count++; } } else{ } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); if( tx < m_mod_32) y[ind] = res; } } else{ /*************************************** ----------------------------------- -- All the blocks but the last one -- -- By the way this code can be optimized more. 
**************************************** -------------------------------------*/ ind = blockIdx.x * dgemv_bs + tx + m_mod_32 ; const float *A1 = A ; A+= lda*(n-1) ; A += ind; A-= ty * lda ; int break_d = (n / dgemv_bs - blockIdxx-1 )* dgemv_bs ; /*---------------------------- Go Left -------------------------------*/ for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[-j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf( la[tx][j+ty*8]); } A-=lda* dgemv_bs ; __syncthreads(); } /*------------------------------------ Diagonal Copy + Transpose lower triangle --------------------------------------*/ #pragma unroll 8 for(int j =0; j<dgemv_bs; j+=4){ la[tx][31-ty-j] = A[ -j * lda]; } A-= dgemv_bs ; __syncthreads(); /*-------------------------------------------- Mirror Upper Triangle to Lower triangle ---------------------------------------------*/ #pragma unroll 8 for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){ if ( i <tx ){ la[tx][i] = la[i][tx]; } else{ la[tx][i] = la[tx][i] ; } } __syncthreads(); /*-------------------------------- Do diagonal Computation -----------------------------------*/ #pragma unroll 8 for(int j=0; j < dgemv_bs/4 ; j++){ res+=fabsf( la[tx][j+ty*8] ) ; } break_d += dgemv_bs ; __syncthreads(); n -= m_mod_32 ; // @ /*----------------------------- Go Up -------------------------------*/ int i ; for( i=break_d; i<n; i+= dgemv_bs ){ #pragma unroll 8 for(int j=0; j<dgemv_bs; j+=4){ la[ty+j][tx] = A[- j * lda]; } A-= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4;j++){ res+=fabsf ( la[31-tx][j+ty*8] ); } __syncthreads(); } /*--------------------------------------------- doing m_mod_32 stuffs here. Symmetric is giving us benefit .. true Do the other way please...... -----------------------------------------------*/ A1 = A1 + m_mod_32 * lda + tx *lda ; if( ty == 0 ) { for( int j = 0 ; j < m_mod_32 ; j++){ res+= fabsf ( A1[ j + lda * (blockIdx.x) * 32 ] ) ; } } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); y[ind] = res; } } } __global__ void u_slansy_special (int n, const float* A, int lda, float *y ){ int tx = threadIdx.x ; int ty = threadIdx.y ; int ind = blockIdx.x* dgemv_bs + tx ; float res = 0.; /* Reverse Computation ... 
- Left - Triangle - Up */ A+= lda*(n-1) ; __shared__ float la [dgemv_bs][dgemv_bs+1]; A += ind; A-= ty * lda ; int break_d = (n / dgemv_bs - blockIdx.x-1 )* dgemv_bs ; for(int i=0; i<break_d; i += dgemv_bs ){ #pragma unroll 8 for(int j=0; j < dgemv_bs ; j+=4){ la[tx][ty+j] = A[-j*lda] ; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8 ; j++){ res+=fabsf(la[tx][j+ty*8]); } A-=lda* dgemv_bs ; __syncthreads(); } #pragma unroll 8 for(int j =0; j<dgemv_bs; j+=4) la[tx][31-ty-j] = A[ -j * lda]; /* Look at the indexing changes */ A-= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int i=ty*8; i<(1+ty)* dgemv_bs/4 ; i++){ if ( i <tx ){ la[tx][i] = la[i][tx]; } else{ la[tx][i] = la[tx][i] ; } } __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4 ; j++){ res+= fabsf(la[tx][j+ty*8]); } break_d += dgemv_bs ; __syncthreads(); for(int i=break_d; i<n; i+= dgemv_bs ){ #pragma unroll 8 for(int j=0; j<dgemv_bs; j+=4) la[ty+j][tx] = A[ -j * lda]; A-= dgemv_bs ; __syncthreads(); #pragma unroll 8 for(int j=0; j < dgemv_bs/4;j++){ res+=fabsf( la[31-tx][j+ty*8]); } __syncthreads(); } la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); if( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); y[ind] = res; } } extern "C" void mslansy (char uplo, int m, const float *A, int lda, float *Y ) { /* Note: The UPLO = 'U' Version can be optimized more. side is not needed........................... */ int blocks; if (m % dgemv_bs==0) blocks = m/ dgemv_bs; else blocks = m/ dgemv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(32, 4, 1); if( m % dgemv_bs == 0 ) { if( uplo == 'L' || uplo == 'l'){ l_slansy_special <<< grid, threads, 0, magma_stream >>> (m, A, lda, Y); } else{ u_slansy_special <<< grid, threads, 0, magma_stream >>> (m, A, lda, Y); } } else{ int m_full_block = (m - m % 32 ) /32 ; int m_mod_32 = m%32 ; if( uplo == 'L' || uplo == 'l'){ l_slansy_generic <<< grid, threads, 0, magma_stream >>> (m, A, lda, Y, m_full_block, m_mod_32); } else{ u_slansy_generic <<< grid, threads, 0, magma_stream >>> (m, A, lda, Y, m_full_block, m_mod_32); } } } #endif /* (!defined(PRECISION_z)) || (GPUSHMEM >= 200) */ __global__ void l_slansy_max (int m, const float* A, int lda, float *y){ int tx = threadIdx.x ; int ind = blockIdx.x * slansy_bs + tx ; float res = 0., res1; int break_d = blockIdx.x* slansy_bs; if (ind < m) { A += ind; for(int i=0; i<break_d; i += slansy_bs ){ #pragma unroll 8 for(int j=0; j< slansy_bs; j++){ res1 = fabsf(A[j*lda]); res = fmax(res,res1); } A += lda*slansy_bs; } for(int j=0; j<=tx; j++){ res1 = fabsf(A[j*lda]); res = fmax(res,res1); } y[ind] = res; } } __global__ void u_slansy_max (int m, const float* A, int lda, float *y){ int ind = blockIdx.x * slansy_bs + threadIdx.x ; float res = 0.; A += ind; if (ind < m){ for(int j=m-1; j>= ind; j--) res = fmax(res, fabsf(A[j*lda])); y[ind] = res; } } extern "C" void slansy_max (char uplo, int m, const float *A, int lda, float *y){ int blocks; if (m % slansy_bs==0) blocks = m/ slansy_bs; else blocks = m/ slansy_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(slansy_bs, 1, 1); if( uplo == 'L' || uplo == 'l'){ l_slansy_max <<< grid, threads, 0, magma_stream >>> (m, A, lda, y); } else{ u_slansy_max <<< grid, threads, 0, magma_stream >>> (m, A, lda, y); } } extern "C" float magmablas_slansy(char norm, char uplo, magma_int_t n, const float *A, magma_int_t lda, float *WORK ) { if (norm == 'I' || norm =='i') { #if (GPUSHMEM >= 200) mslansy ( uplo, n, A, lda, WORK); int val = cublasIsamax(n,WORK,1); float 
retVal[1]; cublasGetMatrix( 1, 1, sizeof( float ), WORK+val-1, 1, retVal, 1 ) ; return retVal[0]; #else printf("Only normM is available. Exit.\n"); exit(1); #endif } else if (norm == 'M' || norm =='m') { slansy_max ( uplo, n, A, lda, WORK); int val = cublasIsamax(n,WORK,1); float retVal[1]; cublasGetMatrix( 1, 1, sizeof( float ), WORK+val-1, 1, retVal, 1 ) ; return retVal[0]; } else { printf("Only normI and normM are available. Exit.\n"); exit(1); } }
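Comparing this .cu version with its .hip counterpart above, the substantive differences are only the kernel-launch spelling (triple-chevron vs hipLaunchKernelGGL) and the cublas/hipblas prefixes. The toy example below, which is not MAGMA code, shows the two equivalent launch forms side by side for a made-up kernel.

#include <hip/hip_runtime.h>

// Hypothetical kernel used only to illustrate the launch-syntax difference.
__global__ void scale_kernel(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n, hipStream_t stream) {
  dim3 grid((n + 255) / 256), block(256);
  // HIP spelling, as used throughout the .hip file:
  hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream, d_x, a, n);
  // The .cu file expresses the same launch as:
  //   scale_kernel<<< grid, block, 0, stream >>>(d_x, a, n);
}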
1829c10ce91672d48f58c9537acd1a959d42d51e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef ENABLE_MPI #include <collectives/ib_comm.hpp> #include <utils.hpp> #include <utils.cuh> #include <infiniband/verbs.h> #include <iostream> #include <sstream> namespace HugeCTR { static void* proxy_thread_func(void* cfg) { auto ibv_config = (struct IbvProxy::InitConfig*)cfg; CudaCPUDeviceContext context(ibv_config->device_id_); IbvProxy* proxy = new IbvProxy(ibv_config); while(*(volatile int*)&proxy->destroy_ != 1) { proxy->stm(); } delete(proxy); return NULL; } // Helpers void IbComm::detect_ib_devs() { // Init hwloc topology hwloc_topology_init(&topo_); hwloc_topology_set_io_types_filter(topo_, HWLOC_TYPE_FILTER_KEEP_ALL); hwloc_topology_load(topo_); ibv_device **dev_list; int num_devices; dev_list = ibv_get_device_list(&num_devices); if ((!dev_list) || (num_devices == 0)) { std::cerr << "Ibv get device list failed: " << num_devices << std::endl; exit(-1); } // Get hwloc devices and final ib devs for (int d = 0; d < num_devices; d++) { if((dev_list[d]->node_type != IBV_NODE_RNIC) && (dev_list[d]->node_type != IBV_NODE_CA)) { continue; } const char* dev_name = ibv_get_device_name(dev_list[d]); if (!dev_name) { std::cerr << "Unable to get device name" << std::endl; exit(-1); } ibv_context* context; context = ibv_open_device(dev_list[d]); if (!context) { continue; } struct ibv_device_attr dev_attr; memset(&dev_attr, 0, sizeof(dev_attr)); if (ibv_query_device(context, &dev_attr) != 0) { std::cerr << "Unable to query device " << std::string(dev_name) << std::endl; exit(-1); } for (int port = 1; port <= dev_attr.phys_port_cnt; port++) { struct ibv_port_attr port_attr; if (ibv_query_port(context, port, &port_attr) != 0) { std::cout << "Unable to query port " << dev_name << ":" << port << std::endl; continue; } if (port_attr.state != IBV_PORT_ACTIVE) continue; if (port_attr.link_layer != IBV_LINK_LAYER_INFINIBAND) continue; // TODO: Check against user specified device list. 
ib_dev_list_.emplace_back(); ib_dev_list_.back().dev_name = dev_name; ib_dev_list_.back().dev_port_id = port; ib_dev_list_.back().hwloc_obj = hwloc_ibv_get_device_osdev(topo_, dev_list[d]); if (!ib_dev_list_.back().hwloc_obj) { std::cerr << "unable to get hwloc obj for ib device " << std::string(dev_name) << std::endl; exit(1); } } ibv_close_device(context); } ibv_free_device_list(dev_list); } void IbComm::print_obj(size_t my_rank, std::string obj_name, hwloc_obj_t obj) { if (my_rank != 0) return; if (!obj) { std::cout << obj_name << ":NULL" << std::endl; return; } if (obj->type == HWLOC_OBJ_PCI_DEVICE) { std::cout << obj_name << ":PCIeDevice " << obj->gp_index << " " << obj << " " << obj->depth << " " << obj->attr->pcidev.dev << std::endl; } else if (obj->type == HWLOC_OBJ_OS_DEVICE) { std::cout << obj_name << ":OSdev " << obj->gp_index << " " << obj << " " << obj->depth << " " << obj->name << " " << obj->attr->osdev.type << std::endl; } else if (obj->type == HWLOC_OBJ_BRIDGE) { std::cout << obj_name << ":PCIeBridge " << obj->gp_index << " " << obj << " " << obj->depth << std::endl; } else { std::cout << obj_name << ":Unknown " << obj->gp_index << " " << obj << " " << obj->depth << std::endl; } } size_t IbComm::calculate_pcie_hier_distance(size_t my_rank, hwloc_obj_t obj1, hwloc_obj_t obj2) { size_t distance = 0; auto is_bridge = [](hwloc_obj_t obj){ return obj && (obj->type == HWLOC_OBJ_BRIDGE); }; auto are_bridges = [is_bridge](hwloc_obj_t obj1, hwloc_obj_t obj2) { return is_bridge(obj1) && is_bridge(obj2); }; while(!is_bridge(obj1)) { obj1 = obj1->parent; } while(!is_bridge(obj2)) { obj2 = obj2->parent; } while (are_bridges(obj1,obj2) && (obj1 != obj2)) { while (are_bridges(obj1, obj2) && (obj1->attr->bridge.depth > obj2->attr->bridge.depth)) { obj1 = obj1->parent; distance++; } while (are_bridges(obj1, obj2) && (obj2->attr->bridge.depth > obj1->attr->bridge.depth)) { obj2 = obj2->parent; distance++; } if (are_bridges(obj1, obj2) && (obj1 != obj2)) { obj1 = obj1->parent; obj2 = obj2->parent; distance += 2; } } if (obj1 != obj2) { // No common PCIe ancestor found. Must be SYS. distance = std::numeric_limits<size_t>::max(); } return distance; } void IbComm::print_distance_matrix(size_t my_rank, std::vector<std::vector<size_t>>& gpu_nic_dist) { // Print distance matrix if (my_rank == 0) { for (size_t n = 0; n < ib_dev_list_.size(); n++) { std::cout << std::setfill(' ') << std::setw(24) << ib_dev_list_[n].dev_name; } std::cout << std::endl; for (size_t g = 0; g < num_gpus_; g++) { for (size_t n = 0; n < ib_dev_list_.size(); n++) { std::cout << std::setfill(' ') << std::setw(24) << gpu_nic_dist[g][n]; } std::cout << std::endl; } } } void IbComm::calculate_gpu_nic_affinity() { // get hwloc GPU objs std::vector<hwloc_obj_t> gpu_list; for (auto& g : device_list_) { auto gpu_obj = hwloc_cudart_get_device_osdev_by_index(topo_, g); if (!gpu_obj) { std::cerr << "unable to get hwloc obj for cuda device " << g << std::endl; exit(1); } gpu_list.push_back(gpu_obj); } // Find GPU-NIC distances std::vector<std::vector<size_t>> gpu_nic_dist(num_gpus_); for (size_t g = 0; g < num_gpus_; g++) { gpu_nic_dist[g].resize(ib_dev_list_.size()); for (size_t n = 0; n < ib_dev_list_.size(); n++) { hwloc_obj_t gpu_obj = gpu_list[g]; gpu_nic_dist[g][n] = calculate_pcie_hier_distance(my_proc_, gpu_obj, ib_dev_list_[n].hwloc_obj); } } // print_distance_matrix(my_proc_, gpu_nic_dist); // Calculate affinities. 
Only supports at max one NIC per GPU // If we need to support more than one NIC per GPU in future, we can replicate the gpu devs. size_t max_nics = ib_dev_list_.size(); gpu_nic_affinity_.resize(num_gpus_, max_nics); if (num_gpus_ >= ib_dev_list_.size()) { size_t current_nic = 0; for (size_t assigned_gpus = 0; assigned_gpus < num_gpus_; assigned_gpus++) { // Greedy algorithm // Find unassigned gpu with min distance size_t min_distance = std::numeric_limits<size_t>::max(); size_t min_gpu = 0; for (size_t g = 0; g < num_gpus_; g++) { if ((gpu_nic_affinity_[g] == max_nics) && (gpu_nic_dist[g][current_nic] <= min_distance)) { min_distance = gpu_nic_dist[g][current_nic]; min_gpu = g; } } gpu_nic_affinity_[min_gpu] = current_nic; current_nic = (current_nic + 1) % ib_dev_list_.size(); } } else { // still assigns max one NIC per GPU. Just iterate over NICs instead for (size_t g = 0; g < num_gpus_; g++) { size_t min_distance = std::numeric_limits<size_t>::max(); size_t min_nic = 0; for (size_t n = 0; n < ib_dev_list_.size(); n++) { if ((ib_dev_list_[n].num_gpus_assigned == 0) && (gpu_nic_dist[g][n] <= min_distance)) { min_distance = gpu_nic_dist[g][n]; min_nic = n; } } gpu_nic_affinity_[g] = min_nic; } } // Print gpu nic affinities that are picked; if (my_proc_ == 0) { for (size_t g = 0; g < num_gpus_; g++) { auto& ib_dev = ib_dev_list_[gpu_nic_affinity_[g]]; std::ostringstream ost; ost << "GPU-NIC affinity " << g << "-" << ib_dev.dev_name << ":" << ib_dev.dev_port_id; MESSAGE_(ost.str().c_str()); } } // Check gpu nic affinities of other nodes and warn if mismatch char (**gpu_nic_affinity_names)[IBV_SYSFS_NAME_MAX]; gpu_nic_affinity_names = (char (**)[IBV_SYSFS_NAME_MAX])malloc(sizeof(char(*)[IBV_SYSFS_NAME_MAX]) * num_procs_); for (size_t r = 0; r < num_procs_; r++) { gpu_nic_affinity_names[r] = (char (*)[IBV_SYSFS_NAME_MAX])malloc(sizeof(char[IBV_SYSFS_NAME_MAX]) * num_gpus_); } for (size_t g = 0; g < num_gpus_; g++) { auto ib_dev = ib_dev_list_[gpu_nic_affinity_[g]]; std::ostringstream stm; stm << ib_dev.dev_name << ":" << ib_dev.dev_port_id; std::string ib_name = stm.str(); ib_name = ib_name.substr(0, IBV_SYSFS_NAME_MAX); std::strcpy(gpu_nic_affinity_names[my_proc_][g], ib_name.c_str()); } for (size_t r = 0; r < num_procs_; r++) { CK_MPI_THROW_(MPI_Bcast(gpu_nic_affinity_names[r], num_gpus_*sizeof(char[IBV_SYSFS_NAME_MAX]), MPI_BYTE, r, MPI_COMM_WORLD)); } for (size_t r = 0; r < num_procs_; r++) { for (size_t g = 0; g < num_gpus_; g++) { std::string my_ib_name = std::string(gpu_nic_affinity_names[my_proc_][g]); std::string remote_ib_name = std::string(gpu_nic_affinity_names[r][g]); if (my_ib_name != remote_ib_name) { std::cout << "WARNING: Mismatch in mellanox dev names. " << g << " " << my_proc_ << ":" << my_ib_name << " " << r << ":" << remote_ib_name << std::endl; std::cout << "WARNING: Non uniform cluster detected. 
Performance maybe impacted" << std::endl; } } } for (size_t r = 0; r < num_procs_; r++) { free(gpu_nic_affinity_names[r]); } free(gpu_nic_affinity_names); CK_MPI_THROW_(MPI_Barrier(MPI_COMM_WORLD)); } void IbComm::init_proxy_threads() { proxy_cmd_ = std::make_unique<ProxyCommand>(num_gpus_); proxy_cmd_->reset(); proxy_thread_.resize(num_gpus_); proxy_cfg_.resize(num_gpus_); for (auto& cfg : proxy_cfg_) { cfg = std::make_unique<IbvProxy::InitConfig>(); } for (size_t g = 0; g < num_gpus_; g++) { size_t device_id = device_list_[g]; auto& cfg = proxy_cfg_[g]; cfg->device_id_ = device_id; cfg->global_id_ = my_proc_; cfg->proxy_id_ = g; cfg->ib_dev_ = ib_dev_list_[gpu_nic_affinity_[g]].dev_name; cfg->ib_port_ = ib_dev_list_[gpu_nic_affinity_[g]].dev_port_id; cfg->proxy_cmd_ = proxy_cmd_.get(); cfg->num_gpus_ = num_gpus_; cfg->num_procs_ = num_procs_; cfg->my_proc_ = my_proc_; sched_param param; pthread_attr_t attr; pthread_attr_init (&attr); pthread_attr_getschedparam (&attr, &param); param.sched_priority = sched_get_priority_max(SCHED_FIFO);; pthread_attr_setschedparam (&attr, &param); int ret = pthread_create(&proxy_thread_[g], &attr, &proxy_thread_func, cfg.get()); PROXY_ASSERT(ret == 0); } } // API implementation int IbComm::init(size_t num_procs, size_t num_gpus, size_t my_proc, const std::vector<int>& device_list) { num_procs_ = num_procs; num_gpus_ = num_gpus; my_proc_ = my_proc; device_list_ = device_list; PROXY_ASSERT(num_procs > 1); detect_ib_devs(); calculate_gpu_nic_affinity(); init_proxy_threads(); is_initialized_ = true; return 0; } IbComm::HierA2ACollContext::HierA2ACollContext(IbComm* comm) { CK_CUDA_THROW_(hipHostMalloc(&cmd_storage_, 2*sizeof(size_t))); h_recv_cmd_ptr_ = &cmd_storage_[0]; *h_recv_cmd_ptr_ = 1; size_t num_gpus = comm->num_gpus_; std::generate_n(std::back_inserter(ctx_), num_gpus, []{ return std::make_unique<HierA2ACollContextPerGPU>(); }); d_send_cmd_ = new size_t*[num_gpus]; d_ibv_atomic_ = new size_t*[num_gpus]; d_ibv_atomic_recv_ = new size_t*[num_gpus]; for (size_t g = 0; g < num_gpus; g++) { CK_CUDA_THROW_(hipSetDevice(comm->device_list_[g])); CK_CUDA_THROW_(hipEventCreate(&ctx_[g]->event_)); // TODO: collate all storage CK_CUDA_THROW_(hipMalloc((void**)&d_send_cmd_[g], sizeof(size_t))); size_t init_value = 2; CK_CUDA_THROW_(hipMemcpy(d_send_cmd_[g], &init_value, sizeof(size_t), hipMemcpyHostToDevice)); CK_CUDA_THROW_(hipMalloc((void**)&d_ibv_atomic_[g], MAX_IBV_DEST*sizeof(size_t))); size_t atomic_init_values[MAX_IBV_DEST]; std::fill_n(atomic_init_values, MAX_IBV_DEST, 1); CK_CUDA_THROW_(hipMemcpy(d_ibv_atomic_[g], atomic_init_values, MAX_IBV_DEST*sizeof(size_t), hipMemcpyHostToDevice)); CK_CUDA_THROW_(hipMalloc((void**)&d_ibv_atomic_recv_[g], MAX_IBV_DEST*sizeof(size_t))); std::fill_n(atomic_init_values, MAX_IBV_DEST, 0); CK_CUDA_THROW_(hipMemcpy(d_ibv_atomic_recv_[g], atomic_init_values, MAX_IBV_DEST*sizeof(size_t), hipMemcpyHostToDevice)); } barrier_ = std::make_unique<GPUBarrier>(comm->num_gpus_, comm->device_list_); sync_helper_ = std::make_unique<CollSyncHelper>(); } IbComm::HierA2ACollContext::~HierA2ACollContext() { size_t num_gpus = ctx_.size(); if (d_ibv_atomic_recv_) { for (size_t g = 0; g < num_gpus; g++) { hipFree(d_ibv_atomic_recv_[g]); } delete d_ibv_atomic_recv_; } if (d_ibv_atomic_) { for (size_t g = 0; g < num_gpus; g++) { hipFree(d_ibv_atomic_[g]); } delete d_ibv_atomic_; } if (d_send_cmd_) { for (size_t g = 0; g < num_gpus; g++) { hipFree(d_send_cmd_[g]); } delete d_send_cmd_; } if (cmd_storage_) { hipFree(cmd_storage_); } } 
IbComm::HierA2ACollContextPerGPU::~HierA2ACollContextPerGPU() { if (d_send_ptrs_) { free(d_send_ptrs_); } if (d_recv_ptrs_) { free(d_recv_ptrs_); } if (d_send_sizes_copy_) { hipFree(d_send_sizes_copy_); } } // TODO: Initialize these in the constructor for RAI HierA2ACollHandle IbComm::register_hier_a2a_coll(bool skip_barrier) { // std::unique_lock<std::mutex> lock(proxy_cmd_->mutex_); hier_a2a_coll_ctx_.emplace_back(std::make_unique<HierA2ACollContext>(this)); HierA2ACollHandle coll_handle = (HierA2ACollHandle)(hier_a2a_coll_ctx_.size() - 1); auto sync_helper = hier_a2a_v_coll_ctx_[coll_handle]->sync_helper_.get(); M2PHierA2ACollInit coll_init_cmd_(coll_handle, sync_helper, skip_barrier); for (size_t g = 0; g < num_gpus_; g++) { M2PHierA2ACollInit coll_init_cmd_(coll_handle, sync_helper, skip_barrier); HierA2ACollInitCmd cmd = std::make_pair(std::move(coll_init_cmd_), std::move(P2MNull())); proxy_cmd_->cmd_[g] = std::move(cmd); } proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); return coll_handle; } HierA2AvCollHandle IbComm::register_hier_a2a_v_coll(bool skip_barrier) { // std::unique_lock<std::mutex> lock(proxy_cmd_->mutex_); hier_a2a_v_coll_ctx_.emplace_back(std::make_unique<HierA2ACollContext>(this)); HierA2AvCollHandle coll_handle = (HierA2AvCollHandle)(hier_a2a_v_coll_ctx_.size() - 1); auto sync_helper = hier_a2a_v_coll_ctx_[coll_handle]->sync_helper_.get(); for (size_t g = 0; g < num_gpus_; g++) { M2PHierA2AvCollInit coll_init_cmd_(coll_handle, sync_helper, skip_barrier); HierA2AvCollInitCmd cmd = std::make_pair(std::move(coll_init_cmd_), std::move(P2MNull())); proxy_cmd_->cmd_[g] = std::move(cmd); } proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); return coll_handle; } void IbComm::set_a2a_coll_stream(HierA2ACollHandle coll, hipStream_t stream, size_t device_id) { hier_a2a_coll_ctx_[coll]->ctx_[device_id]->stream_ = stream; } void IbComm::set_a2a_coll_stream(HierA2AvCollHandle coll, hipStream_t stream, size_t device_id) { hier_a2a_v_coll_ctx_[coll]->ctx_[device_id]->stream_ = stream; } void IbComm::set_a2a_coll_buf(HierA2ACollHandle coll, void** send_ptrs, const size_t* send_max_size, void** recv_ptrs, const size_t* recv_max_size, size_t device_id) { auto& coll_ctx = *hier_a2a_coll_ctx_[coll]; if (proxy_cmd_->cmd_[device_id].which() != 0) { ERROR_MESSAGE_("Proxy command is already populated. 
Don't mix up set API"); exit(1); } proxy_cmd_->cmd_[device_id] = HierA2ABufInitCmd(); HierA2ABufInitCmd& cmd = boost::get<HierA2ABufInitCmd>(proxy_cmd_->cmd_[device_id]); M2PHierA2ABufInit& buf_init = std::get<0>(cmd); auto& gpu_ctx = *coll_ctx.ctx_[device_id]; gpu_ctx.d_send_ptrs_ = (void** )malloc(sizeof(void*)*num_procs_); gpu_ctx.d_recv_ptrs_ = (void** )malloc(sizeof(void*)*num_procs_); memcpy(gpu_ctx.d_send_ptrs_, send_ptrs, sizeof(void*)*num_procs_); memcpy(gpu_ctx.d_recv_ptrs_, recv_ptrs, sizeof(void*)*num_procs_); buf_init.coll_handle_ = coll; buf_init.d_send_ptrs_ = send_ptrs; buf_init.d_recv_ptrs_ = recv_ptrs; buf_init.h_max_send_size_ = send_max_size; buf_init.h_max_recv_size_ = recv_max_size; buf_init.h_recv_cmd_ptr_ = coll_ctx.h_recv_cmd_ptr_; buf_init.d_ibv_atomic_ = coll_ctx.d_ibv_atomic_[device_id]; buf_init.d_ibv_atomic_recv_ = coll_ctx.d_ibv_atomic_recv_[device_id]; } void IbComm::set_a2a_coll_buf(HierA2AvCollHandle coll, void* send_ptrs, const size_t send_max_size, void* recv_ptrs, const size_t recv_max_size, size_t device_id) { auto& coll_ctx = *hier_a2a_v_coll_ctx_[coll]; if (proxy_cmd_->cmd_[device_id].which() != 0) { ERROR_MESSAGE_("Proxy command is already populated. Don't mix up set API"); exit(1); } proxy_cmd_->cmd_[device_id] = HierA2AvBufInitCmd(); HierA2AvBufInitCmd& cmd = boost::get<HierA2AvBufInitCmd>(proxy_cmd_->cmd_[device_id]); M2PHierA2AvBufInit& buf_init = std::get<0>(cmd); auto& gpu_ctx = *coll_ctx.ctx_[device_id]; gpu_ctx.d_send_ptrs_ = (void** )malloc(sizeof(void*)); gpu_ctx.d_recv_ptrs_ = (void** )malloc(sizeof(void*)); gpu_ctx.d_send_ptrs_[0] = send_ptrs; gpu_ctx.d_recv_ptrs_[0] = recv_ptrs; gpu_ctx.h_max_send_size_ = send_max_size; CK_CUDA_THROW_(hipSetDevice(device_list_[device_id])); // Allocate A2Av send size copy storage CK_CUDA_THROW_(hipMalloc((void**)(&gpu_ctx.d_send_sizes_copy_), sizeof(size_t)*num_gpus_*num_procs_)); std::vector<size_t> send_sizes(num_gpus_*num_procs_, send_max_size / (num_gpus_*num_procs_)); CK_CUDA_THROW_(hipMemcpy(gpu_ctx.d_send_sizes_copy_, send_sizes.data(), sizeof(size_t)*num_gpus_*num_procs_, hipMemcpyHostToDevice)); buf_init.coll_handle_ = coll; buf_init.d_send_ptrs_ = send_ptrs; buf_init.d_recv_ptrs_ = recv_ptrs; buf_init.h_max_send_size_ = send_max_size; buf_init.h_max_recv_size_ = recv_max_size; buf_init.h_recv_cmd_ptr_ = coll_ctx.h_recv_cmd_ptr_; buf_init.d_ibv_atomic_ = coll_ctx.d_ibv_atomic_[device_id]; buf_init.d_ibv_atomic_recv_ = coll_ctx.d_ibv_atomic_recv_[device_id]; } void IbComm::register_a2a_coll_buf(HierA2ACollHandle coll) { // Init command pointers auto& coll_ctx = *hier_a2a_coll_ctx_[coll]; proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); for (size_t g = 0; g < num_gpus_; g++) { HierA2ABufInitCmd& proxy_cmd = boost::get<HierA2ABufInitCmd>(proxy_cmd_->cmd_[g]); auto& buf_init_out = std::get<1>(proxy_cmd); coll_ctx.ctx_[g]->h_send_sizes_ = buf_init_out.h_send_size_; coll_ctx.ctx_[g]->h_recv_sizes_ = buf_init_out.h_recv_size_; } proxy_cmd_->reset(); } void IbComm::register_a2a_coll_buf(HierA2AvCollHandle coll) { // Init command pointers auto& coll_ctx = *hier_a2a_v_coll_ctx_[coll]; proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); for (size_t g = 0; g < num_gpus_; g++) { HierA2AvBufInitCmd& proxy_cmd = boost::get<HierA2AvBufInitCmd>(proxy_cmd_->cmd_[g]); auto& buf_init_out = std::get<1>(proxy_cmd); coll_ctx.ctx_[g]->h_send_sizes_ = buf_init_out.h_send_size_; coll_ctx.ctx_[g]->h_recv_sizes_ = buf_init_out.h_recv_size_; } proxy_cmd_->reset(); } static __global__ void 
update_sizes( size_t* __restrict__ h_send_sizes, size_t* __restrict__ h_recv_sizes, size_t* __restrict__ d_send_sizes_copy, const size_t* __restrict__ d_send_sizes, const size_t* __restrict__ d_recv_sizes, size_t size) { for (size_t i = blockIdx.x*blockDim.x + threadIdx.x; i < size; i += blockDim.x*gridDim.x) { size_t send_size = d_send_sizes[i]; h_send_sizes[i] = send_size; d_send_sizes_copy[i] = send_size; h_recv_sizes[i] = d_recv_sizes[i]; } } void IbComm::update_a2a_coll_sizes(HierA2AvCollHandle coll, const size_t* d_send_sizes, const size_t* d_recv_sizes, hipStream_t dep_stream, size_t device_id) { auto& ctx = *hier_a2a_v_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; CK_CUDA_THROW_(hipEventRecord(gpu_ctx.event_, dep_stream)); CK_CUDA_THROW_(hipStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_)); constexpr size_t MAX_TPB = 256; size_t n_blocks = ceildiv<size_t>(num_procs_*num_gpus_, MAX_TPB); hipLaunchKernelGGL(( update_sizes), dim3(n_blocks), dim3(MAX_TPB), 0, gpu_ctx.stream_, gpu_ctx.h_send_sizes_, gpu_ctx.h_recv_sizes_, gpu_ctx.d_send_sizes_copy_, d_send_sizes, d_recv_sizes, num_procs_*num_gpus_); } // Local first distribution TODO: node first might be efficient static __global__ void update_pre_intra_sizes( size_t* __restrict__ h_send_sizes, size_t* __restrict__ d_send_sizes, size_t** __restrict__ d_pre_intra_send_sizes, size_t my_gpu_id, size_t num_gpus, size_t num_procs) { // Thread blocks = num procs // Threads = num gpus int gpu_id = threadIdx.x; int proc_id = blockIdx.x; size_t send_size = d_pre_intra_send_sizes[gpu_id][proc_id*num_gpus + my_gpu_id]; size_t send_indx = proc_id*num_gpus + gpu_id; h_send_sizes[send_indx] = send_size; d_send_sizes[send_indx] = send_size; // TODO: uncomment below for cuda graph support // __threadfence_system(); } void IbComm::pre_intra_update_a2a_coll_sizes(HierA2AvCollHandle coll, size_t** d_pre_intra_send_sizes, hipStream_t dep_stream, size_t device_id) { auto& ctx = *hier_a2a_v_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; CK_CUDA_THROW_(hipEventRecord(gpu_ctx.event_, dep_stream)); CK_CUDA_THROW_(hipStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_)); ctx.barrier_->sync_all_gpus(gpu_ctx.stream_, device_id); hipLaunchKernelGGL(( update_pre_intra_sizes), dim3(num_procs_), dim3(num_gpus_), 0, gpu_ctx.stream_, gpu_ctx.h_send_sizes_, gpu_ctx.d_send_sizes_copy_, d_pre_intra_send_sizes, device_id, num_gpus_, num_procs_); } void IbComm::set_ready_to_transfer() { PROXY_ASSERT_MSG(!is_ready_to_transfer_, "Ready to transfer is already set") for(size_t g = 0; g < num_gpus_; g++) { proxy_cmd_->cmd_[g] = ProxyStateTransitionCmd(); ProxyStateTransitionCmd& cmd_t = boost::get<ProxyStateTransitionCmd>(proxy_cmd_->cmd_[g]); M2PStateTransition& cmd = std::get<0>(cmd_t); cmd.state_ = IbvProxyState::READY_TO_TRANSFER; } proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); is_ready_to_transfer_ = true; } template<typename T> static __global__ void copy_local( const T* __restrict__ input_, T* __restrict__ output_, size_t size) { for (size_t i = blockIdx.x*blockDim.x + threadIdx.x; i < size; i += blockDim.x*gridDim.x) { output_[i] = input_[i]; } } template<typename T> static __global__ void copy_local_segmented( const T* __restrict__ input_, T* __restrict__ output_, const size_t* __restrict__ sizes, int num_segments, size_t offset) { for (int s = 0; s < num_segments; s++) { int segment_offset = s*offset; size_t num_elems = sizes[s] / sizeof(T); for (size_t i = blockIdx.x*blockDim.x + threadIdx.x; i < num_elems; i += 
blockDim.x*gridDim.x) { output_[segment_offset + i] = input_[segment_offset + i]; } } } static __global__ void wait_completion(size_t* d_ibv_cmd, size_t* atomic, int nDest, int myDest, int device_id) { if ((threadIdx.x < nDest) && (threadIdx.x != myDest)) { size_t curr_count = *(volatile size_t*)d_ibv_cmd; // clock_t s=clock64(); while(*((volatile size_t*) &atomic[threadIdx.x]) < (curr_count - 1)) { // if (clock64()-s > 2000000000) { // printf("wait completion expected: %llu %llu, got %llu from_dest %d my_dest %d %d n_dest %d\n", // curr_count, (curr_count - 1), atomic[threadIdx.x], threadIdx.x, myDest, device_id, nDest); // s = clock64(); // } } } __syncthreads(); } template<typename T> void IbComm::post_send_command_a2a<T>(HierA2ACollHandle coll, hipStream_t dep_stream, size_t device_id) { auto& ctx = *hier_a2a_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; CK_CUDA_THROW_(hipEventRecord(gpu_ctx.event_, dep_stream)); CK_CUDA_THROW_(hipStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_)); ctx.barrier_->sync_all_gpus_report_host_and_inc(ctx.d_send_cmd_[device_id], ctx.h_recv_cmd_ptr_, gpu_ctx.stream_, device_id); size_t num_elems = gpu_ctx.h_send_sizes_[my_proc_] / sizeof(T); // TODO: This is not capturable as we using sizes from host hipLaunchKernelGGL(( copy_local<T>), dim3(96), dim3(1024), 0, gpu_ctx.stream_, (T*)gpu_ctx.d_send_ptrs_[my_proc_], (T*)gpu_ctx.d_recv_ptrs_[my_proc_], num_elems); hipLaunchKernelGGL(( wait_completion), dim3(1), dim3(32), 0, gpu_ctx.stream_, ctx.d_send_cmd_[device_id], ctx.d_ibv_atomic_[device_id], num_procs_, my_proc_, device_id); } template <typename T> void IbComm::post_send_command_a2a<T>(HierA2AvCollHandle coll, hipStream_t dep_stream, size_t device_id) { auto& ctx = *hier_a2a_v_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; CK_CUDA_THROW_(hipEventRecord(gpu_ctx.event_, dep_stream)); CK_CUDA_THROW_(hipStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_)); ctx.barrier_->sync_all_gpus_report_host_and_inc(ctx.d_send_cmd_[device_id], ctx.h_recv_cmd_ptr_, gpu_ctx.stream_, device_id); // TODO: Change it to use max SMs size_t* copy_sizes = &gpu_ctx.d_send_sizes_copy_[my_proc_*num_gpus_]; size_t offset = gpu_ctx.h_max_send_size_ / (num_procs_*num_gpus_) / sizeof(T); // TODO: This is not good, we are reading the sizes from host, create a device copy! 
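  // Local leg of the A2Av: copy_sizes points at the num_gpus_ entries of d_send_sizes_copy_
  // addressed to this process, and `offset` is the fixed per-segment capacity in elements.
  // The segmented kernel below copies those segments GPU-locally from the send buffer into
  // the recv buffer, while the IBV proxy handles the inter-node segments; wait_completion
  // then blocks the stream until the send-side atomics report completion for every remote
  // destination.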
hipLaunchKernelGGL(( copy_local_segmented<T>), dim3(96), dim3(1024), 0, gpu_ctx.stream_, (T*)gpu_ctx.d_send_ptrs_[0] + (my_proc_*num_gpus_*offset), (T*)gpu_ctx.d_recv_ptrs_[0] + (my_proc_*num_gpus_*offset), copy_sizes, num_gpus_, offset); hipLaunchKernelGGL(( wait_completion), dim3(1), dim3(32), 0, gpu_ctx.stream_, ctx.d_send_cmd_[device_id], ctx.d_ibv_atomic_[device_id], num_procs_, my_proc_, device_id); } static __global__ void wait_recv(size_t* d_ibv_cmd, size_t* atomic, int nDest, int myDest) { if ((threadIdx.x < nDest) && (threadIdx.x != myDest)) { size_t curr_count = *d_ibv_cmd; while(*((volatile size_t*) &atomic[threadIdx.x]) < (curr_count - 2)) {} } __syncthreads(); } void IbComm::wait_global_recv_async(HierA2ACollHandle coll, size_t device_id) { auto& ctx = *hier_a2a_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; hipLaunchKernelGGL(( wait_recv), dim3(1), dim3(32), 0, gpu_ctx.stream_, ctx.d_send_cmd_[device_id], ctx.d_ibv_atomic_recv_[device_id], num_procs_, my_proc_); } void IbComm::wait_global_recv_async(HierA2AvCollHandle coll, size_t device_id) { auto& ctx = *hier_a2a_v_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; hipLaunchKernelGGL(( wait_recv), dim3(1), dim3(32), 0, gpu_ctx.stream_, ctx.d_send_cmd_[device_id], ctx.d_ibv_atomic_recv_[device_id], num_procs_, my_proc_); } template void IbComm::post_send_command_a2a<__half>(HierA2ACollHandle coll, hipStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <float>(HierA2ACollHandle coll, hipStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <uint32_t>(HierA2ACollHandle coll, hipStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <uint16_t>(HierA2ACollHandle coll, hipStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a<__half>(HierA2AvCollHandle coll, hipStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <float>(HierA2AvCollHandle coll, hipStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <uint32_t>(HierA2AvCollHandle coll, hipStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <uint16_t>(HierA2AvCollHandle coll, hipStream_t dep_stream, size_t device_id); void IbComm::finalize() { if (!is_initialized_) { return; } if (!is_ready_to_transfer_) { for(size_t g = 0; g < num_gpus_; g++) { proxy_cmd_->cmd_[g] = ProxyStateTransitionCmd(); ProxyStateTransitionCmd& cmd_t = boost::get<ProxyStateTransitionCmd>(proxy_cmd_->cmd_[g]); M2PStateTransition& cmd = std::get<0>(cmd_t); cmd.state_ = IbvProxyState::DESTROY; } proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); } proxy_cmd_->set_destroy(); for (size_t g = 0; g < num_gpus_; g++) { int ret = pthread_join(proxy_thread_[g], NULL); PROXY_ASSERT(ret == 0); } is_finalized_ = true; } IbComm::~IbComm() { if (!is_finalized_) { finalize(); } } } #endif
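// ---------------------------------------------------------------------------
// Editorial note: the block below is an illustrative usage sketch, not part of
// the original HugeCTR sources. It shows one plausible call sequence for the
// hierarchical A2Av API defined above, assuming an already initialized IbComm,
// one stream per local GPU, and made-up buffer sizes; function and variable
// names (example_hier_a2a_v, streams, max_size, d_send_sizes, d_recv_sizes,
// dep_stream) are illustrative, and error checking is omitted.
#if 0
#include <collectives/ib_comm.hpp>

#include <hip/hip_runtime.h>

#include <vector>

void example_hier_a2a_v(HugeCTR::IbComm& ib_comm, size_t num_gpus,
                        const std::vector<hipStream_t>& streams) {
  using namespace HugeCTR;
  HierA2AvCollHandle coll = ib_comm.register_hier_a2a_v_coll(/*skip_barrier=*/false);
  const size_t max_size = 64 * 1024 * 1024;  // assumed per-GPU staging size in bytes
  std::vector<void*> send_bufs(num_gpus), recv_bufs(num_gpus);
  for (size_t g = 0; g < num_gpus; g++) {
    // Device selection (hipSetDevice on the matching entry of device_list) omitted here.
    hipMalloc(&send_bufs[g], max_size);
    hipMalloc(&recv_bufs[g], max_size);
    ib_comm.set_a2a_coll_stream(coll, streams[g], g);
    ib_comm.set_a2a_coll_buf(coll, send_bufs[g], max_size, recv_bufs[g], max_size, g);
  }
  ib_comm.register_a2a_coll_buf(coll);  // exchanges buffer registration through the proxies
  ib_comm.set_ready_to_transfer();
  // Then, per iteration and per GPU g, after producing data on dep_stream:
  //   ib_comm.update_a2a_coll_sizes(coll, d_send_sizes, d_recv_sizes, dep_stream, g);
  //   ib_comm.post_send_command_a2a<float>(coll, dep_stream, g);
  //   ib_comm.wait_global_recv_async(coll, g);
}
#endif
// ---------------------------------------------------------------------------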
1829c10ce91672d48f58c9537acd1a959d42d51e.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef ENABLE_MPI #include <collectives/ib_comm.hpp> #include <utils.hpp> #include <utils.cuh> #include <infiniband/verbs.h> #include <iostream> #include <sstream> namespace HugeCTR { static void* proxy_thread_func(void* cfg) { auto ibv_config = (struct IbvProxy::InitConfig*)cfg; CudaCPUDeviceContext context(ibv_config->device_id_); IbvProxy* proxy = new IbvProxy(ibv_config); while(*(volatile int*)&proxy->destroy_ != 1) { proxy->stm(); } delete(proxy); return NULL; } // Helpers void IbComm::detect_ib_devs() { // Init hwloc topology hwloc_topology_init(&topo_); hwloc_topology_set_io_types_filter(topo_, HWLOC_TYPE_FILTER_KEEP_ALL); hwloc_topology_load(topo_); ibv_device **dev_list; int num_devices; dev_list = ibv_get_device_list(&num_devices); if ((!dev_list) || (num_devices == 0)) { std::cerr << "Ibv get device list failed: " << num_devices << std::endl; exit(-1); } // Get hwloc devices and final ib devs for (int d = 0; d < num_devices; d++) { if((dev_list[d]->node_type != IBV_NODE_RNIC) && (dev_list[d]->node_type != IBV_NODE_CA)) { continue; } const char* dev_name = ibv_get_device_name(dev_list[d]); if (!dev_name) { std::cerr << "Unable to get device name" << std::endl; exit(-1); } ibv_context* context; context = ibv_open_device(dev_list[d]); if (!context) { continue; } struct ibv_device_attr dev_attr; memset(&dev_attr, 0, sizeof(dev_attr)); if (ibv_query_device(context, &dev_attr) != 0) { std::cerr << "Unable to query device " << std::string(dev_name) << std::endl; exit(-1); } for (int port = 1; port <= dev_attr.phys_port_cnt; port++) { struct ibv_port_attr port_attr; if (ibv_query_port(context, port, &port_attr) != 0) { std::cout << "Unable to query port " << dev_name << ":" << port << std::endl; continue; } if (port_attr.state != IBV_PORT_ACTIVE) continue; if (port_attr.link_layer != IBV_LINK_LAYER_INFINIBAND) continue; // TODO: Check against user specified device list. 
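      // Record this active IB port: device name, port number, and the hwloc OS device
      // object, which is used later to compute PCIe distances between NICs and GPUs.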
ib_dev_list_.emplace_back(); ib_dev_list_.back().dev_name = dev_name; ib_dev_list_.back().dev_port_id = port; ib_dev_list_.back().hwloc_obj = hwloc_ibv_get_device_osdev(topo_, dev_list[d]); if (!ib_dev_list_.back().hwloc_obj) { std::cerr << "unable to get hwloc obj for ib device " << std::string(dev_name) << std::endl; exit(1); } } ibv_close_device(context); } ibv_free_device_list(dev_list); } void IbComm::print_obj(size_t my_rank, std::string obj_name, hwloc_obj_t obj) { if (my_rank != 0) return; if (!obj) { std::cout << obj_name << ":NULL" << std::endl; return; } if (obj->type == HWLOC_OBJ_PCI_DEVICE) { std::cout << obj_name << ":PCIeDevice " << obj->gp_index << " " << obj << " " << obj->depth << " " << obj->attr->pcidev.dev << std::endl; } else if (obj->type == HWLOC_OBJ_OS_DEVICE) { std::cout << obj_name << ":OSdev " << obj->gp_index << " " << obj << " " << obj->depth << " " << obj->name << " " << obj->attr->osdev.type << std::endl; } else if (obj->type == HWLOC_OBJ_BRIDGE) { std::cout << obj_name << ":PCIeBridge " << obj->gp_index << " " << obj << " " << obj->depth << std::endl; } else { std::cout << obj_name << ":Unknown " << obj->gp_index << " " << obj << " " << obj->depth << std::endl; } } size_t IbComm::calculate_pcie_hier_distance(size_t my_rank, hwloc_obj_t obj1, hwloc_obj_t obj2) { size_t distance = 0; auto is_bridge = [](hwloc_obj_t obj){ return obj && (obj->type == HWLOC_OBJ_BRIDGE); }; auto are_bridges = [is_bridge](hwloc_obj_t obj1, hwloc_obj_t obj2) { return is_bridge(obj1) && is_bridge(obj2); }; while(!is_bridge(obj1)) { obj1 = obj1->parent; } while(!is_bridge(obj2)) { obj2 = obj2->parent; } while (are_bridges(obj1,obj2) && (obj1 != obj2)) { while (are_bridges(obj1, obj2) && (obj1->attr->bridge.depth > obj2->attr->bridge.depth)) { obj1 = obj1->parent; distance++; } while (are_bridges(obj1, obj2) && (obj2->attr->bridge.depth > obj1->attr->bridge.depth)) { obj2 = obj2->parent; distance++; } if (are_bridges(obj1, obj2) && (obj1 != obj2)) { obj1 = obj1->parent; obj2 = obj2->parent; distance += 2; } } if (obj1 != obj2) { // No common PCIe ancestor found. Must be SYS. distance = std::numeric_limits<size_t>::max(); } return distance; } void IbComm::print_distance_matrix(size_t my_rank, std::vector<std::vector<size_t>>& gpu_nic_dist) { // Print distance matrix if (my_rank == 0) { for (size_t n = 0; n < ib_dev_list_.size(); n++) { std::cout << std::setfill(' ') << std::setw(24) << ib_dev_list_[n].dev_name; } std::cout << std::endl; for (size_t g = 0; g < num_gpus_; g++) { for (size_t n = 0; n < ib_dev_list_.size(); n++) { std::cout << std::setfill(' ') << std::setw(24) << gpu_nic_dist[g][n]; } std::cout << std::endl; } } } void IbComm::calculate_gpu_nic_affinity() { // get hwloc GPU objs std::vector<hwloc_obj_t> gpu_list; for (auto& g : device_list_) { auto gpu_obj = hwloc_cudart_get_device_osdev_by_index(topo_, g); if (!gpu_obj) { std::cerr << "unable to get hwloc obj for cuda device " << g << std::endl; exit(1); } gpu_list.push_back(gpu_obj); } // Find GPU-NIC distances std::vector<std::vector<size_t>> gpu_nic_dist(num_gpus_); for (size_t g = 0; g < num_gpus_; g++) { gpu_nic_dist[g].resize(ib_dev_list_.size()); for (size_t n = 0; n < ib_dev_list_.size(); n++) { hwloc_obj_t gpu_obj = gpu_list[g]; gpu_nic_dist[g][n] = calculate_pcie_hier_distance(my_proc_, gpu_obj, ib_dev_list_[n].hwloc_obj); } } // print_distance_matrix(my_proc_, gpu_nic_dist); // Calculate affinities. 
Only supports at max one NIC per GPU // If we need to support more than one NIC per GPU in future, we can replicate the gpu devs. size_t max_nics = ib_dev_list_.size(); gpu_nic_affinity_.resize(num_gpus_, max_nics); if (num_gpus_ >= ib_dev_list_.size()) { size_t current_nic = 0; for (size_t assigned_gpus = 0; assigned_gpus < num_gpus_; assigned_gpus++) { // Greedy algorithm // Find unassigned gpu with min distance size_t min_distance = std::numeric_limits<size_t>::max(); size_t min_gpu = 0; for (size_t g = 0; g < num_gpus_; g++) { if ((gpu_nic_affinity_[g] == max_nics) && (gpu_nic_dist[g][current_nic] <= min_distance)) { min_distance = gpu_nic_dist[g][current_nic]; min_gpu = g; } } gpu_nic_affinity_[min_gpu] = current_nic; current_nic = (current_nic + 1) % ib_dev_list_.size(); } } else { // still assigns max one NIC per GPU. Just iterate over NICs instead for (size_t g = 0; g < num_gpus_; g++) { size_t min_distance = std::numeric_limits<size_t>::max(); size_t min_nic = 0; for (size_t n = 0; n < ib_dev_list_.size(); n++) { if ((ib_dev_list_[n].num_gpus_assigned == 0) && (gpu_nic_dist[g][n] <= min_distance)) { min_distance = gpu_nic_dist[g][n]; min_nic = n; } } gpu_nic_affinity_[g] = min_nic; } } // Print gpu nic affinities that are picked; if (my_proc_ == 0) { for (size_t g = 0; g < num_gpus_; g++) { auto& ib_dev = ib_dev_list_[gpu_nic_affinity_[g]]; std::ostringstream ost; ost << "GPU-NIC affinity " << g << "-" << ib_dev.dev_name << ":" << ib_dev.dev_port_id; MESSAGE_(ost.str().c_str()); } } // Check gpu nic affinities of other nodes and warn if mismatch char (**gpu_nic_affinity_names)[IBV_SYSFS_NAME_MAX]; gpu_nic_affinity_names = (char (**)[IBV_SYSFS_NAME_MAX])malloc(sizeof(char(*)[IBV_SYSFS_NAME_MAX]) * num_procs_); for (size_t r = 0; r < num_procs_; r++) { gpu_nic_affinity_names[r] = (char (*)[IBV_SYSFS_NAME_MAX])malloc(sizeof(char[IBV_SYSFS_NAME_MAX]) * num_gpus_); } for (size_t g = 0; g < num_gpus_; g++) { auto ib_dev = ib_dev_list_[gpu_nic_affinity_[g]]; std::ostringstream stm; stm << ib_dev.dev_name << ":" << ib_dev.dev_port_id; std::string ib_name = stm.str(); ib_name = ib_name.substr(0, IBV_SYSFS_NAME_MAX); std::strcpy(gpu_nic_affinity_names[my_proc_][g], ib_name.c_str()); } for (size_t r = 0; r < num_procs_; r++) { CK_MPI_THROW_(MPI_Bcast(gpu_nic_affinity_names[r], num_gpus_*sizeof(char[IBV_SYSFS_NAME_MAX]), MPI_BYTE, r, MPI_COMM_WORLD)); } for (size_t r = 0; r < num_procs_; r++) { for (size_t g = 0; g < num_gpus_; g++) { std::string my_ib_name = std::string(gpu_nic_affinity_names[my_proc_][g]); std::string remote_ib_name = std::string(gpu_nic_affinity_names[r][g]); if (my_ib_name != remote_ib_name) { std::cout << "WARNING: Mismatch in mellanox dev names. " << g << " " << my_proc_ << ":" << my_ib_name << " " << r << ":" << remote_ib_name << std::endl; std::cout << "WARNING: Non uniform cluster detected. 
Performance maybe impacted" << std::endl; } } } for (size_t r = 0; r < num_procs_; r++) { free(gpu_nic_affinity_names[r]); } free(gpu_nic_affinity_names); CK_MPI_THROW_(MPI_Barrier(MPI_COMM_WORLD)); } void IbComm::init_proxy_threads() { proxy_cmd_ = std::make_unique<ProxyCommand>(num_gpus_); proxy_cmd_->reset(); proxy_thread_.resize(num_gpus_); proxy_cfg_.resize(num_gpus_); for (auto& cfg : proxy_cfg_) { cfg = std::make_unique<IbvProxy::InitConfig>(); } for (size_t g = 0; g < num_gpus_; g++) { size_t device_id = device_list_[g]; auto& cfg = proxy_cfg_[g]; cfg->device_id_ = device_id; cfg->global_id_ = my_proc_; cfg->proxy_id_ = g; cfg->ib_dev_ = ib_dev_list_[gpu_nic_affinity_[g]].dev_name; cfg->ib_port_ = ib_dev_list_[gpu_nic_affinity_[g]].dev_port_id; cfg->proxy_cmd_ = proxy_cmd_.get(); cfg->num_gpus_ = num_gpus_; cfg->num_procs_ = num_procs_; cfg->my_proc_ = my_proc_; sched_param param; pthread_attr_t attr; pthread_attr_init (&attr); pthread_attr_getschedparam (&attr, &param); param.sched_priority = sched_get_priority_max(SCHED_FIFO);; pthread_attr_setschedparam (&attr, &param); int ret = pthread_create(&proxy_thread_[g], &attr, &proxy_thread_func, cfg.get()); PROXY_ASSERT(ret == 0); } } // API implementation int IbComm::init(size_t num_procs, size_t num_gpus, size_t my_proc, const std::vector<int>& device_list) { num_procs_ = num_procs; num_gpus_ = num_gpus; my_proc_ = my_proc; device_list_ = device_list; PROXY_ASSERT(num_procs > 1); detect_ib_devs(); calculate_gpu_nic_affinity(); init_proxy_threads(); is_initialized_ = true; return 0; } IbComm::HierA2ACollContext::HierA2ACollContext(IbComm* comm) { CK_CUDA_THROW_(cudaMallocHost(&cmd_storage_, 2*sizeof(size_t))); h_recv_cmd_ptr_ = &cmd_storage_[0]; *h_recv_cmd_ptr_ = 1; size_t num_gpus = comm->num_gpus_; std::generate_n(std::back_inserter(ctx_), num_gpus, []{ return std::make_unique<HierA2ACollContextPerGPU>(); }); d_send_cmd_ = new size_t*[num_gpus]; d_ibv_atomic_ = new size_t*[num_gpus]; d_ibv_atomic_recv_ = new size_t*[num_gpus]; for (size_t g = 0; g < num_gpus; g++) { CK_CUDA_THROW_(cudaSetDevice(comm->device_list_[g])); CK_CUDA_THROW_(cudaEventCreate(&ctx_[g]->event_)); // TODO: collate all storage CK_CUDA_THROW_(cudaMalloc((void**)&d_send_cmd_[g], sizeof(size_t))); size_t init_value = 2; CK_CUDA_THROW_(cudaMemcpy(d_send_cmd_[g], &init_value, sizeof(size_t), cudaMemcpyHostToDevice)); CK_CUDA_THROW_(cudaMalloc((void**)&d_ibv_atomic_[g], MAX_IBV_DEST*sizeof(size_t))); size_t atomic_init_values[MAX_IBV_DEST]; std::fill_n(atomic_init_values, MAX_IBV_DEST, 1); CK_CUDA_THROW_(cudaMemcpy(d_ibv_atomic_[g], atomic_init_values, MAX_IBV_DEST*sizeof(size_t), cudaMemcpyHostToDevice)); CK_CUDA_THROW_(cudaMalloc((void**)&d_ibv_atomic_recv_[g], MAX_IBV_DEST*sizeof(size_t))); std::fill_n(atomic_init_values, MAX_IBV_DEST, 0); CK_CUDA_THROW_(cudaMemcpy(d_ibv_atomic_recv_[g], atomic_init_values, MAX_IBV_DEST*sizeof(size_t), cudaMemcpyHostToDevice)); } barrier_ = std::make_unique<GPUBarrier>(comm->num_gpus_, comm->device_list_); sync_helper_ = std::make_unique<CollSyncHelper>(); } IbComm::HierA2ACollContext::~HierA2ACollContext() { size_t num_gpus = ctx_.size(); if (d_ibv_atomic_recv_) { for (size_t g = 0; g < num_gpus; g++) { cudaFree(d_ibv_atomic_recv_[g]); } delete d_ibv_atomic_recv_; } if (d_ibv_atomic_) { for (size_t g = 0; g < num_gpus; g++) { cudaFree(d_ibv_atomic_[g]); } delete d_ibv_atomic_; } if (d_send_cmd_) { for (size_t g = 0; g < num_gpus; g++) { cudaFree(d_send_cmd_[g]); } delete d_send_cmd_; } if (cmd_storage_) { cudaFree(cmd_storage_); 
  }
}

IbComm::HierA2ACollContextPerGPU::~HierA2ACollContextPerGPU() {
  if (d_send_ptrs_) { free(d_send_ptrs_); }
  if (d_recv_ptrs_) { free(d_recv_ptrs_); }
  if (d_send_sizes_copy_) { cudaFree(d_send_sizes_copy_); }
}

// TODO: Initialize these in the constructor for RAII
HierA2ACollHandle IbComm::register_hier_a2a_coll(bool skip_barrier) {
  // std::unique_lock<std::mutex> lock(proxy_cmd_->mutex_);
  hier_a2a_coll_ctx_.emplace_back(std::make_unique<HierA2ACollContext>(this));
  HierA2ACollHandle coll_handle = (HierA2ACollHandle)(hier_a2a_coll_ctx_.size() - 1);
  auto sync_helper = hier_a2a_coll_ctx_[coll_handle]->sync_helper_.get();
  for (size_t g = 0; g < num_gpus_; g++) {
    M2PHierA2ACollInit coll_init_cmd_(coll_handle, sync_helper, skip_barrier);
    HierA2ACollInitCmd cmd = std::make_pair(std::move(coll_init_cmd_), std::move(P2MNull()));
    proxy_cmd_->cmd_[g] = std::move(cmd);
  }
  proxy_cmd_->post_command();
  proxy_cmd_->wait_for_completion();
  proxy_cmd_->reset();
  return coll_handle;
}

HierA2AvCollHandle IbComm::register_hier_a2a_v_coll(bool skip_barrier) {
  // std::unique_lock<std::mutex> lock(proxy_cmd_->mutex_);
  hier_a2a_v_coll_ctx_.emplace_back(std::make_unique<HierA2ACollContext>(this));
  HierA2AvCollHandle coll_handle = (HierA2AvCollHandle)(hier_a2a_v_coll_ctx_.size() - 1);
  auto sync_helper = hier_a2a_v_coll_ctx_[coll_handle]->sync_helper_.get();
  for (size_t g = 0; g < num_gpus_; g++) {
    M2PHierA2AvCollInit coll_init_cmd_(coll_handle, sync_helper, skip_barrier);
    HierA2AvCollInitCmd cmd = std::make_pair(std::move(coll_init_cmd_), std::move(P2MNull()));
    proxy_cmd_->cmd_[g] = std::move(cmd);
  }
  proxy_cmd_->post_command();
  proxy_cmd_->wait_for_completion();
  proxy_cmd_->reset();
  return coll_handle;
}

void IbComm::set_a2a_coll_stream(HierA2ACollHandle coll, cudaStream_t stream, size_t device_id) {
  hier_a2a_coll_ctx_[coll]->ctx_[device_id]->stream_ = stream;
}

void IbComm::set_a2a_coll_stream(HierA2AvCollHandle coll, cudaStream_t stream, size_t device_id) {
  hier_a2a_v_coll_ctx_[coll]->ctx_[device_id]->stream_ = stream;
}

void IbComm::set_a2a_coll_buf(HierA2ACollHandle coll, void** send_ptrs, const size_t* send_max_size,
                              void** recv_ptrs, const size_t* recv_max_size, size_t device_id) {
  auto& coll_ctx = *hier_a2a_coll_ctx_[coll];
  if (proxy_cmd_->cmd_[device_id].which() != 0) {
    ERROR_MESSAGE_("Proxy command is already populated.
Don't mix up set API"); exit(1); } proxy_cmd_->cmd_[device_id] = HierA2ABufInitCmd(); HierA2ABufInitCmd& cmd = boost::get<HierA2ABufInitCmd>(proxy_cmd_->cmd_[device_id]); M2PHierA2ABufInit& buf_init = std::get<0>(cmd); auto& gpu_ctx = *coll_ctx.ctx_[device_id]; gpu_ctx.d_send_ptrs_ = (void** )malloc(sizeof(void*)*num_procs_); gpu_ctx.d_recv_ptrs_ = (void** )malloc(sizeof(void*)*num_procs_); memcpy(gpu_ctx.d_send_ptrs_, send_ptrs, sizeof(void*)*num_procs_); memcpy(gpu_ctx.d_recv_ptrs_, recv_ptrs, sizeof(void*)*num_procs_); buf_init.coll_handle_ = coll; buf_init.d_send_ptrs_ = send_ptrs; buf_init.d_recv_ptrs_ = recv_ptrs; buf_init.h_max_send_size_ = send_max_size; buf_init.h_max_recv_size_ = recv_max_size; buf_init.h_recv_cmd_ptr_ = coll_ctx.h_recv_cmd_ptr_; buf_init.d_ibv_atomic_ = coll_ctx.d_ibv_atomic_[device_id]; buf_init.d_ibv_atomic_recv_ = coll_ctx.d_ibv_atomic_recv_[device_id]; } void IbComm::set_a2a_coll_buf(HierA2AvCollHandle coll, void* send_ptrs, const size_t send_max_size, void* recv_ptrs, const size_t recv_max_size, size_t device_id) { auto& coll_ctx = *hier_a2a_v_coll_ctx_[coll]; if (proxy_cmd_->cmd_[device_id].which() != 0) { ERROR_MESSAGE_("Proxy command is already populated. Don't mix up set API"); exit(1); } proxy_cmd_->cmd_[device_id] = HierA2AvBufInitCmd(); HierA2AvBufInitCmd& cmd = boost::get<HierA2AvBufInitCmd>(proxy_cmd_->cmd_[device_id]); M2PHierA2AvBufInit& buf_init = std::get<0>(cmd); auto& gpu_ctx = *coll_ctx.ctx_[device_id]; gpu_ctx.d_send_ptrs_ = (void** )malloc(sizeof(void*)); gpu_ctx.d_recv_ptrs_ = (void** )malloc(sizeof(void*)); gpu_ctx.d_send_ptrs_[0] = send_ptrs; gpu_ctx.d_recv_ptrs_[0] = recv_ptrs; gpu_ctx.h_max_send_size_ = send_max_size; CK_CUDA_THROW_(cudaSetDevice(device_list_[device_id])); // Allocate A2Av send size copy storage CK_CUDA_THROW_(cudaMalloc((void**)(&gpu_ctx.d_send_sizes_copy_), sizeof(size_t)*num_gpus_*num_procs_)); std::vector<size_t> send_sizes(num_gpus_*num_procs_, send_max_size / (num_gpus_*num_procs_)); CK_CUDA_THROW_(cudaMemcpy(gpu_ctx.d_send_sizes_copy_, send_sizes.data(), sizeof(size_t)*num_gpus_*num_procs_, cudaMemcpyHostToDevice)); buf_init.coll_handle_ = coll; buf_init.d_send_ptrs_ = send_ptrs; buf_init.d_recv_ptrs_ = recv_ptrs; buf_init.h_max_send_size_ = send_max_size; buf_init.h_max_recv_size_ = recv_max_size; buf_init.h_recv_cmd_ptr_ = coll_ctx.h_recv_cmd_ptr_; buf_init.d_ibv_atomic_ = coll_ctx.d_ibv_atomic_[device_id]; buf_init.d_ibv_atomic_recv_ = coll_ctx.d_ibv_atomic_recv_[device_id]; } void IbComm::register_a2a_coll_buf(HierA2ACollHandle coll) { // Init command pointers auto& coll_ctx = *hier_a2a_coll_ctx_[coll]; proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); for (size_t g = 0; g < num_gpus_; g++) { HierA2ABufInitCmd& proxy_cmd = boost::get<HierA2ABufInitCmd>(proxy_cmd_->cmd_[g]); auto& buf_init_out = std::get<1>(proxy_cmd); coll_ctx.ctx_[g]->h_send_sizes_ = buf_init_out.h_send_size_; coll_ctx.ctx_[g]->h_recv_sizes_ = buf_init_out.h_recv_size_; } proxy_cmd_->reset(); } void IbComm::register_a2a_coll_buf(HierA2AvCollHandle coll) { // Init command pointers auto& coll_ctx = *hier_a2a_v_coll_ctx_[coll]; proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); for (size_t g = 0; g < num_gpus_; g++) { HierA2AvBufInitCmd& proxy_cmd = boost::get<HierA2AvBufInitCmd>(proxy_cmd_->cmd_[g]); auto& buf_init_out = std::get<1>(proxy_cmd); coll_ctx.ctx_[g]->h_send_sizes_ = buf_init_out.h_send_size_; coll_ctx.ctx_[g]->h_recv_sizes_ = buf_init_out.h_recv_size_; } proxy_cmd_->reset(); } static __global__ void 
update_sizes( size_t* __restrict__ h_send_sizes, size_t* __restrict__ h_recv_sizes, size_t* __restrict__ d_send_sizes_copy, const size_t* __restrict__ d_send_sizes, const size_t* __restrict__ d_recv_sizes, size_t size) { for (size_t i = blockIdx.x*blockDim.x + threadIdx.x; i < size; i += blockDim.x*gridDim.x) { size_t send_size = d_send_sizes[i]; h_send_sizes[i] = send_size; d_send_sizes_copy[i] = send_size; h_recv_sizes[i] = d_recv_sizes[i]; } } void IbComm::update_a2a_coll_sizes(HierA2AvCollHandle coll, const size_t* d_send_sizes, const size_t* d_recv_sizes, cudaStream_t dep_stream, size_t device_id) { auto& ctx = *hier_a2a_v_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; CK_CUDA_THROW_(cudaEventRecord(gpu_ctx.event_, dep_stream)); CK_CUDA_THROW_(cudaStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_)); constexpr size_t MAX_TPB = 256; size_t n_blocks = ceildiv<size_t>(num_procs_*num_gpus_, MAX_TPB); update_sizes<<<n_blocks, MAX_TPB, 0, gpu_ctx.stream_>>>( gpu_ctx.h_send_sizes_, gpu_ctx.h_recv_sizes_, gpu_ctx.d_send_sizes_copy_, d_send_sizes, d_recv_sizes, num_procs_*num_gpus_); } // Local first distribution TODO: node first might be efficient static __global__ void update_pre_intra_sizes( size_t* __restrict__ h_send_sizes, size_t* __restrict__ d_send_sizes, size_t** __restrict__ d_pre_intra_send_sizes, size_t my_gpu_id, size_t num_gpus, size_t num_procs) { // Thread blocks = num procs // Threads = num gpus int gpu_id = threadIdx.x; int proc_id = blockIdx.x; size_t send_size = d_pre_intra_send_sizes[gpu_id][proc_id*num_gpus + my_gpu_id]; size_t send_indx = proc_id*num_gpus + gpu_id; h_send_sizes[send_indx] = send_size; d_send_sizes[send_indx] = send_size; // TODO: uncomment below for cuda graph support // __threadfence_system(); } void IbComm::pre_intra_update_a2a_coll_sizes(HierA2AvCollHandle coll, size_t** d_pre_intra_send_sizes, cudaStream_t dep_stream, size_t device_id) { auto& ctx = *hier_a2a_v_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; CK_CUDA_THROW_(cudaEventRecord(gpu_ctx.event_, dep_stream)); CK_CUDA_THROW_(cudaStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_)); ctx.barrier_->sync_all_gpus(gpu_ctx.stream_, device_id); update_pre_intra_sizes<<<num_procs_, num_gpus_, 0, gpu_ctx.stream_>>>( gpu_ctx.h_send_sizes_, gpu_ctx.d_send_sizes_copy_, d_pre_intra_send_sizes, device_id, num_gpus_, num_procs_); } void IbComm::set_ready_to_transfer() { PROXY_ASSERT_MSG(!is_ready_to_transfer_, "Ready to transfer is already set") for(size_t g = 0; g < num_gpus_; g++) { proxy_cmd_->cmd_[g] = ProxyStateTransitionCmd(); ProxyStateTransitionCmd& cmd_t = boost::get<ProxyStateTransitionCmd>(proxy_cmd_->cmd_[g]); M2PStateTransition& cmd = std::get<0>(cmd_t); cmd.state_ = IbvProxyState::READY_TO_TRANSFER; } proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); is_ready_to_transfer_ = true; } template<typename T> static __global__ void copy_local( const T* __restrict__ input_, T* __restrict__ output_, size_t size) { for (size_t i = blockIdx.x*blockDim.x + threadIdx.x; i < size; i += blockDim.x*gridDim.x) { output_[i] = input_[i]; } } template<typename T> static __global__ void copy_local_segmented( const T* __restrict__ input_, T* __restrict__ output_, const size_t* __restrict__ sizes, int num_segments, size_t offset) { for (int s = 0; s < num_segments; s++) { int segment_offset = s*offset; size_t num_elems = sizes[s] / sizeof(T); for (size_t i = blockIdx.x*blockDim.x + threadIdx.x; i < num_elems; i += blockDim.x*gridDim.x) { output_[segment_offset + i] = 
input_[segment_offset + i]; } } } static __global__ void wait_completion(size_t* d_ibv_cmd, size_t* atomic, int nDest, int myDest, int device_id) { if ((threadIdx.x < nDest) && (threadIdx.x != myDest)) { size_t curr_count = *(volatile size_t*)d_ibv_cmd; // clock_t s=clock64(); while(*((volatile size_t*) &atomic[threadIdx.x]) < (curr_count - 1)) { // if (clock64()-s > 2000000000) { // printf("wait completion expected: %llu %llu, got %llu from_dest %d my_dest %d %d n_dest %d\n", // curr_count, (curr_count - 1), atomic[threadIdx.x], threadIdx.x, myDest, device_id, nDest); // s = clock64(); // } } } __syncthreads(); } template<typename T> void IbComm::post_send_command_a2a<T>(HierA2ACollHandle coll, cudaStream_t dep_stream, size_t device_id) { auto& ctx = *hier_a2a_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; CK_CUDA_THROW_(cudaEventRecord(gpu_ctx.event_, dep_stream)); CK_CUDA_THROW_(cudaStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_)); ctx.barrier_->sync_all_gpus_report_host_and_inc(ctx.d_send_cmd_[device_id], ctx.h_recv_cmd_ptr_, gpu_ctx.stream_, device_id); size_t num_elems = gpu_ctx.h_send_sizes_[my_proc_] / sizeof(T); // TODO: This is not capturable as we using sizes from host copy_local<T><<<96, 1024, 0, gpu_ctx.stream_>>>( (T*)gpu_ctx.d_send_ptrs_[my_proc_], (T*)gpu_ctx.d_recv_ptrs_[my_proc_], num_elems); wait_completion<<<1, 32, 0, gpu_ctx.stream_>>>( ctx.d_send_cmd_[device_id], ctx.d_ibv_atomic_[device_id], num_procs_, my_proc_, device_id); } template <typename T> void IbComm::post_send_command_a2a<T>(HierA2AvCollHandle coll, cudaStream_t dep_stream, size_t device_id) { auto& ctx = *hier_a2a_v_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; CK_CUDA_THROW_(cudaEventRecord(gpu_ctx.event_, dep_stream)); CK_CUDA_THROW_(cudaStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_)); ctx.barrier_->sync_all_gpus_report_host_and_inc(ctx.d_send_cmd_[device_id], ctx.h_recv_cmd_ptr_, gpu_ctx.stream_, device_id); // TODO: Change it to use max SMs size_t* copy_sizes = &gpu_ctx.d_send_sizes_copy_[my_proc_*num_gpus_]; size_t offset = gpu_ctx.h_max_send_size_ / (num_procs_*num_gpus_) / sizeof(T); // TODO: This is not good, we are reading the sizes from host, create a device copy! 
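  // Local leg of the A2Av: copy_sizes points at the num_gpus_ entries of d_send_sizes_copy_
  // addressed to this process, and `offset` is the fixed per-segment capacity in elements.
  // The segmented kernel below copies those segments GPU-locally from the send buffer into
  // the recv buffer, while the IBV proxy handles the inter-node segments; wait_completion
  // then blocks the stream until the send-side atomics report completion for every remote
  // destination.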
copy_local_segmented<T><<<96, 1024, 0, gpu_ctx.stream_>>>( (T*)gpu_ctx.d_send_ptrs_[0] + (my_proc_*num_gpus_*offset), (T*)gpu_ctx.d_recv_ptrs_[0] + (my_proc_*num_gpus_*offset), copy_sizes, num_gpus_, offset); wait_completion<<<1, 32, 0, gpu_ctx.stream_>>>( ctx.d_send_cmd_[device_id], ctx.d_ibv_atomic_[device_id], num_procs_, my_proc_, device_id); } static __global__ void wait_recv(size_t* d_ibv_cmd, size_t* atomic, int nDest, int myDest) { if ((threadIdx.x < nDest) && (threadIdx.x != myDest)) { size_t curr_count = *d_ibv_cmd; while(*((volatile size_t*) &atomic[threadIdx.x]) < (curr_count - 2)) {} } __syncthreads(); } void IbComm::wait_global_recv_async(HierA2ACollHandle coll, size_t device_id) { auto& ctx = *hier_a2a_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; wait_recv<<<1, 32, 0, gpu_ctx.stream_>>>(ctx.d_send_cmd_[device_id], ctx.d_ibv_atomic_recv_[device_id], num_procs_, my_proc_); } void IbComm::wait_global_recv_async(HierA2AvCollHandle coll, size_t device_id) { auto& ctx = *hier_a2a_v_coll_ctx_[coll]; auto& gpu_ctx = *ctx.ctx_[device_id]; wait_recv<<<1, 32, 0, gpu_ctx.stream_>>>(ctx.d_send_cmd_[device_id], ctx.d_ibv_atomic_recv_[device_id], num_procs_, my_proc_); } template void IbComm::post_send_command_a2a<__half>(HierA2ACollHandle coll, cudaStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <float>(HierA2ACollHandle coll, cudaStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <uint32_t>(HierA2ACollHandle coll, cudaStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <uint16_t>(HierA2ACollHandle coll, cudaStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a<__half>(HierA2AvCollHandle coll, cudaStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <float>(HierA2AvCollHandle coll, cudaStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <uint32_t>(HierA2AvCollHandle coll, cudaStream_t dep_stream, size_t device_id); template void IbComm::post_send_command_a2a <uint16_t>(HierA2AvCollHandle coll, cudaStream_t dep_stream, size_t device_id); void IbComm::finalize() { if (!is_initialized_) { return; } if (!is_ready_to_transfer_) { for(size_t g = 0; g < num_gpus_; g++) { proxy_cmd_->cmd_[g] = ProxyStateTransitionCmd(); ProxyStateTransitionCmd& cmd_t = boost::get<ProxyStateTransitionCmd>(proxy_cmd_->cmd_[g]); M2PStateTransition& cmd = std::get<0>(cmd_t); cmd.state_ = IbvProxyState::DESTROY; } proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); } proxy_cmd_->set_destroy(); for (size_t g = 0; g < num_gpus_; g++) { int ret = pthread_join(proxy_thread_[g], NULL); PROXY_ASSERT(ret == 0); } is_finalized_ = true; } IbComm::~IbComm() { if (!is_finalized_) { finalize(); } } } #endif
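// ---------------------------------------------------------------------------
// Editorial note: self-contained illustration (not part of the original file)
// of the greedy GPU-NIC matching performed by IbComm::calculate_gpu_nic_affinity()
// for the case num_gpus >= num_nics. The 4x2 distance matrix below is made up.
#if 0
#include <cstdio>
#include <limits>
#include <vector>

int main() {
  const size_t num_gpus = 4, num_nics = 2;
  // dist[g][n]: PCIe hop distance from GPU g to NIC n (illustrative values).
  const std::vector<std::vector<size_t>> dist = {{1, 5}, {2, 5}, {5, 1}, {5, 2}};
  std::vector<size_t> affinity(num_gpus, num_nics);  // num_nics means "unassigned"
  size_t current_nic = 0;
  for (size_t assigned = 0; assigned < num_gpus; assigned++) {
    // Pick the closest still-unassigned GPU for the current NIC, then move to the
    // next NIC round-robin so the NICs end up with balanced GPU counts.
    size_t min_distance = std::numeric_limits<size_t>::max();
    size_t min_gpu = 0;
    for (size_t g = 0; g < num_gpus; g++) {
      if ((affinity[g] == num_nics) && (dist[g][current_nic] <= min_distance)) {
        min_distance = dist[g][current_nic];
        min_gpu = g;
      }
    }
    affinity[min_gpu] = current_nic;
    current_nic = (current_nic + 1) % num_nics;
  }
  for (size_t g = 0; g < num_gpus; g++) printf("GPU %zu -> NIC %zu\n", g, affinity[g]);
  return 0;
}
#endif
// ---------------------------------------------------------------------------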