Columns (all string-valued; the ranges give the minimum and maximum string length per column):

  hip_filename    string,  5 – 84 characters
  hip_content     string, 79 – 9.69M characters
  cuda_filename   string,  4 – 83 characters
  cuda_content    string, 19 – 9.69M characters
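Each row pairs a HIP translation (hip_filename / hip_content, produced automatically by hipify) with the original CUDA source (cuda_filename / cuda_content). As the pairs below show, the translation is largely a one-to-one API rename (cudaMalloc -> hipMalloc, cudaMemcpy -> hipMemcpy, and so on) plus a rewrite of the triple-chevron kernel launch into hipLaunchKernelGGL. The sketch below is a minimal, hypothetical illustration of that mapping; it is not a sample from the dataset, and each HIP call is annotated with the CUDA call it replaces.

// Minimal, hypothetical sketch of the CUDA -> HIP mapping captured by each row
// (not taken from the dataset). Each HIP call is annotated with its CUDA original.
#include <hip/hip_runtime.h>   // CUDA: #include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(float *x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // device code is unchanged by hipify
    if (i < n) x[i] *= a;
}

int main()
{
    const int n = 1024;
    float *d_x;
    hipMalloc(&d_x, n * sizeof(float));              // CUDA: cudaMalloc(&d_x, n * sizeof(float));
    hipMemset(d_x, 0, n * sizeof(float));            // CUDA: cudaMemset(d_x, 0, n * sizeof(float));

    // CUDA: scale<<<dim3(4), dim3(256)>>>(d_x, 2.0f, n);
    hipLaunchKernelGGL(scale, dim3(4), dim3(256), 0, 0, d_x, 2.0f, n);

    hipDeviceSynchronize();                          // CUDA: cudaDeviceSynchronize();
    hipFree(d_x);                                    // CUDA: cudaFree(d_x);
    return 0;
}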
2ea37d6f38b23c6e5b39d121e8dcd64869aeee1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "utils.h" __global__ void segscan_kernel(int spanSize, int workPerwarp, int listlen, int *gpuInput, int *gpuInputBuff, int *gpuFlag, int *gpuType, int stride) { int blockSize = blockDim.x; int threadidKernal = blockIdx.x * blockSize + threadIdx.x; //printf("warp num %d span size %d\n", warpNum,spanSize); int warpId = threadidKernal / spanSize; int laneId = threadidKernal % spanSize; //int beg = gbase * spanSize + threadidKernal; int beg = workPerwarp * warpId + laneId; int end = min(listlen, beg + workPerwarp); int spanIndexInWarp = 0; int i; for (i = beg, spanIndexInWarp = 0; i < end; i += spanSize, spanIndexInWarp++) { //printf("global id %d warpId %d spanIndexInWarp %d taskValue %d flag %d type %d\n", // i, warpId, spanIndexInWarp, gpuInput[i], gpuFlag[i], gpuType[i]); //get value from buffer if (laneId >= stride && gpuType[i] == gpuType[i - stride]) { gpuInput[i] = gpuInputBuff[i] + gpuInputBuff[i - stride]; } } memcpy(gpuInputBuff, gpuInput, sizeof(int) * listlen); } int main(void) { printf("seg scan test\n"); //set up the configuration int blockNum = 2; int blockSize = 64; int eleNum = 1300; int spanSize = 32; int warpNum = blockNum * blockSize / spanSize; int workPerwarp; int reminder; int extra; int listlen = eleNum; if (listlen % warpNum == 0) { workPerwarp = listlen / warpNum; } else { reminder = listlen % warpNum; if (reminder % warpNum == 0) { extra = reminder / warpNum; } else { extra = (reminder / warpNum) + 1; } workPerwarp = extra + (listlen / warpNum); } //input vector int i, j; int *input = (int *)malloc(sizeof(int) * eleNum); int *flag = (int *)malloc(sizeof(int) * eleNum); int *type = (int *)malloc(sizeof(int) * eleNum); int segLen=12; for (i = 0, j = 1; i < eleNum; i++, j++) { input[i] = 1; if (j % segLen == 0) { //the last element for every segment is zero flag[i] = 1; } type[i] = i / segLen; } //gpu allocation int *gpuInput; int *gpuInputBuff; int *gpuFlag; int *gpuType; hipMallocManaged((void **)&gpuInput, sizeof(int) * eleNum); hipMemcpy(gpuInput, input, sizeof(int) * eleNum, hipMemcpyHostToDevice); hipMallocManaged((void **)&gpuInputBuff, sizeof(int) * eleNum); hipMemcpy(gpuInputBuff, input, sizeof(int) * eleNum, hipMemcpyHostToDevice); hipMallocManaged((void **)&gpuFlag, sizeof(int) * eleNum); hipMemcpy(gpuFlag, flag, sizeof(int) * eleNum, hipMemcpyHostToDevice); hipMallocManaged((void **)&gpuType, sizeof(int) * eleNum); hipMemcpy(gpuType, type, sizeof(int) * eleNum, hipMemcpyHostToDevice); printf("init configuration ok\n"); int stride = 1; setTime(); for (stride = 1; stride <= eleNum; stride *= 2) { hipLaunchKernelGGL(( segscan_kernel), dim3(blockNum), dim3(blockSize), 0, 0, spanSize, workPerwarp, eleNum, gpuInput, gpuInputBuff, gpuFlag, gpuType, stride); } hipMemcpy(input, gpuInput, sizeof(int) * eleNum, hipMemcpyDeviceToHost); for (i = 0; i < eleNum; i++) { printf("checkValue index %d value %d\n", i, input[i]); //check the flag and get the last element for the segmentation, this value is useful for some cases } //free operations int finishTime=getTime(); printf("Took %d ms.\n",finishTime); hipDeviceSynchronize(); return 0; }
2ea37d6f38b23c6e5b39d121e8dcd64869aeee1e.cu
#include <stdio.h> #include "utils.h" __global__ void segscan_kernel(int spanSize, int workPerwarp, int listlen, int *gpuInput, int *gpuInputBuff, int *gpuFlag, int *gpuType, int stride) { int blockSize = blockDim.x; int threadidKernal = blockIdx.x * blockSize + threadIdx.x; //printf("warp num %d span size %d\n", warpNum,spanSize); int warpId = threadidKernal / spanSize; int laneId = threadidKernal % spanSize; //int beg = gbase * spanSize + threadidKernal; int beg = workPerwarp * warpId + laneId; int end = min(listlen, beg + workPerwarp); int spanIndexInWarp = 0; int i; for (i = beg, spanIndexInWarp = 0; i < end; i += spanSize, spanIndexInWarp++) { //printf("global id %d warpId %d spanIndexInWarp %d taskValue %d flag %d type %d\n", // i, warpId, spanIndexInWarp, gpuInput[i], gpuFlag[i], gpuType[i]); //get value from buffer if (laneId >= stride && gpuType[i] == gpuType[i - stride]) { gpuInput[i] = gpuInputBuff[i] + gpuInputBuff[i - stride]; } } memcpy(gpuInputBuff, gpuInput, sizeof(int) * listlen); } int main(void) { printf("seg scan test\n"); //set up the configuration int blockNum = 2; int blockSize = 64; int eleNum = 1300; int spanSize = 32; int warpNum = blockNum * blockSize / spanSize; int workPerwarp; int reminder; int extra; int listlen = eleNum; if (listlen % warpNum == 0) { workPerwarp = listlen / warpNum; } else { reminder = listlen % warpNum; if (reminder % warpNum == 0) { extra = reminder / warpNum; } else { extra = (reminder / warpNum) + 1; } workPerwarp = extra + (listlen / warpNum); } //input vector int i, j; int *input = (int *)malloc(sizeof(int) * eleNum); int *flag = (int *)malloc(sizeof(int) * eleNum); int *type = (int *)malloc(sizeof(int) * eleNum); int segLen=12; for (i = 0, j = 1; i < eleNum; i++, j++) { input[i] = 1; if (j % segLen == 0) { //the last element for every segment is zero flag[i] = 1; } type[i] = i / segLen; } //gpu allocation int *gpuInput; int *gpuInputBuff; int *gpuFlag; int *gpuType; cudaMallocManaged((void **)&gpuInput, sizeof(int) * eleNum); cudaMemcpy(gpuInput, input, sizeof(int) * eleNum, cudaMemcpyHostToDevice); cudaMallocManaged((void **)&gpuInputBuff, sizeof(int) * eleNum); cudaMemcpy(gpuInputBuff, input, sizeof(int) * eleNum, cudaMemcpyHostToDevice); cudaMallocManaged((void **)&gpuFlag, sizeof(int) * eleNum); cudaMemcpy(gpuFlag, flag, sizeof(int) * eleNum, cudaMemcpyHostToDevice); cudaMallocManaged((void **)&gpuType, sizeof(int) * eleNum); cudaMemcpy(gpuType, type, sizeof(int) * eleNum, cudaMemcpyHostToDevice); printf("init configuration ok\n"); int stride = 1; setTime(); for (stride = 1; stride <= eleNum; stride *= 2) { segscan_kernel<<<blockNum, blockSize>>>(spanSize, workPerwarp, eleNum, gpuInput, gpuInputBuff, gpuFlag, gpuType, stride); } cudaMemcpy(input, gpuInput, sizeof(int) * eleNum, cudaMemcpyDeviceToHost); for (i = 0; i < eleNum; i++) { printf("checkValue index %d value %d\n", i, input[i]); //check the flag and get the last element for the segmentation, this value is useful for some cases } //free operations int finishTime=getTime(); printf("Took %d ms.\n",finishTime); cudaDeviceSynchronize(); return 0; }
463b7fe0841ab2dc9867fd9dda643c91f753f00c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void advectParticles_OGL(float2 *part, float2 *v, int dx, int dy, float dt, int lb, size_t pitch) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; // gtidx is the domain location in x for this thread float2 pterm, vterm; if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * dx + gtidx; pterm = part[fj]; int xvi = ((int)(pterm.x * dx)); int yvi = ((int)(pterm.y * dy)); vterm = *((float2*)((char*)v + yvi * pitch) + xvi); pterm.x += dt * vterm.x; pterm.x = pterm.x - (int)pterm.x; pterm.x += 1.f; pterm.x = pterm.x - (int)pterm.x; pterm.y += dt * vterm.y; pterm.y = pterm.y - (int)pterm.y; pterm.y += 1.f; pterm.y = pterm.y - (int)pterm.y; part[fj] = pterm; } } // If this thread is inside the domain in Y } // If this thread is inside the domain in X }
463b7fe0841ab2dc9867fd9dda643c91f753f00c.cu
#include "includes.h" __global__ void advectParticles_OGL(float2 *part, float2 *v, int dx, int dy, float dt, int lb, size_t pitch) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; // gtidx is the domain location in x for this thread float2 pterm, vterm; if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * dx + gtidx; pterm = part[fj]; int xvi = ((int)(pterm.x * dx)); int yvi = ((int)(pterm.y * dy)); vterm = *((float2*)((char*)v + yvi * pitch) + xvi); pterm.x += dt * vterm.x; pterm.x = pterm.x - (int)pterm.x; pterm.x += 1.f; pterm.x = pterm.x - (int)pterm.x; pterm.y += dt * vterm.y; pterm.y = pterm.y - (int)pterm.y; pterm.y += 1.f; pterm.y = pterm.y - (int)pterm.y; part[fj] = pterm; } } // If this thread is inside the domain in Y } // If this thread is inside the domain in X }
c2cd98afa2d3a9ee09ea6131d859c0626a308d7e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "isinglib2.h" #define DEBUG 0 __global__ void mykernel(spintype *s, float *ran, float * coupling, int n, int dim, float temperature) { int r,i; float old_energy, new_energy; float test; r = ran[threadIdx.x] *n*dim; old_energy = 0; for (i=0; i < s[r].n_neigh; i++) { old_energy -= coupling[s[r].neigh_couple[i]]* s[s[r].neighbours[i]].s; } new_energy = 0; s[r].s = -s[r].s; for (i=0; i < s[r].n_neigh; i++) { new_energy -= coupling[s[r].neigh_couple[i]]* s[s[r].neighbours[i]].s; } if (ran[512+threadIdx.x] > exp(-(new_energy -old_energy)/temperature)) s[r].s = - s[r].s; } int main() { int n=50, dim=2; int i; spintype *h_s, *d_s; float *h_r, *d_r; h_s = setup(1, 50, 2); float coup[3] = {-1,-1,-1}; double coupl[3] = {-1,-1,-1}; float *d_coupl; coupling = coupl; h_r = (float*)malloc(1024*sizeof(float)); hipMalloc(&d_r, 1024*sizeof(float)); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } hipMalloc(&d_coupl, 3*sizeof(float)); hipMalloc(&d_s, pow(n,dim)*sizeof(spintype)); hipMemcpy(d_s, h_s, pow(n,dim)*sizeof(spintype), hipMemcpyHostToDevice); hipMemcpy(d_r, h_r, 1024*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_coupl, coup, pow(n,dim)*sizeof(spintype), hipMemcpyHostToDevice); hipLaunchKernelGGL(( mykernel), dim3(512),dim3(512), 0, 0, d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } hipMemcpy(d_r, h_r, 1024*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( mykernel), dim3(512),dim3(512), 0, 0, d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } hipMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), hipMemcpyHostToDevice); hipLaunchKernelGGL(( mykernel), dim3(512),dim3(512), 0, 0, d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } hipMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), hipMemcpyHostToDevice); hipLaunchKernelGGL(( mykernel), dim3(512),dim3(512), 0, 0, d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } hipMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), hipMemcpyHostToDevice); hipLaunchKernelGGL(( mykernel), dim3(512),dim3(512), 0, 0, d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } hipMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), hipMemcpyHostToDevice); hipLaunchKernelGGL(( mykernel), dim3(512),dim3(512), 0, 0, d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } hipMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), hipMemcpyHostToDevice); hipLaunchKernelGGL(( mykernel), dim3(512),dim3(512), 0, 0, d_s, d_r, d_coupl, n, dim, 1.0); hipMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), hipMemcpyHostToDevice); hipMemcpy(h_s, d_s, pow(n,dim)*sizeof(spintype), hipMemcpyDeviceToHost); printf("Got energy %lf\n ", energy_calc(h_s, n, dim, 0)); hipFree(d_s); cleanup(h_s, n, dim); return(0); } spintype * setup(int type, int n, int dim) { spintype *s; s = (spintype*)malloc(pow(n,dim)*sizeof(spintype)); if (s ==NULL) { printf("Couldn;t allocate memory\n"); exit(EXIT_FAILURE); } if (type == 1) { setupSqrSystem(s,n, dim); } else { setupTriSystem(s,n,dim); } initSpins(s,n,dim); return(s); } void cleanup(spintype * s, int n, int dim) { int i; for (i = 0; i < pow(n,dim); i ++) { free(s[i].neigh_couple); free(s[i].neighbours); } free(s); } double sumover(spintype *s, int n, int dim) { 
int i; double result; result = 0; //printf("%d, %d, %lf\n", n, dim, pow(n,dim)); for (i = 0; i < pow(n,dim); i++) { result += (double) s[i].s; } return result; } double magorder(spintype *s, int n, int dim) { int i; double result; if (coupling[0] > 0) { DEBUGLINE printf("Calling ferro order\n"); return(sumover(s,n,dim)); } result = 0; DEBUGLINE printf("Running Anti order routine\n"); for (i = 0; i < pow(n,dim); i++) { result += (i%2 == 0) ? s[i].s :-s[i].s; } return result; } void setupSqrSystem(spintype *s, int n, int dim) { int i,j,k; int curr_spin; for (i = 0; i < n; i ++) { for(j = 0; j < n; j++) { for (k = 0; k < n; k++ ) { if ( dim <= 2) k = 0; curr_spin = ai(i,j,k,n); s[curr_spin].n_neigh = 2*dim; s[curr_spin].neighbours = (int*)malloc(sizeof(int)*s[curr_spin].n_neigh); s[curr_spin].neigh_couple = (int*)malloc(sizeof(int)*2*dim); s[curr_spin].neighbours[0] = (i < (n-1)) ? ai(i+1,j,k,n) : ai(0,j,k,n); s[curr_spin].neighbours[1] = ( i == 0) ? ai(n-1,j,k,n) : ai(i-1,j,k,n); s[curr_spin].neigh_couple[0] = 0; s[curr_spin].neigh_couple[1] = 0; if(dim >= 2) { s[curr_spin].neighbours[2] = (j < (n-1)) ? ai(i,j+1,k,n) : ai(i,0,k,n); s[curr_spin].neighbours[3] = (j == 0) ? ai(i,n-1,k,n) : ai(i,j-1,k,n); s[curr_spin].neigh_couple[2] = 1; s[curr_spin].neigh_couple[3] = 1; } if(dim >= 3) { s[curr_spin].neighbours[4] = (k < (n-1)) ? ai(i,j,k+1,n) : ai(i,j,0,n); s[curr_spin].neighbours[5] = (k == 0) ? ai(i,j,n-1,n): ai(i,j,k-1,n); s[curr_spin].neigh_couple[4] = 2; s[curr_spin].neigh_couple[5] = 2; } if (dim <= 2) k = n; } } } } void setupTriSystem(spintype *s, int n, int dim) { int i,j,k; int curr_spin; if (dim < 2) { printf("Invalid Dimension for Triangular lattice... defaulting to square\n"); setupSqrSystem(s,n,dim); return; } for (i = 0; i < n; i ++) { for(j = 0; j < n; j++) { for (k = 0; k < n; k++ ) { if ( dim == 2) k = 0; curr_spin = ai(i,j,k,n); /* Initialise Arrays containing Neighbours and coupling info*/ s[curr_spin].n_neigh = 2*dim + 2; s[curr_spin].neighbours = (int*)malloc(sizeof(int)*s[curr_spin].n_neigh); s[curr_spin].neigh_couple = (int*)malloc(sizeof(int)*(2*dim + 2)); /*Neighbours on a line*/ s[curr_spin].neighbours[0] = (i < (n-1)) ? ai(i+1,j,k,n) : ai(0,j,k,n); s[curr_spin].neighbours[1] = ( i == 0) ? ai(n-1,j,k,n) : ai(i-1,j,k,n); s[curr_spin].neigh_couple[0] = 0; s[curr_spin].neigh_couple[1] = 0; if(dim >= 2) { /* neighbours in a plane */ s[curr_spin].neighbours[2] = (j < (n-1)) ? ai(i,j+1,k,n) : ai(i,0,k,n); s[curr_spin].neighbours[3] = (j == 0) ? ai(i,n-1,k,n) : ai(i,j-1,k,n); s[curr_spin].neigh_couple[2] = 1; s[curr_spin].neigh_couple[3] = 1; /*Diagonal Neighbours*/ s[curr_spin].neighbours[4] = (i<(n-1) && j<(n-1)) ? ai(i+1,j+1,k,n) : ai(0,0,k,n); s[curr_spin].neighbours[4] = (i>=(n-1) && j<(n-1)) ? ai(0,j+1,k,n) : s[curr_spin].neighbours[4]; s[curr_spin].neighbours[4] = (i<(n-1) && j >=(n-1)) ? ai(i+1,0,k,n) :s[curr_spin].neighbours[4]; s[curr_spin].neighbours[5] = (j == 0 && i == 0) ? ai(n-1,n-1,k,n) : ai(i-1,j-1,k,n); s[curr_spin].neighbours[5] = (j != 0 && i == 0) ? ai(n-1,j-1,k,n) : s[curr_spin].neighbours[5]; s[curr_spin].neighbours[5] = (j == 0 && i != 0) ? ai(i-1,n-1,k,n) : s[curr_spin].neighbours[5]; s[curr_spin].neigh_couple[4] = 2; s[curr_spin].neigh_couple[5] = 2; } if(dim >= 3) { /* Links between Planes */ s[curr_spin].neighbours[6] = (k < (n-1)) ? ai(i,j,k+1,n) : ai(i,j,0,n); s[curr_spin].neighbours[7] = (k == 0) ? 
ai(i,j,n-1,n): ai(i,j,k-1,n); s[curr_spin].neigh_couple[6] = 3; s[curr_spin].neigh_couple[7] = 3; } if (dim <= 2) k = n; } } } } int ai(int i, int j, int k, int n) { return (i + j*n + n*n*k); } void initSpins(spintype *s, int n, int dim) { int r; int i,j; j = pow(n,dim); for (i = 0; i < j; i++) { r = rand(); s[i].s = 0; s[i].s = (r <= RAND_MAX/2)? 1:-1; if (s[i].s == 0) { printf("Error: rand gave: %d\n",r); exit(1); } } // printf("Spins are good\n"); /* trivial check to make sure all spins initialized*/ for(i = 0; i< j; i ++) { if (s[i].s == 0 ) { printf("Error: Initalisation failed.\n"); printf("Error: Spin %d == 0\n", i); exit(1); } } } double energy_calc(spintype * s, int n, int dim, double field) { int i,l,j; double result; result = 0; j = pow(n,dim); for (i=0; i < j; i ++) { for (l = 0; l < s[i].n_neigh; l ++) { result -= 0.5*coupling[s[i].neigh_couple[l]] * s[i].s * s[s[i].neighbours[l]].s ; } result += -s[i].s * field; } result = (double) result / pow(n,dim); return result; }
c2cd98afa2d3a9ee09ea6131d859c0626a308d7e.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include "isinglib2.h" #define DEBUG 0 __global__ void mykernel(spintype *s, float *ran, float * coupling, int n, int dim, float temperature) { int r,i; float old_energy, new_energy; float test; r = ran[threadIdx.x] *n*dim; old_energy = 0; for (i=0; i < s[r].n_neigh; i++) { old_energy -= coupling[s[r].neigh_couple[i]]* s[s[r].neighbours[i]].s; } new_energy = 0; s[r].s = -s[r].s; for (i=0; i < s[r].n_neigh; i++) { new_energy -= coupling[s[r].neigh_couple[i]]* s[s[r].neighbours[i]].s; } if (ran[512+threadIdx.x] > exp(-(new_energy -old_energy)/temperature)) s[r].s = - s[r].s; } int main() { int n=50, dim=2; int i; spintype *h_s, *d_s; float *h_r, *d_r; h_s = setup(1, 50, 2); float coup[3] = {-1,-1,-1}; double coupl[3] = {-1,-1,-1}; float *d_coupl; coupling = coupl; h_r = (float*)malloc(1024*sizeof(float)); cudaMalloc(&d_r, 1024*sizeof(float)); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } cudaMalloc(&d_coupl, 3*sizeof(float)); cudaMalloc(&d_s, pow(n,dim)*sizeof(spintype)); cudaMemcpy(d_s, h_s, pow(n,dim)*sizeof(spintype), cudaMemcpyHostToDevice); cudaMemcpy(d_r, h_r, 1024*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_coupl, coup, pow(n,dim)*sizeof(spintype), cudaMemcpyHostToDevice); mykernel<<<512,512>>>(d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } cudaMemcpy(d_r, h_r, 1024*sizeof(float), cudaMemcpyHostToDevice); mykernel<<<512,512>>>(d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } cudaMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), cudaMemcpyHostToDevice); mykernel<<<512,512>>>(d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } cudaMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), cudaMemcpyHostToDevice); mykernel<<<512,512>>>(d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } cudaMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), cudaMemcpyHostToDevice); mykernel<<<512,512>>>(d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } cudaMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), cudaMemcpyHostToDevice); mykernel<<<512,512>>>(d_s, d_r, d_coupl, n, dim, 1.0); for (i =0; i < 1024; i ++) { h_r[i] = (float) rand()/RAND_MAX; } cudaMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), cudaMemcpyHostToDevice); mykernel<<<512,512>>>(d_s, d_r, d_coupl, n, dim, 1.0); cudaMemcpy(d_r, h_r, pow(n,dim)*sizeof(spintype), cudaMemcpyHostToDevice); cudaMemcpy(h_s, d_s, pow(n,dim)*sizeof(spintype), cudaMemcpyDeviceToHost); printf("Got energy %lf\n ", energy_calc(h_s, n, dim, 0)); cudaFree(d_s); cleanup(h_s, n, dim); return(0); } spintype * setup(int type, int n, int dim) { spintype *s; s = (spintype*)malloc(pow(n,dim)*sizeof(spintype)); if (s ==NULL) { printf("Couldn;t allocate memory\n"); exit(EXIT_FAILURE); } if (type == 1) { setupSqrSystem(s,n, dim); } else { setupTriSystem(s,n,dim); } initSpins(s,n,dim); return(s); } void cleanup(spintype * s, int n, int dim) { int i; for (i = 0; i < pow(n,dim); i ++) { free(s[i].neigh_couple); free(s[i].neighbours); } free(s); } double sumover(spintype *s, int n, int dim) { int i; double result; result = 0; //printf("%d, %d, %lf\n", n, dim, pow(n,dim)); for (i = 0; i < pow(n,dim); i++) { result += (double) s[i].s; } return result; } double magorder(spintype *s, int n, int dim) { int i; double result; if (coupling[0] > 0) { DEBUGLINE printf("Calling ferro order\n"); 
return(sumover(s,n,dim)); } result = 0; DEBUGLINE printf("Running Anti order routine\n"); for (i = 0; i < pow(n,dim); i++) { result += (i%2 == 0) ? s[i].s :-s[i].s; } return result; } void setupSqrSystem(spintype *s, int n, int dim) { int i,j,k; int curr_spin; for (i = 0; i < n; i ++) { for(j = 0; j < n; j++) { for (k = 0; k < n; k++ ) { if ( dim <= 2) k = 0; curr_spin = ai(i,j,k,n); s[curr_spin].n_neigh = 2*dim; s[curr_spin].neighbours = (int*)malloc(sizeof(int)*s[curr_spin].n_neigh); s[curr_spin].neigh_couple = (int*)malloc(sizeof(int)*2*dim); s[curr_spin].neighbours[0] = (i < (n-1)) ? ai(i+1,j,k,n) : ai(0,j,k,n); s[curr_spin].neighbours[1] = ( i == 0) ? ai(n-1,j,k,n) : ai(i-1,j,k,n); s[curr_spin].neigh_couple[0] = 0; s[curr_spin].neigh_couple[1] = 0; if(dim >= 2) { s[curr_spin].neighbours[2] = (j < (n-1)) ? ai(i,j+1,k,n) : ai(i,0,k,n); s[curr_spin].neighbours[3] = (j == 0) ? ai(i,n-1,k,n) : ai(i,j-1,k,n); s[curr_spin].neigh_couple[2] = 1; s[curr_spin].neigh_couple[3] = 1; } if(dim >= 3) { s[curr_spin].neighbours[4] = (k < (n-1)) ? ai(i,j,k+1,n) : ai(i,j,0,n); s[curr_spin].neighbours[5] = (k == 0) ? ai(i,j,n-1,n): ai(i,j,k-1,n); s[curr_spin].neigh_couple[4] = 2; s[curr_spin].neigh_couple[5] = 2; } if (dim <= 2) k = n; } } } } void setupTriSystem(spintype *s, int n, int dim) { int i,j,k; int curr_spin; if (dim < 2) { printf("Invalid Dimension for Triangular lattice... defaulting to square\n"); setupSqrSystem(s,n,dim); return; } for (i = 0; i < n; i ++) { for(j = 0; j < n; j++) { for (k = 0; k < n; k++ ) { if ( dim == 2) k = 0; curr_spin = ai(i,j,k,n); /* Initialise Arrays containing Neighbours and coupling info*/ s[curr_spin].n_neigh = 2*dim + 2; s[curr_spin].neighbours = (int*)malloc(sizeof(int)*s[curr_spin].n_neigh); s[curr_spin].neigh_couple = (int*)malloc(sizeof(int)*(2*dim + 2)); /*Neighbours on a line*/ s[curr_spin].neighbours[0] = (i < (n-1)) ? ai(i+1,j,k,n) : ai(0,j,k,n); s[curr_spin].neighbours[1] = ( i == 0) ? ai(n-1,j,k,n) : ai(i-1,j,k,n); s[curr_spin].neigh_couple[0] = 0; s[curr_spin].neigh_couple[1] = 0; if(dim >= 2) { /* neighbours in a plane */ s[curr_spin].neighbours[2] = (j < (n-1)) ? ai(i,j+1,k,n) : ai(i,0,k,n); s[curr_spin].neighbours[3] = (j == 0) ? ai(i,n-1,k,n) : ai(i,j-1,k,n); s[curr_spin].neigh_couple[2] = 1; s[curr_spin].neigh_couple[3] = 1; /*Diagonal Neighbours*/ s[curr_spin].neighbours[4] = (i<(n-1) && j<(n-1)) ? ai(i+1,j+1,k,n) : ai(0,0,k,n); s[curr_spin].neighbours[4] = (i>=(n-1) && j<(n-1)) ? ai(0,j+1,k,n) : s[curr_spin].neighbours[4]; s[curr_spin].neighbours[4] = (i<(n-1) && j >=(n-1)) ? ai(i+1,0,k,n) :s[curr_spin].neighbours[4]; s[curr_spin].neighbours[5] = (j == 0 && i == 0) ? ai(n-1,n-1,k,n) : ai(i-1,j-1,k,n); s[curr_spin].neighbours[5] = (j != 0 && i == 0) ? ai(n-1,j-1,k,n) : s[curr_spin].neighbours[5]; s[curr_spin].neighbours[5] = (j == 0 && i != 0) ? ai(i-1,n-1,k,n) : s[curr_spin].neighbours[5]; s[curr_spin].neigh_couple[4] = 2; s[curr_spin].neigh_couple[5] = 2; } if(dim >= 3) { /* Links between Planes */ s[curr_spin].neighbours[6] = (k < (n-1)) ? ai(i,j,k+1,n) : ai(i,j,0,n); s[curr_spin].neighbours[7] = (k == 0) ? ai(i,j,n-1,n): ai(i,j,k-1,n); s[curr_spin].neigh_couple[6] = 3; s[curr_spin].neigh_couple[7] = 3; } if (dim <= 2) k = n; } } } } int ai(int i, int j, int k, int n) { return (i + j*n + n*n*k); } void initSpins(spintype *s, int n, int dim) { int r; int i,j; j = pow(n,dim); for (i = 0; i < j; i++) { r = rand(); s[i].s = 0; s[i].s = (r <= RAND_MAX/2)? 
1:-1; if (s[i].s == 0) { printf("Error: rand gave: %d\n",r); exit(1); } } // printf("Spins are good\n"); /* trivial check to make sure all spins initialized*/ for(i = 0; i< j; i ++) { if (s[i].s == 0 ) { printf("Error: Initalisation failed.\n"); printf("Error: Spin %d == 0\n", i); exit(1); } } } double energy_calc(spintype * s, int n, int dim, double field) { int i,l,j; double result; result = 0; j = pow(n,dim); for (i=0; i < j; i ++) { for (l = 0; l < s[i].n_neigh; l ++) { result -= 0.5*coupling[s[i].neigh_couple[l]] * s[i].s * s[s[i].neighbours[l]].s ; } result += -s[i].s * field; } result = (double) result / pow(n,dim); return result; }
00e36b5e79ce26f9aba8ca9cf83073d6bd5db3f0.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdio.h> #include <iostream> #include <iomanip> #include <string> #include <random> #include <chrono> #include <algorithm> // includes, cuda #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hiprand/hiprand_kernel.h> // includes, thrust #include <thrust/host_vector.h> #include <thrust/device_vector.h> //////////////////////////////////////////////////////////////////////////////// #define BG_BBLUE_FG_BLACK "\033[104;30m" #define BG_BLUE_FG_BLACK "\033[44;30m" #define BG_BLUE_FG_WHITE "\033[44;37m" #define BG_BLACK_FG_WHITE "\033[0m" #define BG_WHITE_FG_BLACK "\033[30;107m" // 0 - 0000 = empty // 4 - 0100 = black man // 5 - 0101 = black king // 6 - 0110 = white man // 7 - 0111 = white king // // 8 - 1000 in (tile_idx = 0) is used to save turn flag (1 - white, 0 - black) // // 8 tiles saved in one unsigned int with encoding as above // example: 0100 0100 0100 0100 0000 0000 0000 0000 // board indexing: 7 6 5 4 3 2 1 0 //#define DEBUG; #define MEASURE_TIME #define THREADS_PER_BLOCK 1024 #define BLOCKS_PER_SEQUENCE_X 1024 #define BLOCKS_PER_SEQUENCE_Y 1 #define BLOCKS_PER_SEQUENCE_Z 1 //////////////////////////////////////////////////////////////////////////////// - board state macros #define SET_VAL_BOARD(idx, val, board) board[idx >> 3] ^= (board[idx >> 3] ^ val << ((idx & 7) << 2)) & (15 << ((idx & 7) << 2)) #define GET_VAL_BOARD(idx, board) board[idx >> 3] << 28 - ((idx & 7) << 2) >> 28 #define GET_VAL_BOARD_S(idx, board) idx > 31 ? 8 : board[idx >> 3] << 28 - ((idx & 7) << 2) >> 28 //#define IS_EMPTY(tile) (bool)(!tile) -> IS_PIECE instead - ALWAYS #define IS_PIECE(tile) (bool)(tile & 4) #define IS_WHITE(tile) (bool)(tile & 2) #define IS_BLACK(tile) (bool)(~tile & 2) #define IS_KING(tile) (bool)(tile & 1) #define FLIP_TURN_FLAG(board) board[0] ^= 8 #define GET_TURN_FLAG(board) (bool)(board[0] & 8) //////////////////////////////////////////////////////////////////////////////// - move_pos array macros #define GET_BEATING_POS_FLAG(move_pos) (bool)(move_pos[3] & 1) #define SET_BEATING_POS_FLAG(move_pos) move_pos[3] |= 1 #define GET_MOVE_CHECK_GUARD(move_pos) (bool)(move_pos[3] & 2) #define SET_MOVE_CHECK_GUARD(move_pos) move_pos[3] |= 2 #define CLEAR_MOVE_CHECK_GUARD(move_pos) move_pos[3] &= ~2 #define GET_NUM_OF_MOVES(move_pos) move_pos[3] >> 2 #define SET_NUM_OF_MOVES(move_pos, num_of_moves) move_pos[3] |= num_of_moves << 2 #define GET_VAL_MOVE_POS(idx, move_pos) move_pos[idx >> 2] << 24 - ((idx & 3) << 3) >> 24 #define SET_VAL_MOVE_POS(idx, val, move_pos) move_pos[idx >> 2] |= val << ((idx & 3) << 3) #define GET_PIECE_NONBEATING_FLAG(dir, move_pos) (bool)((move_pos[2] << 30 - (dir << 1) >> 30) & 1) #define SET_PIECE_NONBEATING_FLAG(dir, move_pos) move_pos[2] |= 1 << (dir << 1) #define GET_PIECE_BEATING_FLAG(dir, move_pos) (bool)((move_pos[2] << 30 - (dir << 1) >> 30) & 2) #define SET_PIECE_BEATING_FLAG(dir, move_pos) move_pos[2] |= 2 << (dir << 1) //------------------------------------------------------------------------------------------------------------------- void init_board(unsigned int board[4]); void draw_board(unsigned int board[4]); //////////////////////////////////////////////////////////////////////////////// - get tile idx in specific direction from current __host__ __device__ unsigned int get_left_upper_idx(unsigned int& cur_tile_idx); __host__ __device__ unsigned int get_right_upper_idx(unsigned int& cur_tile_idx); __host__ __device__ unsigned int 
get_left_lower_idx(unsigned int& cur_tile_idx); __host__ __device__ unsigned int get_right_lower_idx(unsigned int& cur_tile_idx); //////////////////////////////////////////////////////////////////////////////// - piece movement __host__ __device__ void get_move_possibility_loop_fun(unsigned int board[4], unsigned int move_pos[4], unsigned int& cur_idx, unsigned int& moves_idx); __host__ __device__ void get_move_possibility(unsigned int board[4], unsigned int move_pos[4]); __host__ __device__ void get_piece_move_pos(unsigned int board[4], unsigned int move_pos[4], unsigned int& idx); __host__ __device__ void move_piece(unsigned int board[4], unsigned int& cur_tile_idx, unsigned int (*get_dir_idx_ptr)(unsigned int&)); //////////////////////////////////////////////////////////////////////////////// - game loop and players void game_loop(unsigned int board[4], void (*white_player)(unsigned int*, unsigned int*), void (*black_player)(unsigned int*, unsigned int*)); void human_player(unsigned int board[4], unsigned int move_pos[4]); void random_player(unsigned int board[4], unsigned int move_pos[4]); //////////////////////////////////////////////////////////////////////////////// - MCTS unsigned int simulate_game_CPU(unsigned int board[4]); unsigned int count_beating_sequences_for_piece_dir(unsigned int board[4], unsigned int cur_tile_idx, unsigned int dir); void MCTS_CPU_player(unsigned int board[4], unsigned int move_pos[4]); void MCTS_GPU_player(unsigned int board[4], unsigned int move_pos[4]); __global__ void MCTS_kernel(const unsigned int* d_first_layer, hiprandState_t* states, float* d_results, const unsigned int possible_sequences); __global__ void setup_kernel(hiprandState_t* states); __device__ float simulate_game_GPU(unsigned int board[4], hiprandState_t* states, const unsigned int possible_sequences); __device__ void random_player_GPU(unsigned int board[4], unsigned int move_pos[4], hiprandState_t* state); //////////////////////////////////////////////////////////////////////////////// - user interaction void disp_moveable_pieces(unsigned int board[4], unsigned int move_pos[4]); void disp_possible_dirs(unsigned int board[4], unsigned int move_pos[4], unsigned int& idx); void get_cords_from_console(char cords[2]); unsigned int translate_cords_to_idx(const char cords[2]); void translate_idx_to_cords(unsigned int idx, char cords[2]); void disp_end_state(unsigned int* board); //////////////////////////////////////////////////////////////////////////////// - game conclusion __host__ __device__ void get_end_state(unsigned int board[4]); //////////////////////////////////////////////////////////////////////////////// - for debugging void testing_function(); void test_get_idx_funs(unsigned int board[4]); void test_get_move_possibility(unsigned int board[4], unsigned int move_pos[4]); void test_get_move_possibility_board_init(unsigned int board[4], unsigned int test_choice); void test_get_move_possibility_init_loop(unsigned int board[4], int test_choice_lower_bound = 1, int test_choice_upper_bound = 7); void test_get_piece_move_pos(unsigned int board[4], unsigned int move_pos[4], unsigned int idx); void test_translate_cords_to_idx(); void test_translate_idx_to_cords(); //void bench(unsigned int board[4]); //------------------------------------------------------------------------------------------------------------------- void init_board(unsigned int board[4]) { // white bottom board[0] = 1145324612; //1st 2nd rows board[1] = 17476; //3rd 4th rows board[2] = 1717960704; //5th 6th rows board[3] = 
1717986918; //7th 8th rows } void draw_board(unsigned int board[4]) { unsigned int left_side_idx = 1; // left_side_idx - labels counter bool white_first = true; // flag for alternating colors std::cout << BG_BBLUE_FG_BLACK << " "; for (char c = 'A'; c != 'I'; ++c) // print labels std::cout << ' ' << c << ' '; std::cout << BG_BLACK_FG_WHITE << std::endl; for (unsigned int i = 0; i < 4; ++i) // i = board_idx { for (unsigned int j = 0; j < 8; ++j) // j = tile_in_board_idx { unsigned int tile = board[i] << (28 - (j << 2)) >> 28; if (j == 0 || j == 4) std::cout << BG_BBLUE_FG_BLACK << ' ' << left_side_idx++ << ' '; // print label if (white_first) std::cout << BG_BBLUE_FG_BLACK << " "; if (IS_PIECE(tile)) { if (IS_WHITE(tile)) std::cout << BG_BLUE_FG_WHITE; else std::cout << BG_BLUE_FG_BLACK; if (IS_KING(tile)) std::cout << " K "; else std::cout << " @ "; } else std::cout << BG_BLUE_FG_BLACK << " "; if (!white_first) std::cout << BG_BBLUE_FG_BLACK << " "; if ((j & 3) == 3) // swap colors for second row { std::cout << BG_BLACK_FG_WHITE << std::endl; white_first = !white_first; } } } } //////////////////////////////////////////////////////////////////////////////// - get tile idx in specific direction from current (32 - cur_tile_idx out of bound) __host__ __device__ unsigned int get_left_upper_idx(unsigned int& cur_tile_idx) { if (cur_tile_idx > 31 || !(cur_tile_idx >> 2)) return 32; // second condition checks if is top row if (cur_tile_idx & 4) // even row (counting from 1) { if (cur_tile_idx & 3) // if not left-most return cur_tile_idx - 5; return 32; } else // odd row { return cur_tile_idx - 4; } } __host__ __device__ unsigned int get_right_upper_idx(unsigned int& cur_tile_idx) { if (cur_tile_idx > 31 || !(cur_tile_idx >> 2)) return 32; // second condition checks if is top row if (cur_tile_idx & 4) // even row (counting from 1) { return cur_tile_idx - 4; } else // odd row { if (~cur_tile_idx & 3) // if not right-most return cur_tile_idx - 3; return 32; } } __host__ __device__ unsigned int get_left_lower_idx(unsigned int& cur_tile_idx) { if (cur_tile_idx > 31 || (cur_tile_idx >> 2) == 7) return 32; // second condition checks if is bottom row if (cur_tile_idx & 4) // even row (counting from 1) { if (cur_tile_idx & 3) // if not left-most return cur_tile_idx + 3; return 32; } else // odd row { return cur_tile_idx + 4; } } __host__ __device__ unsigned int get_right_lower_idx(unsigned int& cur_tile_idx) { if (cur_tile_idx > 31 || (cur_tile_idx >> 2) == 7) return 32; // second condition checks if is bottom row if (cur_tile_idx & 4) // even row (counting from 1) { return cur_tile_idx + 4; } else // odd row { if (~cur_tile_idx & 3) // if not right-most return cur_tile_idx + 5; return 32; } } //////////////////////////////////////////////////////////////////////////////// - piece movement __host__ __device__ void get_move_possibility_loop_fun(unsigned int board[4], unsigned int move_pos[4], unsigned int& cur_idx, unsigned int& moves_idx) { unsigned int tile, tmp_idx, result; tile = GET_VAL_BOARD(cur_idx, board); // check if cur_idx tile holds a piece and if it belongs to the currently moving player if (IS_PIECE(tile) && (GET_TURN_FLAG(board) == IS_WHITE(tile))) { unsigned int (*get_dir_idx_ptr)(unsigned int&); for (unsigned int direction = 0; direction < 4; ++direction) { if (GET_TURN_FLAG(board) == (bool)(direction & 2) && !IS_KING(tile)) // do not check backwards movement continue; switch (direction) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; 
break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: return; } tmp_idx = get_dir_idx_ptr(cur_idx); if (tmp_idx == 32) continue; // check next 'direction' if out of bound result = GET_VAL_BOARD(tmp_idx, board); if (IS_PIECE(result) && GET_TURN_FLAG(board) != IS_WHITE(result)) // proceed only if the piece in 'direction' belongs to the opponent { tmp_idx = get_dir_idx_ptr(tmp_idx); if (tmp_idx == 32) continue; result = GET_VAL_BOARD(tmp_idx, board); if (!IS_PIECE(result)) // check if tile behind opponents's piece is empty { if (!GET_BEATING_POS_FLAG(move_pos)) // set beating flag if no beating move was found previously, clear non-beating moves and save new idx { moves_idx = 0; move_pos[0] = move_pos[1] = move_pos[2] = move_pos[3] = 0; SET_BEATING_POS_FLAG(move_pos); } SET_VAL_MOVE_POS(moves_idx, cur_idx, move_pos); ++moves_idx; CLEAR_MOVE_CHECK_GUARD(move_pos); // clear for next iteration return; } } // check if tile in 'direction' is empty, skip if beating possibility is already saved in array // or a non-beating move was previously found for cur_idx tile else if (!IS_PIECE(result) && !GET_BEATING_POS_FLAG(move_pos) && !GET_MOVE_CHECK_GUARD(move_pos)) { SET_VAL_MOVE_POS(moves_idx, cur_idx, move_pos); ++moves_idx; SET_MOVE_CHECK_GUARD(move_pos); // set flag to check only possibility of beating in next iterations continue; } } CLEAR_MOVE_CHECK_GUARD(move_pos); // clear for next iteration } } // Index of tile that can be moved is stored similarly as board representation, but in 8 bits instead of 4 bits // Additionally move_pos[3] is used for flags and saving number of indexes in the whole array (0 <= n <= 12) // Flags include - availability of beating for returned indexes, other flag for loop_fun purpose only __host__ __device__ void get_move_possibility(unsigned int board[4], unsigned int move_pos[4]) { unsigned int moves_idx = 0; move_pos[0] = move_pos[1] = move_pos[2] = move_pos[3] = 0; for (unsigned int i = 0; i < 32; ++i) get_move_possibility_loop_fun(board, move_pos, i, moves_idx); SET_NUM_OF_MOVES(move_pos, moves_idx); // record number of possible moves } // flags in 2 bit pairs: 01 - non-beating move, 10 - beating move, move_pos[2] is used for storing all pairs, // the same spots in move_pos[3] as in get_move_possibility are used for beating available flag and number of indexes saved (0 <= n <= 3) // 0 - left upper, 1 - right upper, 2 - left lower, 3 - right lower __host__ __device__ void get_piece_move_pos(unsigned int board[4], unsigned int move_pos[4], unsigned int& idx) { unsigned int tile, tmp_idx, result, move_counter = 0; move_pos[2] = move_pos[3] = 0; // [0],[1] - not used tile = GET_VAL_BOARD_S(idx, board); if (IS_PIECE(tile)) { unsigned int (*get_dir_idx_ptr)(unsigned int&); for (unsigned int direction = 0; direction < 4; ++direction) { if (IS_WHITE(tile) == (bool)(direction & 2) && !IS_KING(tile)) // do not check backwards movement continue; switch (direction) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: return; } tmp_idx = get_dir_idx_ptr(idx); if (tmp_idx == 32) continue; // check next 'direction' if out of bound result = GET_VAL_BOARD(tmp_idx, board); if (IS_PIECE(result) && IS_WHITE(tile) != IS_WHITE(result)) // proceed only if the piece in 'direction' belongs to the opponent { tmp_idx = get_dir_idx_ptr(tmp_idx); if 
(tmp_idx == 32) continue; result = GET_VAL_BOARD(tmp_idx, board); if (!IS_PIECE(result)) // check if tile behind opponents's piece is empty { if (!GET_BEATING_POS_FLAG(move_pos)) { // set general beating flag if no beating move was found previously, clearing move_pos[2] not necessary move_counter = 0; SET_BEATING_POS_FLAG(move_pos); } SET_PIECE_BEATING_FLAG(direction, move_pos); // set direction beating flag ++move_counter; } } else if (!IS_PIECE(result) && !GET_BEATING_POS_FLAG(move_pos)) { SET_PIECE_NONBEATING_FLAG(direction, move_pos); // set empty tile in direction flag ++move_counter; } } } SET_NUM_OF_MOVES(move_pos, move_counter); } // move piece in the direction specified by get_dir_idx_ptr function pointer, reaching last row promotes Man to King // !!! - no game logic is checked in this function - correct moves are guaranteed by get_move_possibility and get_piece_move_pos __host__ __device__ void move_piece(unsigned int board[4], unsigned int& cur_tile_idx, unsigned int (*get_dir_idx_ptr)(unsigned int&)) { if (cur_tile_idx > 31) return; // safety guard unsigned int other_tile_idx = get_dir_idx_ptr(cur_tile_idx); if (other_tile_idx == 32) return; // do not move out of bounds unsigned int cur_tile = GET_VAL_BOARD(cur_tile_idx, board); if (!IS_PIECE(GET_VAL_BOARD(other_tile_idx, board))) // empty tile - move by one in 'direction', nonbeating { SET_VAL_BOARD(other_tile_idx, cur_tile, board); SET_VAL_BOARD(cur_tile_idx, 0, board); } else // not empty tile - move by two in 'direction', beating { if (get_dir_idx_ptr(other_tile_idx) == 32) return; // do not move out of bounds SET_VAL_BOARD(other_tile_idx, 0, board); SET_VAL_BOARD(cur_tile_idx, 0, board); other_tile_idx = get_dir_idx_ptr(other_tile_idx); SET_VAL_BOARD(other_tile_idx, cur_tile, board); } // if reached tile is last row - promote to king if ((!IS_KING(cur_tile)) && ((IS_WHITE(cur_tile) && other_tile_idx < 4) || (IS_BLACK(cur_tile) && other_tile_idx > 27))) SET_VAL_BOARD(other_tile_idx, (cur_tile | 1), board); // promote to king } //////////////////////////////////////////////////////////////////////////////// - game loop and players void game_loop(unsigned int board[4], void (*white_player)(unsigned int*, unsigned int*), void (*black_player)(unsigned int*, unsigned int*)) { unsigned int move_pos[4]; get_move_possibility(board, move_pos); while (0 != (GET_NUM_OF_MOVES(move_pos))) // end game if noone can move { system("cls"); draw_board(board); std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" << BG_BLACK_FG_WHITE << std::endl << std::endl; if (GET_TURN_FLAG(board)) white_player(board, move_pos); else black_player(board, move_pos); get_move_possibility(board, move_pos); } } void human_player(unsigned int board[4], unsigned int move_pos[4]) { unsigned int choosen_idx_tile, choosen_idx_dir, dir; char cords[2]; bool board_beating_flag, beating_sequence_in_progress = false, was_king_before_move; // lambdas are for updating displayed information auto redraw_beginning = [board]() { system("cls"); draw_board(board); std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" 
<< BG_BLACK_FG_WHITE << std::endl << std::endl; }; auto redraw_first_stage = [board, move_pos, redraw_beginning]() { redraw_beginning(); get_move_possibility(board, move_pos); disp_moveable_pieces(board, move_pos); std::cout << std::endl; }; auto redraw_second_stage = [board, move_pos, &choosen_idx_tile, redraw_beginning]() { redraw_beginning(); get_piece_move_pos(board, move_pos, choosen_idx_tile); disp_possible_dirs(board, move_pos, choosen_idx_tile); std::cout << std::endl; }; human_player_reset: while (true) // piece choice loop { redraw_first_stage(); get_cords_from_console(cords); choosen_idx_tile = translate_cords_to_idx(cords); // choose tile with piece to be moved board_beating_flag = GET_BEATING_POS_FLAG(move_pos); get_piece_move_pos(board, move_pos, choosen_idx_tile); if (0 == (GET_NUM_OF_MOVES(move_pos))) { std::cout << std::endl << "This piece cannot move!" << std::endl << "Please choose a different piece!" << std::endl << std::endl; system("pause"); continue; } else if (board_beating_flag != GET_BEATING_POS_FLAG(move_pos)) // force beating { std::cout << std::endl << "BEATING POSSIBLE!" << std::endl << "Please choose a different piece!" << std::endl << std::endl; system("pause"); continue; } break; } while (true) // move sequence loop { redraw_second_stage(); get_cords_from_console(cords); choosen_idx_dir = translate_cords_to_idx(cords); // choose tile in the dir to move (in distance 1 (diagonally) from idx_tile) unsigned int (*get_dir_idx_ptr)(unsigned int&); for (dir = 0; dir < 4; ++dir) { if (dir < 2 && choosen_idx_dir > choosen_idx_tile) // idx_dir > idx_tile only if the chosen tile is in down 'dir', so skip first two (upper) 'dir' continue; switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - human_player"; system("pause"); exit(EXIT_FAILURE); } if (choosen_idx_dir != get_dir_idx_ptr(choosen_idx_tile)) // skip dir if idx_dir is not in distance 1 continue; if (GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) // move is beating { was_king_before_move = IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))); move_piece(board, choosen_idx_tile, get_dir_idx_ptr); choosen_idx_tile = get_dir_idx_ptr(choosen_idx_dir); if (was_king_before_move != (IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))))) // stop beating sequence and end turn if promotion to king happens after a move { FLIP_TURN_FLAG(board); return; } break; } else if (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos)) // move is nonbeating { move_piece(board, choosen_idx_tile, get_dir_idx_ptr); FLIP_TURN_FLAG(board); return; } std::cout << std::endl << "Impossible move!" << std::endl << "Please choose a different move!" << std::endl << std::endl; system("pause"); if (!beating_sequence_in_progress) // reset to piece choice - if invalid first move was choosen goto human_player_reset; } if (dir == 4) // this is visited only if idx_dir was not in distance 1 from idx_tile { std::cout << std::endl << "Impossible move!" << std::endl << "Please choose a different move!" 
<< std::endl << std::endl; system("pause"); if (!beating_sequence_in_progress) // reset to piece choice - if invalid first move was choosen goto human_player_reset; else continue; } get_piece_move_pos(board, move_pos, choosen_idx_tile); if (!GET_BEATING_POS_FLAG(move_pos)) break; // end turn if no more beating possible in current sequence beating_sequence_in_progress = true; } FLIP_TURN_FLAG(board); } void random_player(unsigned int board[4], unsigned int move_pos[4]) { std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> dist(0, 0); unsigned int choosen_idx_tile, choosen_idx_dir, dir = 0, dir_idx_upper_bound, dir_idx_counter = 0; bool beating_sequence_in_progress = false, was_king_before_move; unsigned int (*get_dir_idx_ptr)(unsigned int&); // choose tile with piece to be moved get_move_possibility(board, move_pos); dist = std::uniform_int_distribution<>(0, ((GET_NUM_OF_MOVES(move_pos)) - 1)); choosen_idx_tile = dist(gen); choosen_idx_tile = GET_VAL_MOVE_POS(choosen_idx_tile, move_pos); do { // choose tile in the dir to move (in distance 1 (diagonally) from idx_tile) // the rng dir choice is done on the interval [0;n-1] where n is the number of dirs with valid move choices get_piece_move_pos(board, move_pos, choosen_idx_tile); dir_idx_upper_bound = (GET_NUM_OF_MOVES(move_pos)) - 1; // this is guaranteed o be >= 0 if the game is in progress dist = std::uniform_int_distribution<>(0, dir_idx_upper_bound); choosen_idx_dir = dist(gen); // dir_idx_counter is only incremented if a possible move in 'dir' is encountered but is not the chosen one for (dir = 0, dir_idx_counter = 0; dir_idx_counter <= dir_idx_upper_bound && dir < 4; ++dir) { switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - random_player"; system("pause"); exit(EXIT_FAILURE); } if (dir_idx_counter == choosen_idx_dir); // proceed to make a move after dir is a correct idx else if ((GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) || (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos))) { ++dir_idx_counter; continue; } else continue; if (GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) // move is beating { was_king_before_move = IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))); choosen_idx_dir = get_dir_idx_ptr(choosen_idx_tile); move_piece(board, choosen_idx_tile, get_dir_idx_ptr); choosen_idx_tile = get_dir_idx_ptr(choosen_idx_dir); if (was_king_before_move != (IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))))) // stop beating sequence and end turn if promotion to king happens after a move { FLIP_TURN_FLAG(board); return; } break; } else if (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos)) // move is nonbeating { move_piece(board, choosen_idx_tile, get_dir_idx_ptr); FLIP_TURN_FLAG(board); return; } } if (dir == 4) { system("cls"); std::cout << "ERROR - random_player"; system("pause"); exit(EXIT_FAILURE); } get_piece_move_pos(board, move_pos, choosen_idx_tile); if (!GET_BEATING_POS_FLAG(move_pos)) break; // end turn if no more beating possible in current sequence beating_sequence_in_progress = true; } while (beating_sequence_in_progress); FLIP_TURN_FLAG(board); } //////////////////////////////////////////////////////////////////////////////// - MCTS unsigned int 
simulate_game_CPU(unsigned int board[4]) { unsigned int move_pos[4]; get_move_possibility(board, move_pos); while (0 != (GET_NUM_OF_MOVES(move_pos))) // end game if noone can move { random_player(board, move_pos); get_move_possibility(board, move_pos); } get_end_state(board); return (board[0] & 2048 ? 2 : 0) | (board[0] & 128 ? 1 : 0); } // traverses the sequence tree like DFS unsigned int count_beating_sequences_for_piece_dir(unsigned int board[4], unsigned int cur_tile_idx, unsigned int dir) { unsigned int piece_pos[4], tmp_board[4]{}, possible_moves = 0, dir_tile_idx; bool was_king_before_move; unsigned int (*get_dir_idx_ptr)(unsigned int&); tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; get_piece_move_pos(tmp_board, piece_pos, cur_tile_idx); switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - count_beating_sequences_for_piece_dir"; system("pause"); exit(EXIT_FAILURE); } if (GET_BEATING_POS_FLAG(piece_pos) && GET_PIECE_BEATING_FLAG(dir, piece_pos)) { was_king_before_move = IS_KING((GET_VAL_BOARD(cur_tile_idx, tmp_board))); dir_tile_idx = get_dir_idx_ptr(cur_tile_idx); move_piece(tmp_board, cur_tile_idx, get_dir_idx_ptr); cur_tile_idx = get_dir_idx_ptr(dir_tile_idx); ++possible_moves; if (was_king_before_move != (IS_KING((GET_VAL_BOARD(cur_tile_idx, tmp_board))))) // stop counting if promotion to king happens after a move { return possible_moves; } get_piece_move_pos(tmp_board, piece_pos, cur_tile_idx); if (GET_BEATING_POS_FLAG(piece_pos)) // check if more beatings in sequence { possible_moves = 0; for (unsigned int dir = 0; dir < 4; ++dir) possible_moves += count_beating_sequences_for_piece_dir(tmp_board, cur_tile_idx, dir); } } return possible_moves; } void MCTS_CPU_player(unsigned int board[4], unsigned int move_pos[4]) { unsigned int*** first_layer, * sequence_count, * selected_tile, choosable_piece_count = 0; double** success_rate, ** tries; #ifdef MEASURE_TIME std::chrono::steady_clock::time_point start, stop; std::chrono::duration<double, std::milli> elapsed; start = std::chrono::high_resolution_clock::now(); #endif // MEASURE_TIME // allocate memory for first layer get_move_possibility(board, move_pos); choosable_piece_count = GET_NUM_OF_MOVES(move_pos); first_layer = new unsigned int** [choosable_piece_count]; sequence_count = new unsigned int[choosable_piece_count]; selected_tile = new unsigned int[choosable_piece_count]; success_rate = new double* [choosable_piece_count]; tries = new double* [choosable_piece_count]; // count needed size and save sequence_count for (unsigned int i = 0; i < choosable_piece_count; ++i) { unsigned int possible_moves = 0; selected_tile[i] = GET_VAL_MOVE_POS(i, move_pos); if (GET_BEATING_POS_FLAG(move_pos)) for (unsigned int dir = 0; dir < 4; ++dir) possible_moves += count_beating_sequences_for_piece_dir(board, selected_tile[i], dir); else { get_piece_move_pos(board, move_pos, selected_tile[i]); possible_moves = GET_NUM_OF_MOVES(move_pos); get_move_possibility(board, move_pos); } sequence_count[i] = possible_moves; first_layer[i] = new unsigned int* [sequence_count[i]]; success_rate[i] = new double[sequence_count[i]]; tries[i] = new double[sequence_count[i]]; for (unsigned int j = 0; j < sequence_count[i]; ++j) { first_layer[i][j] = new unsigned int[4]{}; success_rate[i][j] 
= 0; tries[i][j] = 0; } } // build first layer for (unsigned int i = 0; i < choosable_piece_count; ++i) { unsigned int tmp_board[4]; tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; get_piece_move_pos(tmp_board, move_pos, selected_tile[i]); if (!GET_BEATING_POS_FLAG(move_pos)) { if (GET_NUM_OF_MOVES(move_pos) > 4) exit(EXIT_FAILURE); for (unsigned int j = 0, dir = 0; dir < 4 && j < sequence_count[i]; ++dir) { tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; if (!GET_PIECE_NONBEATING_FLAG(dir, move_pos)) continue; unsigned int (*get_dir_idx_ptr)(unsigned int&); switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - MCTS_CPU_player"; system("pause"); exit(EXIT_FAILURE); } move_piece(tmp_board, selected_tile[i], get_dir_idx_ptr); first_layer[i][j][0] = tmp_board[0]; first_layer[i][j][1] = tmp_board[1]; first_layer[i][j][2] = tmp_board[2]; first_layer[i][j][3] = tmp_board[3]; FLIP_TURN_FLAG(first_layer[i][j]); ++j; } } else // this visits nodes in the tree similarly as in count_beating_sequences_for_piece_dir { unsigned int chaser = 0, j = 0, tmp_count = 0, cur_tile_idx = selected_tile[i]; while (j < sequence_count[i]) { for (unsigned int dir = 0; dir < 4; ++dir) { tmp_count = count_beating_sequences_for_piece_dir(tmp_board, cur_tile_idx, dir); if (!tmp_count) continue; chaser += tmp_count; if (chaser <= j) continue; unsigned int (*get_dir_idx_ptr)(unsigned int&); switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - MCTS_CPU_player"; system("pause"); exit(EXIT_FAILURE); } move_piece(tmp_board, cur_tile_idx, get_dir_idx_ptr); cur_tile_idx = get_dir_idx_ptr(cur_tile_idx); cur_tile_idx = get_dir_idx_ptr(cur_tile_idx); } chaser = chaser - tmp_count; if (((sequence_count[i] - j) != 1 && chaser <= j) || chaser < j) continue; first_layer[i][j][0] = tmp_board[0]; first_layer[i][j][1] = tmp_board[1]; first_layer[i][j][2] = tmp_board[2]; first_layer[i][j][3] = tmp_board[3]; FLIP_TURN_FLAG(first_layer[i][j]); cur_tile_idx = selected_tile[i]; tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; chaser = 0; ++j; } } } #ifdef DEBUG // test if layer build correctly - debug for (unsigned int i = 0; i < choosable_piece_count; ++i) { for (unsigned int j = 0; j < sequence_count[i]; ++j) { system("cls"); draw_board(board); std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" << BG_BLACK_FG_WHITE << std::endl << std::endl; std::cout << std::endl; draw_board(first_layer[i][j]); std::cout << std::endl << (GET_TURN_FLAG(first_layer[i][j]) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(first_layer[i][j]) ? "White" : "Black") << "'s turn!" 
<< BG_BLACK_FG_WHITE << std::endl << std::endl; system("pause"); } } #endif // DEBUG #ifdef MEASURE_TIME stop = std::chrono::high_resolution_clock::now(); elapsed = (stop - start); std::cout << "CPU - First Layer Building time: " << elapsed.count() << " ms" << std::endl; start = std::chrono::high_resolution_clock::now(); #endif // MEASURE_TIME // run simulations { std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> dist1, dist2; unsigned int piece_choice, sequence_choice, simulation_result, tmp_board[4]; dist1 = std::uniform_int_distribution<>(0, choosable_piece_count - 1); unsigned int possible_sequences = 0; for (unsigned int i = 0; i < choosable_piece_count; ++i) possible_sequences += sequence_count[i]; for (unsigned int i = 0; i < possible_sequences * THREADS_PER_BLOCK * BLOCKS_PER_SEQUENCE_X * BLOCKS_PER_SEQUENCE_Y * BLOCKS_PER_SEQUENCE_Z; ++i) { piece_choice = dist1(gen); dist2 = std::uniform_int_distribution<>(0, sequence_count[piece_choice] - 1); sequence_choice = dist2(gen); tmp_board[0] = first_layer[piece_choice][sequence_choice][0]; tmp_board[1] = first_layer[piece_choice][sequence_choice][1]; tmp_board[2] = first_layer[piece_choice][sequence_choice][2]; tmp_board[3] = first_layer[piece_choice][sequence_choice][3]; simulation_result = simulate_game_CPU(tmp_board); if (!simulation_result) continue; else if (simulation_result == 3) success_rate[piece_choice][sequence_choice] += 0.5; else if ((simulation_result == 2 && GET_TURN_FLAG(board)) || (simulation_result == 1 && GET_TURN_FLAG(board))) success_rate[piece_choice][sequence_choice] += 1.0; tries[piece_choice][sequence_choice] += 1.0; } } #ifdef MEASURE_TIME stop = std::chrono::high_resolution_clock::now(); elapsed = (stop - start); std::cout << std::endl << "CPU - Simulation time: " << std::chrono::duration_cast<std::chrono::seconds>(elapsed).count() << " s" << std::endl; start = std::chrono::high_resolution_clock::now(); #endif // MEASURE_TIME // extract success rate for (unsigned int i = 0; i < choosable_piece_count; ++i) for (unsigned int j = 0; j < sequence_count[i]; ++j) if (tries[i][j] > 0) success_rate[i][j] /= tries[i][j]; // make a move { double max = -1.0; unsigned int idx1, idx2; for (unsigned int i = 0; i < choosable_piece_count; ++i) for (unsigned int j = 0; j < sequence_count[i]; ++j) if (success_rate[i][j] > max) { max = success_rate[i][j]; idx1 = i; idx2 = j; } board[0] = first_layer[idx1][idx2][0]; board[1] = first_layer[idx1][idx2][1]; board[2] = first_layer[idx1][idx2][2]; board[3] = first_layer[idx1][idx2][3]; } #ifdef MEASURE_TIME stop = std::chrono::high_resolution_clock::now(); elapsed = (stop - start); std::cout << std::endl << "CPU - Choosing move time: " << elapsed.count() << " ms" << std::endl << std::endl; system("pause"); #endif // MEASURE_TIME // deallocate memory for first layer for (unsigned int i = 0; i < choosable_piece_count; ++i) { for (unsigned int j = 0; j < sequence_count[i]; ++j) delete[] first_layer[i][j]; delete[] first_layer[i]; delete[] success_rate[i]; delete[] tries[i]; } delete[] first_layer; delete[] success_rate; delete[] tries; delete[] sequence_count; } void MCTS_GPU_player(unsigned int board[4], unsigned int move_pos[4]) { thrust::host_vector<unsigned int> h_first_layer; thrust::device_vector<unsigned int> d_first_layer; thrust::device_vector<float> d_results; unsigned int* sequence_count, * selected_tile, choosable_piece_count = 0, possible_sequences = 0; float* success_rates; #ifdef MEASURE_TIME float elapsedGPU; hipEvent_t startGPU, stopGPU; 
hipEventCreate(&startGPU); hipEventCreate(&stopGPU); std::chrono::steady_clock::time_point startCPU, stopCPU; std::chrono::duration<double, std::milli> elapsedCPU; startCPU = std::chrono::high_resolution_clock::now(); #endif // MEASURE_TIME // allocate memory for computing first layer get_move_possibility(board, move_pos); choosable_piece_count = GET_NUM_OF_MOVES(move_pos); sequence_count = new unsigned int[choosable_piece_count]; selected_tile = new unsigned int[choosable_piece_count]; for (unsigned int i = 0; i < choosable_piece_count; ++i) { unsigned int possible_moves = 0; selected_tile[i] = GET_VAL_MOVE_POS(i, move_pos); if (GET_BEATING_POS_FLAG(move_pos)) for (unsigned int dir = 0; dir < 4; ++dir) possible_moves += count_beating_sequences_for_piece_dir(board, selected_tile[i], dir); else { get_piece_move_pos(board, move_pos, selected_tile[i]); possible_moves = GET_NUM_OF_MOVES(move_pos); get_move_possibility(board, move_pos); } sequence_count[i] = possible_moves; possible_sequences += possible_moves; } // allocate memory for host_vector h_first_layer = thrust::host_vector<unsigned int>(static_cast<size_t>(possible_sequences) * 4); d_results = thrust::device_vector<float>(static_cast<size_t>(possible_sequences) * THREADS_PER_BLOCK * BLOCKS_PER_SEQUENCE_X * BLOCKS_PER_SEQUENCE_Y * BLOCKS_PER_SEQUENCE_Z); success_rates = new float[possible_sequences * BLOCKS_PER_SEQUENCE_X * BLOCKS_PER_SEQUENCE_Y * BLOCKS_PER_SEQUENCE_Z]; // build first layer for (unsigned int host_idx = 0, i = 0; i < choosable_piece_count; ++i) { unsigned int tmp_board[4]; tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; get_piece_move_pos(tmp_board, move_pos, selected_tile[i]); if (!GET_BEATING_POS_FLAG(move_pos)) { if (GET_NUM_OF_MOVES(move_pos) > 4) exit(EXIT_FAILURE); for (unsigned int j = 0, dir = 0; dir < 4 && j < sequence_count[i]; ++dir) { tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; if (!GET_PIECE_NONBEATING_FLAG(dir, move_pos)) continue; unsigned int (*get_dir_idx_ptr)(unsigned int&); switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - MCTS_GPU_player"; system("pause"); exit(EXIT_FAILURE); } move_piece(tmp_board, selected_tile[i], get_dir_idx_ptr); FLIP_TURN_FLAG(tmp_board); h_first_layer[static_cast<size_t>(host_idx)] = tmp_board[0]; h_first_layer[static_cast<size_t>(host_idx) + 1] = tmp_board[1]; h_first_layer[static_cast<size_t>(host_idx) + 2] = tmp_board[2]; h_first_layer[static_cast<size_t>(host_idx) + 3] = tmp_board[3]; host_idx += 4; ++j; } } else // this visits nodes in the tree similarly as in count_beating_sequences_for_piece_dir { unsigned int chaser = 0, j = 0, tmp_count = 0, cur_tile_idx = selected_tile[i]; while (j < sequence_count[i]) { for (unsigned int dir = 0; dir < 4; ++dir) { tmp_count = count_beating_sequences_for_piece_dir(tmp_board, cur_tile_idx, dir); if (!tmp_count) continue; chaser += tmp_count; if (chaser <= j) continue; unsigned int (*get_dir_idx_ptr)(unsigned int&); switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - 
MCTS_GPU_player"; system("pause"); exit(EXIT_FAILURE); } move_piece(tmp_board, cur_tile_idx, get_dir_idx_ptr); cur_tile_idx = get_dir_idx_ptr(cur_tile_idx); cur_tile_idx = get_dir_idx_ptr(cur_tile_idx); } chaser = chaser - tmp_count; if (((sequence_count[i] - j) != 1 && chaser <= j) || chaser < j) continue; FLIP_TURN_FLAG(tmp_board); h_first_layer[static_cast<size_t>(host_idx)] = tmp_board[0]; h_first_layer[static_cast<size_t>(host_idx) + 1] = tmp_board[1]; h_first_layer[static_cast<size_t>(host_idx) + 2] = tmp_board[2]; h_first_layer[static_cast<size_t>(host_idx) + 3] = tmp_board[3]; cur_tile_idx = selected_tile[i]; tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; chaser = 0; host_idx += 4; ++j; } } } #ifdef MEASURE_TIME stopCPU = std::chrono::high_resolution_clock::now(); elapsedCPU = (stopCPU - startCPU); std::cout << "CPU - First Layer Building time: " << elapsedCPU.count() << " ms" << std::endl; #endif // MEASURE_TIME // deallocate memory used for computing first layer delete[] selected_tile; delete[] sequence_count; #ifdef DEBUG // test if layer build correctly - debug for (unsigned int i = 0; i < possible_sequences; ++i) { unsigned int tmp_board[4]; tmp_board[0] = h_first_layer[4 * i]; tmp_board[1] = h_first_layer[4 * i + 1]; tmp_board[2] = h_first_layer[4 * i + 2]; tmp_board[3] = h_first_layer[4 * i + 3]; system("cls"); draw_board(board); std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" << BG_BLACK_FG_WHITE << std::endl << std::endl; std::cout << std::endl; draw_board(tmp_board); std::cout << std::endl << (GET_TURN_FLAG(tmp_board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(tmp_board) ? "White" : "Black") << "'s turn!" 
<< BG_BLACK_FG_WHITE << std::endl << std::endl; system("pause"); } #endif // DEBUG #ifdef MEASURE_TIME hipEventRecord(startGPU); #endif // MEASURE_TIME // move data to GPU d_first_layer = h_first_layer; dim3 dimBlock(THREADS_PER_BLOCK, 1, 1); dim3 dimGrid(possible_sequences * BLOCKS_PER_SEQUENCE_X, BLOCKS_PER_SEQUENCE_Y, BLOCKS_PER_SEQUENCE_Z); thrust::device_vector<hiprandState_t> states(64); // init states for hiprand hipLaunchKernelGGL(( setup_kernel), dim3(1), dim3(64), 0, 0, thrust::raw_pointer_cast(states.begin().base())); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("%s\n", hipGetErrorString(err)); hipDeviceSynchronize(); // run simulations hipLaunchKernelGGL(( MCTS_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, thrust::raw_pointer_cast(d_first_layer.begin().base()), thrust::raw_pointer_cast(states.begin().base()), thrust::raw_pointer_cast(d_results.begin().base()), possible_sequences); err = hipGetLastError(); if (err != hipSuccess) printf("%s\n", hipGetErrorString(err)); hipDeviceSynchronize(); #ifdef MEASURE_TIME hipEventRecord(stopGPU); hipEventSynchronize(stopGPU); hipEventElapsedTime(&elapsedGPU, startGPU, stopGPU); std::cout << std::endl << "GPU - Simulation time: " << elapsedGPU << " ms" << std::endl; hipEventRecord(startGPU); #endif // MEASURE_TIME for (unsigned int i = 0; i < possible_sequences * BLOCKS_PER_SEQUENCE_X * BLOCKS_PER_SEQUENCE_Y * BLOCKS_PER_SEQUENCE_Z; ++i) success_rates[i] = thrust::reduce(d_results.begin() + (THREADS_PER_BLOCK*i), d_results.begin() + (THREADS_PER_BLOCK * (i+1))) / (float)THREADS_PER_BLOCK; #ifdef MEASURE_TIME hipEventRecord(stopGPU); hipEventSynchronize(stopGPU); hipEventElapsedTime(&elapsedGPU, startGPU, stopGPU); std::cout << std::endl << "GPU - Results Reduction time: " << elapsedGPU << " ms" << std::endl; hipEventDestroy(startGPU); hipEventDestroy(stopGPU); startCPU = std::chrono::high_resolution_clock::now(); #endif // MEASURE_TIME // sum success_rates for each sequence for (unsigned int i = possible_sequences; i < possible_sequences * BLOCKS_PER_SEQUENCE_X * BLOCKS_PER_SEQUENCE_Y * BLOCKS_PER_SEQUENCE_Z; ++i) success_rates[i % possible_sequences] += success_rates[i]; // make a move { double max = -1.0; unsigned int idx; for (unsigned int i = 0; i < possible_sequences; ++i) if (success_rates[i] > max) { max = success_rates[i]; idx = i % possible_sequences; } board[0] = h_first_layer[4 * idx]; board[1] = h_first_layer[4 * idx + 1]; board[2] = h_first_layer[4 * idx + 2]; board[3] = h_first_layer[4 * idx + 3]; } #ifdef MEASURE_TIME stopCPU = std::chrono::high_resolution_clock::now(); elapsedCPU = (stopCPU - startCPU); std::cout << std::endl << "CPU - Choosing move time: " << elapsedCPU.count() << " ms" << std::endl << std::endl; system("pause"); #endif // MEASURE_TIME delete[] success_rates; } __global__ void MCTS_kernel(const unsigned int* d_first_layer, hiprandState_t* states, float* d_results, const unsigned int possible_sequences) { const unsigned int tid = threadIdx.x; const unsigned int bid = blockIdx.x + blockDim.y * (blockIdx.y + blockIdx.z * blockDim.z); unsigned int tmp_board[4]; tmp_board[0] = d_first_layer[4 * (bid % possible_sequences)]; tmp_board[1] = d_first_layer[4 * (bid % possible_sequences) + 1]; tmp_board[2] = d_first_layer[4 * (bid % possible_sequences) + 2]; tmp_board[3] = d_first_layer[4 * (bid % possible_sequences) + 3]; unsigned int simulation_result = simulate_game_GPU(tmp_board, states, possible_sequences); if (!simulation_result) d_results[tid + THREADS_PER_BLOCK * bid] = 0.0f; else if 
(simulation_result == 3) d_results[tid + THREADS_PER_BLOCK * bid] = 0.5f; else if ((simulation_result == 2 && GET_TURN_FLAG(tmp_board)) || (simulation_result == 1 && GET_TURN_FLAG(tmp_board))) d_results[tid + THREADS_PER_BLOCK * bid] = 1.0f; } __global__ void setup_kernel(hiprandState_t* states) { int id = threadIdx.x; hiprand_init(1234, id, 0, &states[id]); } __device__ float simulate_game_GPU(unsigned int board[4], hiprandState_t* states, const unsigned int possible_sequences) { unsigned int id = (blockIdx.x + blockDim.y * (blockIdx.y + blockIdx.z * blockDim.z)) % possible_sequences; unsigned int move_pos[4]; get_move_possibility(board, move_pos); while (0 != (GET_NUM_OF_MOVES(move_pos))) // end game if noone can move { random_player_GPU(board, move_pos, &states[id]); get_move_possibility(board, move_pos); } get_end_state(board); return (board[0] & 2048 ? 2 : 0) | (board[0] & 128 ? 1 : 0); } __device__ void random_player_GPU(unsigned int board[4], unsigned int move_pos[4], hiprandState_t* state) { unsigned int choosen_idx_tile, choosen_idx_dir, dir = 0, dir_idx_upper_bound, dir_idx_counter = 0; bool beating_sequence_in_progress = false, was_king_before_move; unsigned int (*get_dir_idx_ptr)(unsigned int&); // choose tile with piece to be moved get_move_possibility(board, move_pos); choosen_idx_tile = hiprand(state) % (GET_NUM_OF_MOVES(move_pos)); choosen_idx_tile = GET_VAL_MOVE_POS(choosen_idx_tile, move_pos); do { // choose tile in the dir to move(in distance 1 (diagonally)from idx_tile) // the rng dir choice is done on the interval [0;n-1] where n is the number of dirs with valid move choices get_piece_move_pos(board, move_pos, choosen_idx_tile); dir_idx_upper_bound = GET_NUM_OF_MOVES(move_pos); choosen_idx_dir = hiprand(state) % dir_idx_upper_bound; // dir_idx_counter is only incremented if a possible move in 'dir' is encountered but is not the chosen one for (dir = 0, dir_idx_counter = 0; dir_idx_counter <= dir_idx_upper_bound && dir < 4; ++dir) { switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: return; } if (dir_idx_counter == choosen_idx_dir); // proceed to make a move after dir is a correct idx else if ((GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) || (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos))) { ++dir_idx_counter; continue; } else continue; if (GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) // move is beating { was_king_before_move = IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))); choosen_idx_dir = get_dir_idx_ptr(choosen_idx_tile); move_piece(board, choosen_idx_tile, get_dir_idx_ptr); choosen_idx_tile = get_dir_idx_ptr(choosen_idx_dir); if (was_king_before_move != (IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))))) // stop beating sequence and end turn if promotion to king happens after a move { FLIP_TURN_FLAG(board); return; } break; } else if (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos)) // move is nonbeating { move_piece(board, choosen_idx_tile, get_dir_idx_ptr); FLIP_TURN_FLAG(board); return; } } if (dir == 4) return; get_piece_move_pos(board, move_pos, choosen_idx_tile); if (!GET_BEATING_POS_FLAG(move_pos)) break; // end turn if no more beating possible in current sequence beating_sequence_in_progress = true; } while (beating_sequence_in_progress); 
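// beating sequence exhausted - no further captures were found above, so pass the turn to the opponent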
FLIP_TURN_FLAG(board); } //////////////////////////////////////////////////////////////////////////////// - user interaction void disp_moveable_pieces(unsigned int board[4], unsigned int move_pos[4]) { char cords[2]{ '-' }; std::cout << "Possible moves for " << (GET_TURN_FLAG(board) ? "white" : "black") << " - " << (GET_NUM_OF_MOVES(move_pos)) << std::endl; std::cout << "Tiles with moveable pieces: "; get_move_possibility(board, move_pos); for (unsigned int i = 0; i < GET_NUM_OF_MOVES(move_pos); ++i) { translate_idx_to_cords((GET_VAL_MOVE_POS(i, move_pos)), cords); std::cout << cords[0] << cords[1] << ' '; } std::cout << std::endl; } void disp_possible_dirs(unsigned int board[4], unsigned int move_pos[4], unsigned int& idx) { char cords[2]{ '-' }; translate_idx_to_cords(idx, cords); get_piece_move_pos(board, move_pos, idx); if (GET_NUM_OF_MOVES(move_pos)) { std::cout << "Moves possible for piece on " << cords[0] << cords[1] << " - " << (GET_NUM_OF_MOVES(move_pos)) << std::endl; if (GET_BEATING_POS_FLAG(move_pos)) std::cout << "BEATING POSSIBLE!" << std::endl; std::cout << "List of tiles to choose from: "; unsigned int (*get_dir_idx_ptr)(unsigned int&); for (unsigned int dir = 0; dir < 4; ++dir) { switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - disp_possible_dirs"; system("pause"); exit(EXIT_FAILURE); } translate_idx_to_cords(get_dir_idx_ptr(idx), cords); if (GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) std::cout << cords[0] << cords[1] << ' '; else if (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos)) std::cout << cords[0] << cords[1] << ' '; } std::cout << std::endl; } else std::cout << "Movement not possible for piece on " << cords[0] << cords[1] << std::endl; } void get_cords_from_console(char cords[2]) { while (true) { std::string input = ""; std::cout << "Please provide coordinates: "; std::getline(std::cin, input); if (input.size() != 2) { std::cout << "Incorrect input length!" << std::endl << std::endl; continue; } cords[0] = toupper(input[0]); cords[1] = toupper(input[1]); if ((cords[0] == 'A' || cords[0] == 'C' || cords[0] == 'E' || cords[0] == 'G') && (cords[1] == '2' || cords[1] == '4' || cords[1] == '6' || cords[1] == '8')) break; else if ((cords[0] == 'B' || cords[0] == 'D' || cords[0] == 'F' || cords[0] == 'H') && (cords[1] == '1' || cords[1] == '3' || cords[1] == '5' || cords[1] == '7')) break; std::cout << "Incorrect coordinates given!" 
<< std::endl << std::endl; } } unsigned int translate_cords_to_idx(const char cords[2]) { if (cords[1] < '0' || cords[1] > '8') return 32; // out of bounds unsigned int cord1 = cords[1] - '1'; // not '0' because we count cords from 1 switch (cords[0]) { case 'A': if (~cord1 & 1) return 32; return cord1 << 2; case 'B': if (cord1 & 1) return 32; return cord1 << 2; case 'C': if (~cord1 & 1) return 32; return (cord1 << 2) + 1; case 'D': if (cord1 & 1) return 32; return (cord1 << 2) + 1; case 'E': if (~cord1 & 1) return 32; return (cord1 << 2) + 2; case 'F': if (cord1 & 1) return 32; return (cord1 << 2) + 2; case 'G': if (~cord1 & 1) return 32; return (cord1 << 2) + 3; case 'H': if (cord1 & 1) return 32; return (cord1 << 2) + 3; default: return 32; } } void translate_idx_to_cords(unsigned int idx, char cords[2]) { if (idx > 31) { cords[0] = '-'; cords[1] = '-'; return; } else if (idx < 4) cords[1] = '1'; else if (idx >= 4 && idx < 8) cords[1] = '2'; else if (idx >= 8 && idx < 12) cords[1] = '3'; else if (idx >= 12 && idx < 16) cords[1] = '4'; else if (idx >= 16 && idx < 20) cords[1] = '5'; else if (idx >= 20 && idx < 24) cords[1] = '6'; else if (idx >= 24 && idx < 28) cords[1] = '7'; else if (idx >= 28 && idx < 32) cords[1] = '8'; if ((idx & 7) == 0) cords[0] = 'B'; else if ((idx & 7) == 1) cords[0] = 'D'; else if ((idx & 7) == 2) cords[0] = 'F'; else if ((idx & 7) == 3) cords[0] = 'H'; else if ((idx & 7) == 4) cords[0] = 'A'; else if ((idx & 7) == 5) cords[0] = 'C'; else if ((idx & 7) == 6) cords[0] = 'E'; else if ((idx & 7) == 7) cords[0] = 'G'; } void disp_end_state(unsigned int* board) { system("cls"); draw_board(board); get_end_state(board); if (board[0] & 2048 && board[0] & 128) std::cout << std::endl << "Game ended in a draw!" << std::endl << std::endl; else if (board[0] & 2048) std::cout << std::endl << BG_WHITE_FG_BLACK << "White won!" << BG_BLACK_FG_WHITE << std::endl << std::endl; else if (board[0] & 128) std::cout << std::endl << "Black won!" << std::endl << std::endl; else if (!board[0]) std::cout << std::endl << "Error occured!" << std::endl << std::endl; } //////////////////////////////////////////////////////////////////////////////// - game conclusion // saves end state in board[0], none - error, 1xxx xxxxx - black win, 1xxx xxxx xxxx - white win, both win - draw // after extracting: 0 - error, 1 - black win, 2 - white win, 3 - draw // to extract - (board[0] & 2048 ? 2 : 0) | (board[0] & 128 ? 1 : 0) __host__ __device__ void get_end_state(unsigned int board[4]) { unsigned int move_pos[4]; get_move_possibility(board, move_pos); for (unsigned int i = 0; i < 32; ++i) { move_pos[0] = GET_VAL_BOARD(i, board); if (IS_PIECE(move_pos[0])) { if (IS_WHITE(move_pos[0])) board[0] |= 2048; if (IS_BLACK(move_pos[0])) board[0] |= 128; } } } //////////////////////////////////////////////////////////////////////////////// - main int main(int argc, char** argv) { unsigned int board[4]; unsigned short menu_choice = 0; bool player_chosen = false; void (*white_player)(unsigned int*, unsigned int*); void (*black_player)(unsigned int*, unsigned int*); std::cout << BG_WHITE_FG_BLACK << BG_BLACK_FG_WHITE; system("cls"); //testing_function(); while (menu_choice != 2) { player_chosen = false; std::cout << BG_BBLUE_FG_BLACK << "!!! Monte-Carlo Tree Search Checkers !!!" << BG_BLACK_FG_WHITE << std::endl << std::endl; std::cout << "1. Start Game - Black Always Begins" << std::endl; std::cout << "2. 
Exit" << std::endl; std::cout << "Choice: "; std::cin >> menu_choice; switch (menu_choice) { case 1: while (!player_chosen) { system("cls"); std::cout << "1. Human Player" << std::endl; std::cout << "2. MCTS_CPU Player" << std::endl; std::cout << "3. MCTS_GPU Player" << std::endl; std::cout << BG_WHITE_FG_BLACK << "White" << BG_BLACK_FG_WHITE << " Player Choice: "; std::cin >> menu_choice; std::cout << std::endl; switch (menu_choice) { case 1: white_player = &human_player; player_chosen = true; break; case 2: white_player = &MCTS_CPU_player; player_chosen = true; break; case 3: white_player = &MCTS_GPU_player; player_chosen = true; break; default: system("cls"); std::cout << "Please provide a valid choice!" << std::endl << std::endl; } } player_chosen = false; while (!player_chosen) { system("cls"); std::cout << "1. Human Player" << std::endl; std::cout << "2. MCTS_CPU Player" << std::endl; std::cout << "3. MCTS_GPU Player" << std::endl; std::cout << "Black Player Choice: "; std::cin >> menu_choice; std::cout << std::endl; switch (menu_choice) { case 1: black_player = &human_player; player_chosen = true; break; case 2: black_player = &MCTS_CPU_player; player_chosen = true; break; case 3: black_player = &MCTS_GPU_player; player_chosen = true; break; default: system("cls"); std::cout << "Please provide a valid choice!" << std::endl << std::endl; } } menu_choice = 1; std::cin.ignore(); init_board(board); game_loop(board, white_player, black_player); disp_end_state(board); system("pause"); system("cls"); break; case 2: break; default: system("cls"); std::cout << "Please provide a valid choice!" << std::endl << std::endl; break; } } exit(EXIT_SUCCESS); } //////////////////////////////////////////////////////////////////////////////// - for debugging void testing_function() { unsigned int board[4]; unsigned int move_possibility[3]{}; //init_board(board); //draw_board(board); //test_get_move_possibility(board, move_possibility); //FLIP_TURN_FLAG(board); //test_get_move_possibility(board, move_possibility); //std::cout << std::endl; //std::cout << std::endl; ////test_get_idx_funs(board); ////std::cout << std::endl; //test_translate_cords_to_idx(); //test_translate_idx_to_cords(); //std::cout << std::endl; ////test_get_move_possibility_init_loop(board); ////std::cout << std::endl; ////test_get_piece_move_pos(board, move_possibility, 9, 6); init_board(board); board[0] = 1074020352; board[1] = 1178861808; board[2] = 102; board[3] = 419424; board[0] = 6569984; board[1] = 0; board[2] = 0; board[3] = 0; FLIP_TURN_FLAG(board); system("cls"); draw_board(board); std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" << BG_BLACK_FG_WHITE << std::endl << std::endl; get_move_possibility(board, move_possibility); disp_moveable_pieces(board, move_possibility); unsigned int idx = 5; disp_possible_dirs(board, move_possibility, idx); random_player(board, move_possibility); //move_piece(board, idx, &get_left_upper_idx); //move_piece(board, idx, &get_right_upper_idx); draw_board(board); //MCTS_GPU_player(board, move_possibility); //draw_board(board); //std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" 
<< BG_BLACK_FG_WHITE << std::endl << std::endl; //game_loop(board, MCTS_GPU_player, MCTS_GPU_player); //disp_end_state(board); system("pause"); //unsigned int game_count = 1000000; //std::chrono::steady_clock::time_point start, finish; //std::chrono::duration<double> elapsed; // //start = std::chrono::high_resolution_clock::now(); //for (unsigned int i = 0; i < game_count; ++i) //{ // init_board(board); // game_loop(board, &random_player, &random_player); // get_end_state(board); // //disp_end_state(board); //} //finish = std::chrono::high_resolution_clock::now(); //elapsed = (finish - start); //std::cout << "Games played: " << game_count << std::endl; //std::cout << "Elapsed time: " << elapsed.count() << std::endl; //std::cout << "Average time: " << elapsed.count() / game_count << std::endl; exit(EXIT_SUCCESS); } void test_get_idx_funs(unsigned int board[4]) { //test top unsigned int tmp = 0; std::cout << (32 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (32 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (4 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (5 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 1; std::cout << std::endl << (32 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (32 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (5 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (6 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 3; std::cout << std::endl << (32 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (32 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (7 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (32 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; // test even tmp = 4; std::cout << std::endl << (32 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (0 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (32 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (8 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 5; std::cout << std::endl << (0 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (1 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (8 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (9 == 
get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 7; std::cout << std::endl << (2 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (3 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (10 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (11 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; //test odd tmp = 8; std::cout << std::endl << (4 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (5 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (12 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (13 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 9; std::cout << std::endl << (5 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (6 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (13 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (14 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 11; std::cout << std::endl << (7 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (32 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (15 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (32 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; //test bottom tmp = 28; std::cout << std::endl << (32 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (24 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (32 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (32 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 29; std::cout << std::endl << (24 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (25 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (32 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (32 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 31; std::cout << std::endl << (26 == get_left_upper_idx(tmp)) << ": " << 
"Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (27 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (32 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (32 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; } void test_get_move_possibility(unsigned int board[4], unsigned int move_pos[4]) { get_move_possibility(board, move_pos); std::cout << std::endl << "Possible moves " << (GET_TURN_FLAG(board) ? "for white: " : "for black: ") << (GET_NUM_OF_MOVES(move_pos)) << std::endl; std::cout << "Indices of pawns possible to move: "; for (unsigned int i = 0; i < GET_NUM_OF_MOVES(move_pos); ++i) { std::cout << (GET_VAL_MOVE_POS(i, move_pos)) << ' '; } std::cout << std::endl; } void test_get_move_possibility_board_init(unsigned int board[4], unsigned int test_choice) { init_board(board); switch (test_choice) { case 0: // black bottom - outdated board[0] = 1717986918; //1st 2nd rows board[1] = 26214; //3rd 4th rows board[2] = 1145307136; //5th 6th rows board[3] = 1145324612; //7th 8th rows break; case 1: // test 1 - white forward beating // expected - white = 2 moves, idx : 22 23 // expected - black = 4 moves, idx : 8 9 10 11 board[2] = 1717986304; //5th 6th rows break; case 2: // test 2 - white no backward beating, black forward beating // expected - white = 2 moves, idx: 19 23 // expected - black = 2 moves, idx: 5 18 board[1] = 1078198368; board[2] = 1717986304; board[3] = 1717986822; break; case 3: // test 3 - black no backward beating // expected - white = 5 moves, idx: 9 20 21 22 23 // expected - black = 1 move, idx: 5 board[0] = 1078215748; board[1] = 1078198368; break; case 4: // test 4 // expected - white = 5 moves, idx: 9 20 21 22 23 // expected - black = 8 moves, idx: 0 1 4 6 7 12 13 15 board[0] = 1141130308; board[1] = 1078198368; break; case 5: // test 5 - black King backward beating // expected - white = 5 moves, idx: 9 20 21 22 23 // expected - black = 1 move, idx: 5 13 board[0] = 1078215748; board[1] = 1079246944; break; case 6: // test 6 - white King backward beating // expected - white = 1 move, idx: 9 // expected - black = 8 moves, idx: 0 1 4 6 7 12 13 15 board[0] = 1141130308; board[1] = 1078198384; break; case 7: // test 7 - promotion switch turn board[0] = 1073759296; board[1] = 17412; board[2] = 1617168128; board[3] = 1711695462; default: break; } } void test_get_move_possibility_init_loop(unsigned int board[4], int test_choice_lower_bound, int test_choice_upper_bound) { for (int i = test_choice_lower_bound; i < test_choice_upper_bound; ++i) { system("pause"); test_get_move_possibility_board_init(board, i); system("cls"); draw_board(board); std::cout << "Running test " << i << std::endl; unsigned int move_possibility[3]{}; test_get_move_possibility(board, move_possibility); FLIP_TURN_FLAG(board); test_get_move_possibility(board, move_possibility); FLIP_TURN_FLAG(board); std::cout << std::endl; std::cout << std::endl; test_translate_cords_to_idx(); std::cout << std::endl; } } void test_get_piece_move_pos(unsigned int board[4], unsigned int move_pos[4], unsigned int idx) { char cords[2]; translate_idx_to_cords(idx, cords); system("cls"); draw_board(board); test_translate_cords_to_idx(); test_translate_idx_to_cords(); std::cout << std::endl; test_get_move_possibility(board, move_pos); FLIP_TURN_FLAG(board); 
test_get_move_possibility(board, move_pos); FLIP_TURN_FLAG(board); std::cout << std::endl; get_piece_move_pos(board, move_pos, idx); if (GET_NUM_OF_MOVES(move_pos)) { std::cout << "Moves possible for piece on " << cords[0] << cords[1] << " - " << (GET_NUM_OF_MOVES(move_pos)) << std::endl; if (GET_BEATING_POS_FLAG(move_pos)) std::cout << "BEATING POSSIBLE!" << std::endl; std::cout << "List of tiles to choose from: "; unsigned int (*get_dir_idx_ptr)(unsigned int&); for (unsigned int dir = 0; dir < 4; ++dir) { switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: break; } translate_idx_to_cords(get_dir_idx_ptr(idx), cords); if (GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) std::cout << cords[0] << cords[1] << ' '; else if (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos)) std::cout << cords[0] << cords[1] << ' '; } std::cout << std::endl; } else std::cout << "Movement not possible for piece on " << cords[0] << cords[1] << std::endl; } void test_translate_cords_to_idx() { char cords[2] = { 'A', '1' }; for (char c2 = '1'; c2 < '9'; ++c2) { cords[1] = c2; for (char c1 = 'A'; c1 < 'I'; ++c1) { cords[0] = c1; unsigned int idx = translate_cords_to_idx(cords); std::cout << cords[0] << cords[1] << ": " << (32 == idx ? "--" : std::to_string(idx)) << '\t'; } std::cout << std::endl; } std::cout << std::endl; } void test_translate_idx_to_cords() { char cords[2] = { '-', '-' }; std::cout << '\t'; for (unsigned int idx = 0; idx < 32; ++idx) { translate_idx_to_cords(idx, cords); std::cout << (idx > 9 ? '\0' : ' ') << idx << ": " << cords[0] << cords[1] << "\t\t"; if ((idx & 3) == 3) std::cout << std::endl; if ((idx & 7) == 7) std::cout << '\t'; } std::cout << std::endl; } //void bench(unsigned int board[4]) //{ // std::chrono::steady_clock::time_point start, finish, start2, finish2; // std::chrono::duration<double> elapsed, elapsed2; // // start = std::chrono::high_resolution_clock::now(); // for (unsigned int i = 0; i < 1000000; ++i) // { // for (unsigned int idx = 0; idx < 32; ++idx) // { // // old - GET_VAL_BOARD_S(idx, board); // int tmp = GET_VAL_BOARD_S(idx, board) & 3; // //int tmp = GET_VAL_BOARD_S(idx, board) << 2; // //int tmp = GET_VAL_BOARD_S(idx, board) >> 2; // //int tmp = GET_VAL_BOARD_S(idx, board); // //int tmp = 16 | 123; // } // } // finish = std::chrono::high_resolution_clock::now(); // elapsed = (finish - start) / 1000000; // // start2 = std::chrono::high_resolution_clock::now(); // for (unsigned int i = 0; i < 1000000; ++i) // { // for (unsigned int idx = 0; idx < 32; ++idx) // { // // old - GET_VAL_BOARD_S2(idx, board); // int tmp = GET_VAL_BOARD_S(idx, board) % 4; // //int tmp = GET_VAL_BOARD_S(idx, board) * 4; // //int tmp = GET_VAL_BOARD_S(idx, board) / 4; // //int tmp = GET_VAL_BOARD_S(idx, board) / 4; // //int tmp = 16 ^ 123; // } // } // finish2 = std::chrono::high_resolution_clock::now(); // elapsed2 = (finish2 - start2) / 1000000; // // //old - std::cout << "Average time for GET_VAL_BOARD_S: " << elapsed.count() << std::endl; // //old - std::cout << "Average time for GET_VAL_BOARD_S2: " << elapsed2.count() << std::endl << std::endl; // std::cout << "Average time for & 3:\t" << elapsed.count() << std::endl; // std::cout << "Average time for % 4:\t" << elapsed2.count() << std::endl << std::endl; // //std::cout << "Average time for << 2:\t" 
<< elapsed.count() << std::endl; // //std::cout << "Average time for * 4:\t" << elapsed2.count() << std::endl << std::endl; // //std::cout << "Average time for >> 2:\t" << elapsed.count() << std::endl; // //std::cout << "Average time for / 4:\t" << elapsed2.count() << std::endl << std::endl; // //std::cout << "Average time for get:\t" << elapsed.count() << std::endl; // //std::cout << "Average time for get/4:\t" << elapsed2.count() << std::endl << std::endl; // //std::cout << "Average time for | :\t" << elapsed.count() << std::endl; // //std::cout << "Average time for ^ :\t" << elapsed2.count() << std::endl << std::endl; //}
00e36b5e79ce26f9aba8ca9cf83073d6bd5db3f0.cu
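// Illustrative note (worked example of the 4-bit tile encoding described in the header comments below):
// each unsigned int of board[4] packs 8 tiles; tile idx maps to word (idx >> 3) and nibble (idx & 7).
// After init_board(): board[1] == 17476 == 0x00004444, so GET_VAL_BOARD(9, board) yields 4 (a black man on row 3)
// and GET_VAL_BOARD(12, board) yields 0 (an empty tile on row 4).
// board[0] == 0x44444444 has bit 3 clear, so GET_TURN_FLAG(board) is false and black moves first,
// matching the "Start Game - Black Always Begins" menu entry in main().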
// includes, system #include <stdio.h> #include <iostream> #include <iomanip> #include <string> #include <random> #include <chrono> #include <algorithm> // includes, cuda #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <curand_kernel.h> // includes, thrust #include <thrust/host_vector.h> #include <thrust/device_vector.h> //////////////////////////////////////////////////////////////////////////////// #define BG_BBLUE_FG_BLACK "\033[104;30m" #define BG_BLUE_FG_BLACK "\033[44;30m" #define BG_BLUE_FG_WHITE "\033[44;37m" #define BG_BLACK_FG_WHITE "\033[0m" #define BG_WHITE_FG_BLACK "\033[30;107m" // 0 - 0000 = empty // 4 - 0100 = black man // 5 - 0101 = black king // 6 - 0110 = white man // 7 - 0111 = white king // // 8 - 1000 in (tile_idx = 0) is used to save turn flag (1 - white, 0 - black) // // 8 tiles saved in one unsigned int with encoding as above // example: 0100 0100 0100 0100 0000 0000 0000 0000 // board indexing: 7 6 5 4 3 2 1 0 //#define DEBUG; #define MEASURE_TIME #define THREADS_PER_BLOCK 1024 #define BLOCKS_PER_SEQUENCE_X 1024 #define BLOCKS_PER_SEQUENCE_Y 1 #define BLOCKS_PER_SEQUENCE_Z 1 //////////////////////////////////////////////////////////////////////////////// - board state macros #define SET_VAL_BOARD(idx, val, board) board[idx >> 3] ^= (board[idx >> 3] ^ val << ((idx & 7) << 2)) & (15 << ((idx & 7) << 2)) #define GET_VAL_BOARD(idx, board) board[idx >> 3] << 28 - ((idx & 7) << 2) >> 28 #define GET_VAL_BOARD_S(idx, board) idx > 31 ? 8 : board[idx >> 3] << 28 - ((idx & 7) << 2) >> 28 //#define IS_EMPTY(tile) (bool)(!tile) -> IS_PIECE instead - ALWAYS #define IS_PIECE(tile) (bool)(tile & 4) #define IS_WHITE(tile) (bool)(tile & 2) #define IS_BLACK(tile) (bool)(~tile & 2) #define IS_KING(tile) (bool)(tile & 1) #define FLIP_TURN_FLAG(board) board[0] ^= 8 #define GET_TURN_FLAG(board) (bool)(board[0] & 8) //////////////////////////////////////////////////////////////////////////////// - move_pos array macros #define GET_BEATING_POS_FLAG(move_pos) (bool)(move_pos[3] & 1) #define SET_BEATING_POS_FLAG(move_pos) move_pos[3] |= 1 #define GET_MOVE_CHECK_GUARD(move_pos) (bool)(move_pos[3] & 2) #define SET_MOVE_CHECK_GUARD(move_pos) move_pos[3] |= 2 #define CLEAR_MOVE_CHECK_GUARD(move_pos) move_pos[3] &= ~2 #define GET_NUM_OF_MOVES(move_pos) move_pos[3] >> 2 #define SET_NUM_OF_MOVES(move_pos, num_of_moves) move_pos[3] |= num_of_moves << 2 #define GET_VAL_MOVE_POS(idx, move_pos) move_pos[idx >> 2] << 24 - ((idx & 3) << 3) >> 24 #define SET_VAL_MOVE_POS(idx, val, move_pos) move_pos[idx >> 2] |= val << ((idx & 3) << 3) #define GET_PIECE_NONBEATING_FLAG(dir, move_pos) (bool)((move_pos[2] << 30 - (dir << 1) >> 30) & 1) #define SET_PIECE_NONBEATING_FLAG(dir, move_pos) move_pos[2] |= 1 << (dir << 1) #define GET_PIECE_BEATING_FLAG(dir, move_pos) (bool)((move_pos[2] << 30 - (dir << 1) >> 30) & 2) #define SET_PIECE_BEATING_FLAG(dir, move_pos) move_pos[2] |= 2 << (dir << 1) //------------------------------------------------------------------------------------------------------------------- void init_board(unsigned int board[4]); void draw_board(unsigned int board[4]); //////////////////////////////////////////////////////////////////////////////// - get tile idx in specific direction from current __host__ __device__ unsigned int get_left_upper_idx(unsigned int& cur_tile_idx); __host__ __device__ unsigned int get_right_upper_idx(unsigned int& cur_tile_idx); __host__ __device__ unsigned int get_left_lower_idx(unsigned int& cur_tile_idx); __host__ __device__ unsigned int 
get_right_lower_idx(unsigned int& cur_tile_idx); //////////////////////////////////////////////////////////////////////////////// - piece movement __host__ __device__ void get_move_possibility_loop_fun(unsigned int board[4], unsigned int move_pos[4], unsigned int& cur_idx, unsigned int& moves_idx); __host__ __device__ void get_move_possibility(unsigned int board[4], unsigned int move_pos[4]); __host__ __device__ void get_piece_move_pos(unsigned int board[4], unsigned int move_pos[4], unsigned int& idx); __host__ __device__ void move_piece(unsigned int board[4], unsigned int& cur_tile_idx, unsigned int (*get_dir_idx_ptr)(unsigned int&)); //////////////////////////////////////////////////////////////////////////////// - game loop and players void game_loop(unsigned int board[4], void (*white_player)(unsigned int*, unsigned int*), void (*black_player)(unsigned int*, unsigned int*)); void human_player(unsigned int board[4], unsigned int move_pos[4]); void random_player(unsigned int board[4], unsigned int move_pos[4]); //////////////////////////////////////////////////////////////////////////////// - MCTS unsigned int simulate_game_CPU(unsigned int board[4]); unsigned int count_beating_sequences_for_piece_dir(unsigned int board[4], unsigned int cur_tile_idx, unsigned int dir); void MCTS_CPU_player(unsigned int board[4], unsigned int move_pos[4]); void MCTS_GPU_player(unsigned int board[4], unsigned int move_pos[4]); __global__ void MCTS_kernel(const unsigned int* d_first_layer, curandState* states, float* d_results, const unsigned int possible_sequences); __global__ void setup_kernel(curandState* states); __device__ float simulate_game_GPU(unsigned int board[4], curandState* states, const unsigned int possible_sequences); __device__ void random_player_GPU(unsigned int board[4], unsigned int move_pos[4], curandState* state); //////////////////////////////////////////////////////////////////////////////// - user interaction void disp_moveable_pieces(unsigned int board[4], unsigned int move_pos[4]); void disp_possible_dirs(unsigned int board[4], unsigned int move_pos[4], unsigned int& idx); void get_cords_from_console(char cords[2]); unsigned int translate_cords_to_idx(const char cords[2]); void translate_idx_to_cords(unsigned int idx, char cords[2]); void disp_end_state(unsigned int* board); //////////////////////////////////////////////////////////////////////////////// - game conclusion __host__ __device__ void get_end_state(unsigned int board[4]); //////////////////////////////////////////////////////////////////////////////// - for debugging void testing_function(); void test_get_idx_funs(unsigned int board[4]); void test_get_move_possibility(unsigned int board[4], unsigned int move_pos[4]); void test_get_move_possibility_board_init(unsigned int board[4], unsigned int test_choice); void test_get_move_possibility_init_loop(unsigned int board[4], int test_choice_lower_bound = 1, int test_choice_upper_bound = 7); void test_get_piece_move_pos(unsigned int board[4], unsigned int move_pos[4], unsigned int idx); void test_translate_cords_to_idx(); void test_translate_idx_to_cords(); //void bench(unsigned int board[4]); //------------------------------------------------------------------------------------------------------------------- void init_board(unsigned int board[4]) { // white bottom board[0] = 1145324612; //1st 2nd rows board[1] = 17476; //3rd 4th rows board[2] = 1717960704; //5th 6th rows board[3] = 1717986918; //7th 8th rows } void draw_board(unsigned int board[4]) { unsigned int 
left_side_idx = 1; // left_side_idx - labels counter bool white_first = true; // flag for alternating colors std::cout << BG_BBLUE_FG_BLACK << " "; for (char c = 'A'; c != 'I'; ++c) // print labels std::cout << ' ' << c << ' '; std::cout << BG_BLACK_FG_WHITE << std::endl; for (unsigned int i = 0; i < 4; ++i) // i = board_idx { for (unsigned int j = 0; j < 8; ++j) // j = tile_in_board_idx { unsigned int tile = board[i] << (28 - (j << 2)) >> 28; if (j == 0 || j == 4) std::cout << BG_BBLUE_FG_BLACK << ' ' << left_side_idx++ << ' '; // print label if (white_first) std::cout << BG_BBLUE_FG_BLACK << " "; if (IS_PIECE(tile)) { if (IS_WHITE(tile)) std::cout << BG_BLUE_FG_WHITE; else std::cout << BG_BLUE_FG_BLACK; if (IS_KING(tile)) std::cout << " K "; else std::cout << " @ "; } else std::cout << BG_BLUE_FG_BLACK << " "; if (!white_first) std::cout << BG_BBLUE_FG_BLACK << " "; if ((j & 3) == 3) // swap colors for second row { std::cout << BG_BLACK_FG_WHITE << std::endl; white_first = !white_first; } } } } //////////////////////////////////////////////////////////////////////////////// - get tile idx in specific direction from current (32 - cur_tile_idx out of bound) __host__ __device__ unsigned int get_left_upper_idx(unsigned int& cur_tile_idx) { if (cur_tile_idx > 31 || !(cur_tile_idx >> 2)) return 32; // second condition checks if is top row if (cur_tile_idx & 4) // even row (counting from 1) { if (cur_tile_idx & 3) // if not left-most return cur_tile_idx - 5; return 32; } else // odd row { return cur_tile_idx - 4; } } __host__ __device__ unsigned int get_right_upper_idx(unsigned int& cur_tile_idx) { if (cur_tile_idx > 31 || !(cur_tile_idx >> 2)) return 32; // second condition checks if is top row if (cur_tile_idx & 4) // even row (counting from 1) { return cur_tile_idx - 4; } else // odd row { if (~cur_tile_idx & 3) // if not right-most return cur_tile_idx - 3; return 32; } } __host__ __device__ unsigned int get_left_lower_idx(unsigned int& cur_tile_idx) { if (cur_tile_idx > 31 || (cur_tile_idx >> 2) == 7) return 32; // second condition checks if is bottom row if (cur_tile_idx & 4) // even row (counting from 1) { if (cur_tile_idx & 3) // if not left-most return cur_tile_idx + 3; return 32; } else // odd row { return cur_tile_idx + 4; } } __host__ __device__ unsigned int get_right_lower_idx(unsigned int& cur_tile_idx) { if (cur_tile_idx > 31 || (cur_tile_idx >> 2) == 7) return 32; // second condition checks if is bottom row if (cur_tile_idx & 4) // even row (counting from 1) { return cur_tile_idx + 4; } else // odd row { if (~cur_tile_idx & 3) // if not right-most return cur_tile_idx + 5; return 32; } } //////////////////////////////////////////////////////////////////////////////// - piece movement __host__ __device__ void get_move_possibility_loop_fun(unsigned int board[4], unsigned int move_pos[4], unsigned int& cur_idx, unsigned int& moves_idx) { unsigned int tile, tmp_idx, result; tile = GET_VAL_BOARD(cur_idx, board); // check if cur_idx tile holds a piece and if it belongs to the currently moving player if (IS_PIECE(tile) && (GET_TURN_FLAG(board) == IS_WHITE(tile))) { unsigned int (*get_dir_idx_ptr)(unsigned int&); for (unsigned int direction = 0; direction < 4; ++direction) { if (GET_TURN_FLAG(board) == (bool)(direction & 2) && !IS_KING(tile)) // do not check backwards movement continue; switch (direction) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: 
get_dir_idx_ptr = &get_right_lower_idx; break; default: return; } tmp_idx = get_dir_idx_ptr(cur_idx); if (tmp_idx == 32) continue; // check next 'direction' if out of bound result = GET_VAL_BOARD(tmp_idx, board); if (IS_PIECE(result) && GET_TURN_FLAG(board) != IS_WHITE(result)) // proceed only if the piece in 'direction' belongs to the opponent { tmp_idx = get_dir_idx_ptr(tmp_idx); if (tmp_idx == 32) continue; result = GET_VAL_BOARD(tmp_idx, board); if (!IS_PIECE(result)) // check if tile behind opponents's piece is empty { if (!GET_BEATING_POS_FLAG(move_pos)) // set beating flag if no beating move was found previously, clear non-beating moves and save new idx { moves_idx = 0; move_pos[0] = move_pos[1] = move_pos[2] = move_pos[3] = 0; SET_BEATING_POS_FLAG(move_pos); } SET_VAL_MOVE_POS(moves_idx, cur_idx, move_pos); ++moves_idx; CLEAR_MOVE_CHECK_GUARD(move_pos); // clear for next iteration return; } } // check if tile in 'direction' is empty, skip if beating possibility is already saved in array // or a non-beating move was previously found for cur_idx tile else if (!IS_PIECE(result) && !GET_BEATING_POS_FLAG(move_pos) && !GET_MOVE_CHECK_GUARD(move_pos)) { SET_VAL_MOVE_POS(moves_idx, cur_idx, move_pos); ++moves_idx; SET_MOVE_CHECK_GUARD(move_pos); // set flag to check only possibility of beating in next iterations continue; } } CLEAR_MOVE_CHECK_GUARD(move_pos); // clear for next iteration } } // Index of tile that can be moved is stored similarly as board representation, but in 8 bits instead of 4 bits // Additionally move_pos[3] is used for flags and saving number of indexes in the whole array (0 <= n <= 12) // Flags include - availability of beating for returned indexes, other flag for loop_fun purpose only __host__ __device__ void get_move_possibility(unsigned int board[4], unsigned int move_pos[4]) { unsigned int moves_idx = 0; move_pos[0] = move_pos[1] = move_pos[2] = move_pos[3] = 0; for (unsigned int i = 0; i < 32; ++i) get_move_possibility_loop_fun(board, move_pos, i, moves_idx); SET_NUM_OF_MOVES(move_pos, moves_idx); // record number of possible moves } // flags in 2 bit pairs: 01 - non-beating move, 10 - beating move, move_pos[2] is used for storing all pairs, // the same spots in move_pos[3] as in get_move_possibility are used for beating available flag and number of indexes saved (0 <= n <= 3) // 0 - left upper, 1 - right upper, 2 - left lower, 3 - right lower __host__ __device__ void get_piece_move_pos(unsigned int board[4], unsigned int move_pos[4], unsigned int& idx) { unsigned int tile, tmp_idx, result, move_counter = 0; move_pos[2] = move_pos[3] = 0; // [0],[1] - not used tile = GET_VAL_BOARD_S(idx, board); if (IS_PIECE(tile)) { unsigned int (*get_dir_idx_ptr)(unsigned int&); for (unsigned int direction = 0; direction < 4; ++direction) { if (IS_WHITE(tile) == (bool)(direction & 2) && !IS_KING(tile)) // do not check backwards movement continue; switch (direction) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: return; } tmp_idx = get_dir_idx_ptr(idx); if (tmp_idx == 32) continue; // check next 'direction' if out of bound result = GET_VAL_BOARD(tmp_idx, board); if (IS_PIECE(result) && IS_WHITE(tile) != IS_WHITE(result)) // proceed only if the piece in 'direction' belongs to the opponent { tmp_idx = get_dir_idx_ptr(tmp_idx); if (tmp_idx == 32) continue; result = GET_VAL_BOARD(tmp_idx, board); if 
(!IS_PIECE(result)) // check if tile behind opponents's piece is empty { if (!GET_BEATING_POS_FLAG(move_pos)) { // set general beating flag if no beating move was found previously, clearing move_pos[2] not necessary move_counter = 0; SET_BEATING_POS_FLAG(move_pos); } SET_PIECE_BEATING_FLAG(direction, move_pos); // set direction beating flag ++move_counter; } } else if (!IS_PIECE(result) && !GET_BEATING_POS_FLAG(move_pos)) { SET_PIECE_NONBEATING_FLAG(direction, move_pos); // set empty tile in direction flag ++move_counter; } } } SET_NUM_OF_MOVES(move_pos, move_counter); } // move piece in the direction specified by get_dir_idx_ptr function pointer, reaching last row promotes Man to King // !!! - no game logic is checked in this function - correct moves are guaranteed by get_move_possibility and get_piece_move_pos __host__ __device__ void move_piece(unsigned int board[4], unsigned int& cur_tile_idx, unsigned int (*get_dir_idx_ptr)(unsigned int&)) { if (cur_tile_idx > 31) return; // safety guard unsigned int other_tile_idx = get_dir_idx_ptr(cur_tile_idx); if (other_tile_idx == 32) return; // do not move out of bounds unsigned int cur_tile = GET_VAL_BOARD(cur_tile_idx, board); if (!IS_PIECE(GET_VAL_BOARD(other_tile_idx, board))) // empty tile - move by one in 'direction', nonbeating { SET_VAL_BOARD(other_tile_idx, cur_tile, board); SET_VAL_BOARD(cur_tile_idx, 0, board); } else // not empty tile - move by two in 'direction', beating { if (get_dir_idx_ptr(other_tile_idx) == 32) return; // do not move out of bounds SET_VAL_BOARD(other_tile_idx, 0, board); SET_VAL_BOARD(cur_tile_idx, 0, board); other_tile_idx = get_dir_idx_ptr(other_tile_idx); SET_VAL_BOARD(other_tile_idx, cur_tile, board); } // if reached tile is last row - promote to king if ((!IS_KING(cur_tile)) && ((IS_WHITE(cur_tile) && other_tile_idx < 4) || (IS_BLACK(cur_tile) && other_tile_idx > 27))) SET_VAL_BOARD(other_tile_idx, (cur_tile | 1), board); // promote to king } //////////////////////////////////////////////////////////////////////////////// - game loop and players void game_loop(unsigned int board[4], void (*white_player)(unsigned int*, unsigned int*), void (*black_player)(unsigned int*, unsigned int*)) { unsigned int move_pos[4]; get_move_possibility(board, move_pos); while (0 != (GET_NUM_OF_MOVES(move_pos))) // end game if noone can move { system("cls"); draw_board(board); std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" << BG_BLACK_FG_WHITE << std::endl << std::endl; if (GET_TURN_FLAG(board)) white_player(board, move_pos); else black_player(board, move_pos); get_move_possibility(board, move_pos); } } void human_player(unsigned int board[4], unsigned int move_pos[4]) { unsigned int choosen_idx_tile, choosen_idx_dir, dir; char cords[2]; bool board_beating_flag, beating_sequence_in_progress = false, was_king_before_move; // lambdas are for updating displayed information auto redraw_beginning = [board]() { system("cls"); draw_board(board); std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" 
<< BG_BLACK_FG_WHITE << std::endl << std::endl; }; auto redraw_first_stage = [board, move_pos, redraw_beginning]() { redraw_beginning(); get_move_possibility(board, move_pos); disp_moveable_pieces(board, move_pos); std::cout << std::endl; }; auto redraw_second_stage = [board, move_pos, &choosen_idx_tile, redraw_beginning]() { redraw_beginning(); get_piece_move_pos(board, move_pos, choosen_idx_tile); disp_possible_dirs(board, move_pos, choosen_idx_tile); std::cout << std::endl; }; human_player_reset: while (true) // piece choice loop { redraw_first_stage(); get_cords_from_console(cords); choosen_idx_tile = translate_cords_to_idx(cords); // choose tile with piece to be moved board_beating_flag = GET_BEATING_POS_FLAG(move_pos); get_piece_move_pos(board, move_pos, choosen_idx_tile); if (0 == (GET_NUM_OF_MOVES(move_pos))) { std::cout << std::endl << "This piece cannot move!" << std::endl << "Please choose a different piece!" << std::endl << std::endl; system("pause"); continue; } else if (board_beating_flag != GET_BEATING_POS_FLAG(move_pos)) // force beating { std::cout << std::endl << "BEATING POSSIBLE!" << std::endl << "Please choose a different piece!" << std::endl << std::endl; system("pause"); continue; } break; } while (true) // move sequence loop { redraw_second_stage(); get_cords_from_console(cords); choosen_idx_dir = translate_cords_to_idx(cords); // choose tile in the dir to move (in distance 1 (diagonally) from idx_tile) unsigned int (*get_dir_idx_ptr)(unsigned int&); for (dir = 0; dir < 4; ++dir) { if (dir < 2 && choosen_idx_dir > choosen_idx_tile) // idx_dir > idx_tile only if the chosen tile is in down 'dir', so skip first two (upper) 'dir' continue; switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - human_player"; system("pause"); exit(EXIT_FAILURE); } if (choosen_idx_dir != get_dir_idx_ptr(choosen_idx_tile)) // skip dir if idx_dir is not in distance 1 continue; if (GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) // move is beating { was_king_before_move = IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))); move_piece(board, choosen_idx_tile, get_dir_idx_ptr); choosen_idx_tile = get_dir_idx_ptr(choosen_idx_dir); if (was_king_before_move != (IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))))) // stop beating sequence and end turn if promotion to king happens after a move { FLIP_TURN_FLAG(board); return; } break; } else if (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos)) // move is nonbeating { move_piece(board, choosen_idx_tile, get_dir_idx_ptr); FLIP_TURN_FLAG(board); return; } std::cout << std::endl << "Impossible move!" << std::endl << "Please choose a different move!" << std::endl << std::endl; system("pause"); if (!beating_sequence_in_progress) // reset to piece choice - if invalid first move was choosen goto human_player_reset; } if (dir == 4) // this is visited only if idx_dir was not in distance 1 from idx_tile { std::cout << std::endl << "Impossible move!" << std::endl << "Please choose a different move!" 
<< std::endl << std::endl; system("pause"); if (!beating_sequence_in_progress) // reset to piece choice - if invalid first move was choosen goto human_player_reset; else continue; } get_piece_move_pos(board, move_pos, choosen_idx_tile); if (!GET_BEATING_POS_FLAG(move_pos)) break; // end turn if no more beating possible in current sequence beating_sequence_in_progress = true; } FLIP_TURN_FLAG(board); } void random_player(unsigned int board[4], unsigned int move_pos[4]) { std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> dist(0, 0); unsigned int choosen_idx_tile, choosen_idx_dir, dir = 0, dir_idx_upper_bound, dir_idx_counter = 0; bool beating_sequence_in_progress = false, was_king_before_move; unsigned int (*get_dir_idx_ptr)(unsigned int&); // choose tile with piece to be moved get_move_possibility(board, move_pos); dist = std::uniform_int_distribution<>(0, ((GET_NUM_OF_MOVES(move_pos)) - 1)); choosen_idx_tile = dist(gen); choosen_idx_tile = GET_VAL_MOVE_POS(choosen_idx_tile, move_pos); do { // choose tile in the dir to move (in distance 1 (diagonally) from idx_tile) // the rng dir choice is done on the interval [0;n-1] where n is the number of dirs with valid move choices get_piece_move_pos(board, move_pos, choosen_idx_tile); dir_idx_upper_bound = (GET_NUM_OF_MOVES(move_pos)) - 1; // this is guaranteed o be >= 0 if the game is in progress dist = std::uniform_int_distribution<>(0, dir_idx_upper_bound); choosen_idx_dir = dist(gen); // dir_idx_counter is only incremented if a possible move in 'dir' is encountered but is not the chosen one for (dir = 0, dir_idx_counter = 0; dir_idx_counter <= dir_idx_upper_bound && dir < 4; ++dir) { switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - random_player"; system("pause"); exit(EXIT_FAILURE); } if (dir_idx_counter == choosen_idx_dir); // proceed to make a move after dir is a correct idx else if ((GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) || (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos))) { ++dir_idx_counter; continue; } else continue; if (GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) // move is beating { was_king_before_move = IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))); choosen_idx_dir = get_dir_idx_ptr(choosen_idx_tile); move_piece(board, choosen_idx_tile, get_dir_idx_ptr); choosen_idx_tile = get_dir_idx_ptr(choosen_idx_dir); if (was_king_before_move != (IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))))) // stop beating sequence and end turn if promotion to king happens after a move { FLIP_TURN_FLAG(board); return; } break; } else if (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos)) // move is nonbeating { move_piece(board, choosen_idx_tile, get_dir_idx_ptr); FLIP_TURN_FLAG(board); return; } } if (dir == 4) { system("cls"); std::cout << "ERROR - random_player"; system("pause"); exit(EXIT_FAILURE); } get_piece_move_pos(board, move_pos, choosen_idx_tile); if (!GET_BEATING_POS_FLAG(move_pos)) break; // end turn if no more beating possible in current sequence beating_sequence_in_progress = true; } while (beating_sequence_in_progress); FLIP_TURN_FLAG(board); } //////////////////////////////////////////////////////////////////////////////// - MCTS unsigned int 
simulate_game_CPU(unsigned int board[4]) { unsigned int move_pos[4]; get_move_possibility(board, move_pos); while (0 != (GET_NUM_OF_MOVES(move_pos))) // end game if noone can move { random_player(board, move_pos); get_move_possibility(board, move_pos); } get_end_state(board); return (board[0] & 2048 ? 2 : 0) | (board[0] & 128 ? 1 : 0); } // traverses the sequence tree like DFS unsigned int count_beating_sequences_for_piece_dir(unsigned int board[4], unsigned int cur_tile_idx, unsigned int dir) { unsigned int piece_pos[4], tmp_board[4]{}, possible_moves = 0, dir_tile_idx; bool was_king_before_move; unsigned int (*get_dir_idx_ptr)(unsigned int&); tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; get_piece_move_pos(tmp_board, piece_pos, cur_tile_idx); switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - count_beating_sequences_for_piece_dir"; system("pause"); exit(EXIT_FAILURE); } if (GET_BEATING_POS_FLAG(piece_pos) && GET_PIECE_BEATING_FLAG(dir, piece_pos)) { was_king_before_move = IS_KING((GET_VAL_BOARD(cur_tile_idx, tmp_board))); dir_tile_idx = get_dir_idx_ptr(cur_tile_idx); move_piece(tmp_board, cur_tile_idx, get_dir_idx_ptr); cur_tile_idx = get_dir_idx_ptr(dir_tile_idx); ++possible_moves; if (was_king_before_move != (IS_KING((GET_VAL_BOARD(cur_tile_idx, tmp_board))))) // stop counting if promotion to king happens after a move { return possible_moves; } get_piece_move_pos(tmp_board, piece_pos, cur_tile_idx); if (GET_BEATING_POS_FLAG(piece_pos)) // check if more beatings in sequence { possible_moves = 0; for (unsigned int dir = 0; dir < 4; ++dir) possible_moves += count_beating_sequences_for_piece_dir(tmp_board, cur_tile_idx, dir); } } return possible_moves; } void MCTS_CPU_player(unsigned int board[4], unsigned int move_pos[4]) { unsigned int*** first_layer, * sequence_count, * selected_tile, choosable_piece_count = 0; double** success_rate, ** tries; #ifdef MEASURE_TIME std::chrono::steady_clock::time_point start, stop; std::chrono::duration<double, std::milli> elapsed; start = std::chrono::high_resolution_clock::now(); #endif // MEASURE_TIME // allocate memory for first layer get_move_possibility(board, move_pos); choosable_piece_count = GET_NUM_OF_MOVES(move_pos); first_layer = new unsigned int** [choosable_piece_count]; sequence_count = new unsigned int[choosable_piece_count]; selected_tile = new unsigned int[choosable_piece_count]; success_rate = new double* [choosable_piece_count]; tries = new double* [choosable_piece_count]; // count needed size and save sequence_count for (unsigned int i = 0; i < choosable_piece_count; ++i) { unsigned int possible_moves = 0; selected_tile[i] = GET_VAL_MOVE_POS(i, move_pos); if (GET_BEATING_POS_FLAG(move_pos)) for (unsigned int dir = 0; dir < 4; ++dir) possible_moves += count_beating_sequences_for_piece_dir(board, selected_tile[i], dir); else { get_piece_move_pos(board, move_pos, selected_tile[i]); possible_moves = GET_NUM_OF_MOVES(move_pos); get_move_possibility(board, move_pos); } sequence_count[i] = possible_moves; first_layer[i] = new unsigned int* [sequence_count[i]]; success_rate[i] = new double[sequence_count[i]]; tries[i] = new double[sequence_count[i]]; for (unsigned int j = 0; j < sequence_count[i]; ++j) { first_layer[i][j] = new unsigned int[4]{}; success_rate[i][j] 
= 0; tries[i][j] = 0; } } // build first layer for (unsigned int i = 0; i < choosable_piece_count; ++i) { unsigned int tmp_board[4]; tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; get_piece_move_pos(tmp_board, move_pos, selected_tile[i]); if (!GET_BEATING_POS_FLAG(move_pos)) { if (GET_NUM_OF_MOVES(move_pos) > 4) exit(EXIT_FAILURE); for (unsigned int j = 0, dir = 0; dir < 4 && j < sequence_count[i]; ++dir) { tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; if (!GET_PIECE_NONBEATING_FLAG(dir, move_pos)) continue; unsigned int (*get_dir_idx_ptr)(unsigned int&); switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - MCTS_CPU_player"; system("pause"); exit(EXIT_FAILURE); } move_piece(tmp_board, selected_tile[i], get_dir_idx_ptr); first_layer[i][j][0] = tmp_board[0]; first_layer[i][j][1] = tmp_board[1]; first_layer[i][j][2] = tmp_board[2]; first_layer[i][j][3] = tmp_board[3]; FLIP_TURN_FLAG(first_layer[i][j]); ++j; } } else // this visits nodes in the tree similarly as in count_beating_sequences_for_piece_dir { unsigned int chaser = 0, j = 0, tmp_count = 0, cur_tile_idx = selected_tile[i]; while (j < sequence_count[i]) { for (unsigned int dir = 0; dir < 4; ++dir) { tmp_count = count_beating_sequences_for_piece_dir(tmp_board, cur_tile_idx, dir); if (!tmp_count) continue; chaser += tmp_count; if (chaser <= j) continue; unsigned int (*get_dir_idx_ptr)(unsigned int&); switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - MCTS_CPU_player"; system("pause"); exit(EXIT_FAILURE); } move_piece(tmp_board, cur_tile_idx, get_dir_idx_ptr); cur_tile_idx = get_dir_idx_ptr(cur_tile_idx); cur_tile_idx = get_dir_idx_ptr(cur_tile_idx); } chaser = chaser - tmp_count; if (((sequence_count[i] - j) != 1 && chaser <= j) || chaser < j) continue; first_layer[i][j][0] = tmp_board[0]; first_layer[i][j][1] = tmp_board[1]; first_layer[i][j][2] = tmp_board[2]; first_layer[i][j][3] = tmp_board[3]; FLIP_TURN_FLAG(first_layer[i][j]); cur_tile_idx = selected_tile[i]; tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; chaser = 0; ++j; } } } #ifdef DEBUG // test if layer build correctly - debug for (unsigned int i = 0; i < choosable_piece_count; ++i) { for (unsigned int j = 0; j < sequence_count[i]; ++j) { system("cls"); draw_board(board); std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" << BG_BLACK_FG_WHITE << std::endl << std::endl; std::cout << std::endl; draw_board(first_layer[i][j]); std::cout << std::endl << (GET_TURN_FLAG(first_layer[i][j]) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(first_layer[i][j]) ? "White" : "Black") << "'s turn!" 
<< BG_BLACK_FG_WHITE << std::endl << std::endl; system("pause"); } } #endif // DEBUG #ifdef MEASURE_TIME stop = std::chrono::high_resolution_clock::now(); elapsed = (stop - start); std::cout << "CPU - First Layer Building time: " << elapsed.count() << " ms" << std::endl; start = std::chrono::high_resolution_clock::now(); #endif // MEASURE_TIME // run simulations { std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> dist1, dist2; unsigned int piece_choice, sequence_choice, simulation_result, tmp_board[4]; dist1 = std::uniform_int_distribution<>(0, choosable_piece_count - 1); unsigned int possible_sequences = 0; for (unsigned int i = 0; i < choosable_piece_count; ++i) possible_sequences += sequence_count[i]; for (unsigned int i = 0; i < possible_sequences * THREADS_PER_BLOCK * BLOCKS_PER_SEQUENCE_X * BLOCKS_PER_SEQUENCE_Y * BLOCKS_PER_SEQUENCE_Z; ++i) { piece_choice = dist1(gen); dist2 = std::uniform_int_distribution<>(0, sequence_count[piece_choice] - 1); sequence_choice = dist2(gen); tmp_board[0] = first_layer[piece_choice][sequence_choice][0]; tmp_board[1] = first_layer[piece_choice][sequence_choice][1]; tmp_board[2] = first_layer[piece_choice][sequence_choice][2]; tmp_board[3] = first_layer[piece_choice][sequence_choice][3]; simulation_result = simulate_game_CPU(tmp_board); if (!simulation_result) continue; else if (simulation_result == 3) success_rate[piece_choice][sequence_choice] += 0.5; else if ((simulation_result == 2 && GET_TURN_FLAG(board)) || (simulation_result == 1 && !GET_TURN_FLAG(board))) success_rate[piece_choice][sequence_choice] += 1.0; tries[piece_choice][sequence_choice] += 1.0; } } #ifdef MEASURE_TIME stop = std::chrono::high_resolution_clock::now(); elapsed = (stop - start); std::cout << std::endl << "CPU - Simulation time: " << std::chrono::duration_cast<std::chrono::seconds>(elapsed).count() << " s" << std::endl; start = std::chrono::high_resolution_clock::now(); #endif // MEASURE_TIME // extract success rate for (unsigned int i = 0; i < choosable_piece_count; ++i) for (unsigned int j = 0; j < sequence_count[i]; ++j) if (tries[i][j] > 0) success_rate[i][j] /= tries[i][j]; // make a move { double max = -1.0; unsigned int idx1, idx2; for (unsigned int i = 0; i < choosable_piece_count; ++i) for (unsigned int j = 0; j < sequence_count[i]; ++j) if (success_rate[i][j] > max) { max = success_rate[i][j]; idx1 = i; idx2 = j; } board[0] = first_layer[idx1][idx2][0]; board[1] = first_layer[idx1][idx2][1]; board[2] = first_layer[idx1][idx2][2]; board[3] = first_layer[idx1][idx2][3]; } #ifdef MEASURE_TIME stop = std::chrono::high_resolution_clock::now(); elapsed = (stop - start); std::cout << std::endl << "CPU - Choosing move time: " << elapsed.count() << " ms" << std::endl << std::endl; system("pause"); #endif // MEASURE_TIME // deallocate memory for first layer for (unsigned int i = 0; i < choosable_piece_count; ++i) { for (unsigned int j = 0; j < sequence_count[i]; ++j) delete[] first_layer[i][j]; delete[] first_layer[i]; delete[] success_rate[i]; delete[] tries[i]; } delete[] first_layer; delete[] success_rate; delete[] tries; delete[] sequence_count; delete[] selected_tile; } void MCTS_GPU_player(unsigned int board[4], unsigned int move_pos[4]) { thrust::host_vector<unsigned int> h_first_layer; thrust::device_vector<unsigned int> d_first_layer; thrust::device_vector<float> d_results; unsigned int* sequence_count, * selected_tile, choosable_piece_count = 0, possible_sequences = 0; float* success_rates; #ifdef MEASURE_TIME float elapsedGPU; cudaEvent_t startGPU, stopGPU;
cudaEventCreate(&startGPU); cudaEventCreate(&stopGPU); std::chrono::steady_clock::time_point startCPU, stopCPU; std::chrono::duration<double, std::milli> elapsedCPU; startCPU = std::chrono::high_resolution_clock::now(); #endif // MEASURE_TIME // allocate memory for computing first layer get_move_possibility(board, move_pos); choosable_piece_count = GET_NUM_OF_MOVES(move_pos); sequence_count = new unsigned int[choosable_piece_count]; selected_tile = new unsigned int[choosable_piece_count]; for (unsigned int i = 0; i < choosable_piece_count; ++i) { unsigned int possible_moves = 0; selected_tile[i] = GET_VAL_MOVE_POS(i, move_pos); if (GET_BEATING_POS_FLAG(move_pos)) for (unsigned int dir = 0; dir < 4; ++dir) possible_moves += count_beating_sequences_for_piece_dir(board, selected_tile[i], dir); else { get_piece_move_pos(board, move_pos, selected_tile[i]); possible_moves = GET_NUM_OF_MOVES(move_pos); get_move_possibility(board, move_pos); } sequence_count[i] = possible_moves; possible_sequences += possible_moves; } // allocate memory for host_vector h_first_layer = thrust::host_vector<unsigned int>(static_cast<size_t>(possible_sequences) * 4); d_results = thrust::device_vector<float>(static_cast<size_t>(possible_sequences) * THREADS_PER_BLOCK * BLOCKS_PER_SEQUENCE_X * BLOCKS_PER_SEQUENCE_Y * BLOCKS_PER_SEQUENCE_Z); success_rates = new float[possible_sequences * BLOCKS_PER_SEQUENCE_X * BLOCKS_PER_SEQUENCE_Y * BLOCKS_PER_SEQUENCE_Z]; // build first layer for (unsigned int host_idx = 0, i = 0; i < choosable_piece_count; ++i) { unsigned int tmp_board[4]; tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; get_piece_move_pos(tmp_board, move_pos, selected_tile[i]); if (!GET_BEATING_POS_FLAG(move_pos)) { if (GET_NUM_OF_MOVES(move_pos) > 4) exit(EXIT_FAILURE); for (unsigned int j = 0, dir = 0; dir < 4 && j < sequence_count[i]; ++dir) { tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; if (!GET_PIECE_NONBEATING_FLAG(dir, move_pos)) continue; unsigned int (*get_dir_idx_ptr)(unsigned int&); switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - MCTS_GPU_player"; system("pause"); exit(EXIT_FAILURE); } move_piece(tmp_board, selected_tile[i], get_dir_idx_ptr); FLIP_TURN_FLAG(tmp_board); h_first_layer[static_cast<size_t>(host_idx)] = tmp_board[0]; h_first_layer[static_cast<size_t>(host_idx) + 1] = tmp_board[1]; h_first_layer[static_cast<size_t>(host_idx) + 2] = tmp_board[2]; h_first_layer[static_cast<size_t>(host_idx) + 3] = tmp_board[3]; host_idx += 4; ++j; } } else // this visits nodes in the tree similarly as in count_beating_sequences_for_piece_dir { unsigned int chaser = 0, j = 0, tmp_count = 0, cur_tile_idx = selected_tile[i]; while (j < sequence_count[i]) { for (unsigned int dir = 0; dir < 4; ++dir) { tmp_count = count_beating_sequences_for_piece_dir(tmp_board, cur_tile_idx, dir); if (!tmp_count) continue; chaser += tmp_count; if (chaser <= j) continue; unsigned int (*get_dir_idx_ptr)(unsigned int&); switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - 
MCTS_GPU_player"; system("pause"); exit(EXIT_FAILURE); } move_piece(tmp_board, cur_tile_idx, get_dir_idx_ptr); cur_tile_idx = get_dir_idx_ptr(cur_tile_idx); cur_tile_idx = get_dir_idx_ptr(cur_tile_idx); } chaser = chaser - tmp_count; if (((sequence_count[i] - j) != 1 && chaser <= j) || chaser < j) continue; FLIP_TURN_FLAG(tmp_board); h_first_layer[static_cast<size_t>(host_idx)] = tmp_board[0]; h_first_layer[static_cast<size_t>(host_idx) + 1] = tmp_board[1]; h_first_layer[static_cast<size_t>(host_idx) + 2] = tmp_board[2]; h_first_layer[static_cast<size_t>(host_idx) + 3] = tmp_board[3]; cur_tile_idx = selected_tile[i]; tmp_board[0] = board[0]; tmp_board[1] = board[1]; tmp_board[2] = board[2]; tmp_board[3] = board[3]; chaser = 0; host_idx += 4; ++j; } } } #ifdef MEASURE_TIME stopCPU = std::chrono::high_resolution_clock::now(); elapsedCPU = (stopCPU - startCPU); std::cout << "CPU - First Layer Building time: " << elapsedCPU.count() << " ms" << std::endl; #endif // MEASURE_TIME // deallocate memory used for computing first layer delete[] selected_tile; delete[] sequence_count; #ifdef DEBUG // test if layer build correctly - debug for (unsigned int i = 0; i < possible_sequences; ++i) { unsigned int tmp_board[4]; tmp_board[0] = h_first_layer[4 * i]; tmp_board[1] = h_first_layer[4 * i + 1]; tmp_board[2] = h_first_layer[4 * i + 2]; tmp_board[3] = h_first_layer[4 * i + 3]; system("cls"); draw_board(board); std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" << BG_BLACK_FG_WHITE << std::endl << std::endl; std::cout << std::endl; draw_board(tmp_board); std::cout << std::endl << (GET_TURN_FLAG(tmp_board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(tmp_board) ? "White" : "Black") << "'s turn!" 
<< BG_BLACK_FG_WHITE << std::endl << std::endl; system("pause"); } #endif // DEBUG #ifdef MEASURE_TIME cudaEventRecord(startGPU); #endif // MEASURE_TIME // move data to GPU d_first_layer = h_first_layer; dim3 dimBlock(THREADS_PER_BLOCK, 1, 1); dim3 dimGrid(possible_sequences * BLOCKS_PER_SEQUENCE_X, BLOCKS_PER_SEQUENCE_Y, BLOCKS_PER_SEQUENCE_Z); thrust::device_vector<curandState> states(64); // init states for curand setup_kernel<<<1, 64>>>(thrust::raw_pointer_cast(states.begin().base())); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("%s\n", cudaGetErrorString(err)); cudaDeviceSynchronize(); // run simulations MCTS_kernel<<<dimGrid, dimBlock>>>(thrust::raw_pointer_cast(d_first_layer.begin().base()), thrust::raw_pointer_cast(states.begin().base()), thrust::raw_pointer_cast(d_results.begin().base()), possible_sequences); err = cudaGetLastError(); if (err != cudaSuccess) printf("%s\n", cudaGetErrorString(err)); cudaDeviceSynchronize(); #ifdef MEASURE_TIME cudaEventRecord(stopGPU); cudaEventSynchronize(stopGPU); cudaEventElapsedTime(&elapsedGPU, startGPU, stopGPU); std::cout << std::endl << "GPU - Simulation time: " << elapsedGPU << " ms" << std::endl; cudaEventRecord(startGPU); #endif // MEASURE_TIME for (unsigned int i = 0; i < possible_sequences * BLOCKS_PER_SEQUENCE_X * BLOCKS_PER_SEQUENCE_Y * BLOCKS_PER_SEQUENCE_Z; ++i) success_rates[i] = thrust::reduce(d_results.begin() + (THREADS_PER_BLOCK*i), d_results.begin() + (THREADS_PER_BLOCK * (i+1))) / static_cast<float>(THREADS_PER_BLOCK); #ifdef MEASURE_TIME cudaEventRecord(stopGPU); cudaEventSynchronize(stopGPU); cudaEventElapsedTime(&elapsedGPU, startGPU, stopGPU); std::cout << std::endl << "GPU - Results Reduction time: " << elapsedGPU << " ms" << std::endl; cudaEventDestroy(startGPU); cudaEventDestroy(stopGPU); startCPU = std::chrono::high_resolution_clock::now(); #endif // MEASURE_TIME // sum success_rates for each sequence for (unsigned int i = possible_sequences; i < possible_sequences * BLOCKS_PER_SEQUENCE_X * BLOCKS_PER_SEQUENCE_Y * BLOCKS_PER_SEQUENCE_Z; ++i) success_rates[i % possible_sequences] += success_rates[i]; // make a move { double max = -1.0; unsigned int idx; for (unsigned int i = 0; i < possible_sequences; ++i) if (success_rates[i] > max) { max = success_rates[i]; idx = i % possible_sequences; } board[0] = h_first_layer[4 * idx]; board[1] = h_first_layer[4 * idx + 1]; board[2] = h_first_layer[4 * idx + 2]; board[3] = h_first_layer[4 * idx + 3]; } #ifdef MEASURE_TIME stopCPU = std::chrono::high_resolution_clock::now(); elapsedCPU = (stopCPU - startCPU); std::cout << std::endl << "CPU - Choosing move time: " << elapsedCPU.count() << " ms" << std::endl << std::endl; system("pause"); #endif // MEASURE_TIME delete[] success_rates; } __global__ void MCTS_kernel(const unsigned int* d_first_layer, curandState* states, float* d_results, const unsigned int possible_sequences) { const unsigned int tid = threadIdx.x; const unsigned int bid = blockIdx.x + gridDim.x * (blockIdx.y + blockIdx.z * gridDim.y); unsigned int tmp_board[4]; tmp_board[0] = d_first_layer[4 * (bid % possible_sequences)]; tmp_board[1] = d_first_layer[4 * (bid % possible_sequences) + 1]; tmp_board[2] = d_first_layer[4 * (bid % possible_sequences) + 2]; tmp_board[3] = d_first_layer[4 * (bid % possible_sequences) + 3]; unsigned int simulation_result = simulate_game_GPU(tmp_board, states, possible_sequences); if (!simulation_result) d_results[tid + THREADS_PER_BLOCK * bid] = 0.0f; else if (simulation_result == 3) d_results[tid + THREADS_PER_BLOCK * bid] =
0.5f; else if ((simulation_result == 2 && GET_TURN_FLAG(tmp_board)) || (simulation_result == 1 && GET_TURN_FLAG(tmp_board))) d_results[tid + THREADS_PER_BLOCK * bid] = 1.0f; } __global__ void setup_kernel(curandState* states) { int id = threadIdx.x; curand_init(1234, id, 0, &states[id]); } __device__ float simulate_game_GPU(unsigned int board[4], curandState* states, const unsigned int possible_sequences) { unsigned int id = (blockIdx.x + blockDim.y * (blockIdx.y + blockIdx.z * blockDim.z)) % possible_sequences; unsigned int move_pos[4]; get_move_possibility(board, move_pos); while (0 != (GET_NUM_OF_MOVES(move_pos))) // end game if noone can move { random_player_GPU(board, move_pos, &states[id]); get_move_possibility(board, move_pos); } get_end_state(board); return (board[0] & 2048 ? 2 : 0) | (board[0] & 128 ? 1 : 0); } __device__ void random_player_GPU(unsigned int board[4], unsigned int move_pos[4], curandState* state) { unsigned int choosen_idx_tile, choosen_idx_dir, dir = 0, dir_idx_upper_bound, dir_idx_counter = 0; bool beating_sequence_in_progress = false, was_king_before_move; unsigned int (*get_dir_idx_ptr)(unsigned int&); // choose tile with piece to be moved get_move_possibility(board, move_pos); choosen_idx_tile = curand(state) % (GET_NUM_OF_MOVES(move_pos)); choosen_idx_tile = GET_VAL_MOVE_POS(choosen_idx_tile, move_pos); do { // choose tile in the dir to move(in distance 1 (diagonally)from idx_tile) // the rng dir choice is done on the interval [0;n-1] where n is the number of dirs with valid move choices get_piece_move_pos(board, move_pos, choosen_idx_tile); dir_idx_upper_bound = GET_NUM_OF_MOVES(move_pos); choosen_idx_dir = curand(state) % dir_idx_upper_bound; // dir_idx_counter is only incremented if a possible move in 'dir' is encountered but is not the chosen one for (dir = 0, dir_idx_counter = 0; dir_idx_counter <= dir_idx_upper_bound && dir < 4; ++dir) { switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: return; } if (dir_idx_counter == choosen_idx_dir); // proceed to make a move after dir is a correct idx else if ((GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) || (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos))) { ++dir_idx_counter; continue; } else continue; if (GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) // move is beating { was_king_before_move = IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))); choosen_idx_dir = get_dir_idx_ptr(choosen_idx_tile); move_piece(board, choosen_idx_tile, get_dir_idx_ptr); choosen_idx_tile = get_dir_idx_ptr(choosen_idx_dir); if (was_king_before_move != (IS_KING((GET_VAL_BOARD(choosen_idx_tile, board))))) // stop beating sequence and end turn if promotion to king happens after a move { FLIP_TURN_FLAG(board); return; } break; } else if (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos)) // move is nonbeating { move_piece(board, choosen_idx_tile, get_dir_idx_ptr); FLIP_TURN_FLAG(board); return; } } if (dir == 4) return; get_piece_move_pos(board, move_pos, choosen_idx_tile); if (!GET_BEATING_POS_FLAG(move_pos)) break; // end turn if no more beating possible in current sequence beating_sequence_in_progress = true; } while (beating_sequence_in_progress); FLIP_TURN_FLAG(board); } 
//////////////////////////////////////////////////////////////////////////////// - user interaction void disp_moveable_pieces(unsigned int board[4], unsigned int move_pos[4]) { char cords[2]{ '-' }; std::cout << "Possible moves for " << (GET_TURN_FLAG(board) ? "white" : "black") << " - " << (GET_NUM_OF_MOVES(move_pos)) << std::endl; std::cout << "Tiles with moveable pieces: "; get_move_possibility(board, move_pos); for (unsigned int i = 0; i < GET_NUM_OF_MOVES(move_pos); ++i) { translate_idx_to_cords((GET_VAL_MOVE_POS(i, move_pos)), cords); std::cout << cords[0] << cords[1] << ' '; } std::cout << std::endl; } void disp_possible_dirs(unsigned int board[4], unsigned int move_pos[4], unsigned int& idx) { char cords[2]{ '-' }; translate_idx_to_cords(idx, cords); get_piece_move_pos(board, move_pos, idx); if (GET_NUM_OF_MOVES(move_pos)) { std::cout << "Moves possible for piece on " << cords[0] << cords[1] << " - " << (GET_NUM_OF_MOVES(move_pos)) << std::endl; if (GET_BEATING_POS_FLAG(move_pos)) std::cout << "BEATING POSSIBLE!" << std::endl; std::cout << "List of tiles to choose from: "; unsigned int (*get_dir_idx_ptr)(unsigned int&); for (unsigned int dir = 0; dir < 4; ++dir) { switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: system("cls"); std::cout << "ERROR - disp_possible_dirs"; system("pause"); exit(EXIT_FAILURE); } translate_idx_to_cords(get_dir_idx_ptr(idx), cords); if (GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) std::cout << cords[0] << cords[1] << ' '; else if (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos)) std::cout << cords[0] << cords[1] << ' '; } std::cout << std::endl; } else std::cout << "Movement not possible for piece on " << cords[0] << cords[1] << std::endl; } void get_cords_from_console(char cords[2]) { while (true) { std::string input = ""; std::cout << "Please provide coordinates: "; std::getline(std::cin, input); if (input.size() != 2) { std::cout << "Incorrect input length!" << std::endl << std::endl; continue; } cords[0] = toupper(input[0]); cords[1] = toupper(input[1]); if ((cords[0] == 'A' || cords[0] == 'C' || cords[0] == 'E' || cords[0] == 'G') && (cords[1] == '2' || cords[1] == '4' || cords[1] == '6' || cords[1] == '8')) break; else if ((cords[0] == 'B' || cords[0] == 'D' || cords[0] == 'F' || cords[0] == 'H') && (cords[1] == '1' || cords[1] == '3' || cords[1] == '5' || cords[1] == '7')) break; std::cout << "Incorrect coordinates given!" 
<< std::endl << std::endl; } } unsigned int translate_cords_to_idx(const char cords[2]) { if (cords[1] < '0' || cords[1] > '8') return 32; // out of bounds unsigned int cord1 = cords[1] - '1'; // not '0' because we count cords from 1 switch (cords[0]) { case 'A': if (~cord1 & 1) return 32; return cord1 << 2; case 'B': if (cord1 & 1) return 32; return cord1 << 2; case 'C': if (~cord1 & 1) return 32; return (cord1 << 2) + 1; case 'D': if (cord1 & 1) return 32; return (cord1 << 2) + 1; case 'E': if (~cord1 & 1) return 32; return (cord1 << 2) + 2; case 'F': if (cord1 & 1) return 32; return (cord1 << 2) + 2; case 'G': if (~cord1 & 1) return 32; return (cord1 << 2) + 3; case 'H': if (cord1 & 1) return 32; return (cord1 << 2) + 3; default: return 32; } } void translate_idx_to_cords(unsigned int idx, char cords[2]) { if (idx > 31) { cords[0] = '-'; cords[1] = '-'; return; } else if (idx < 4) cords[1] = '1'; else if (idx >= 4 && idx < 8) cords[1] = '2'; else if (idx >= 8 && idx < 12) cords[1] = '3'; else if (idx >= 12 && idx < 16) cords[1] = '4'; else if (idx >= 16 && idx < 20) cords[1] = '5'; else if (idx >= 20 && idx < 24) cords[1] = '6'; else if (idx >= 24 && idx < 28) cords[1] = '7'; else if (idx >= 28 && idx < 32) cords[1] = '8'; if ((idx & 7) == 0) cords[0] = 'B'; else if ((idx & 7) == 1) cords[0] = 'D'; else if ((idx & 7) == 2) cords[0] = 'F'; else if ((idx & 7) == 3) cords[0] = 'H'; else if ((idx & 7) == 4) cords[0] = 'A'; else if ((idx & 7) == 5) cords[0] = 'C'; else if ((idx & 7) == 6) cords[0] = 'E'; else if ((idx & 7) == 7) cords[0] = 'G'; } void disp_end_state(unsigned int* board) { system("cls"); draw_board(board); get_end_state(board); if (board[0] & 2048 && board[0] & 128) std::cout << std::endl << "Game ended in a draw!" << std::endl << std::endl; else if (board[0] & 2048) std::cout << std::endl << BG_WHITE_FG_BLACK << "White won!" << BG_BLACK_FG_WHITE << std::endl << std::endl; else if (board[0] & 128) std::cout << std::endl << "Black won!" << std::endl << std::endl; else if (!board[0]) std::cout << std::endl << "Error occured!" << std::endl << std::endl; } //////////////////////////////////////////////////////////////////////////////// - game conclusion // saves end state in board[0], none - error, 1xxx xxxxx - black win, 1xxx xxxx xxxx - white win, both win - draw // after extracting: 0 - error, 1 - black win, 2 - white win, 3 - draw // to extract - (board[0] & 2048 ? 2 : 0) | (board[0] & 128 ? 1 : 0) __host__ __device__ void get_end_state(unsigned int board[4]) { unsigned int move_pos[4]; get_move_possibility(board, move_pos); for (unsigned int i = 0; i < 32; ++i) { move_pos[0] = GET_VAL_BOARD(i, board); if (IS_PIECE(move_pos[0])) { if (IS_WHITE(move_pos[0])) board[0] |= 2048; if (IS_BLACK(move_pos[0])) board[0] |= 128; } } } //////////////////////////////////////////////////////////////////////////////// - main int main(int argc, char** argv) { unsigned int board[4]; unsigned short menu_choice = 0; bool player_chosen = false; void (*white_player)(unsigned int*, unsigned int*); void (*black_player)(unsigned int*, unsigned int*); std::cout << BG_WHITE_FG_BLACK << BG_BLACK_FG_WHITE; system("cls"); //testing_function(); while (menu_choice != 2) { player_chosen = false; std::cout << BG_BBLUE_FG_BLACK << "!!! Monte-Carlo Tree Search Checkers !!!" << BG_BLACK_FG_WHITE << std::endl << std::endl; std::cout << "1. Start Game - Black Always Begins" << std::endl; std::cout << "2. 
Exit" << std::endl; std::cout << "Choice: "; std::cin >> menu_choice; switch (menu_choice) { case 1: while (!player_chosen) { system("cls"); std::cout << "1. Human Player" << std::endl; std::cout << "2. MCTS_CPU Player" << std::endl; std::cout << "3. MCTS_GPU Player" << std::endl; std::cout << BG_WHITE_FG_BLACK << "White" << BG_BLACK_FG_WHITE << " Player Choice: "; std::cin >> menu_choice; std::cout << std::endl; switch (menu_choice) { case 1: white_player = &human_player; player_chosen = true; break; case 2: white_player = &MCTS_CPU_player; player_chosen = true; break; case 3: white_player = &MCTS_GPU_player; player_chosen = true; break; default: system("cls"); std::cout << "Please provide a valid choice!" << std::endl << std::endl; } } player_chosen = false; while (!player_chosen) { system("cls"); std::cout << "1. Human Player" << std::endl; std::cout << "2. MCTS_CPU Player" << std::endl; std::cout << "3. MCTS_GPU Player" << std::endl; std::cout << "Black Player Choice: "; std::cin >> menu_choice; std::cout << std::endl; switch (menu_choice) { case 1: black_player = &human_player; player_chosen = true; break; case 2: black_player = &MCTS_CPU_player; player_chosen = true; break; case 3: black_player = &MCTS_GPU_player; player_chosen = true; break; default: system("cls"); std::cout << "Please provide a valid choice!" << std::endl << std::endl; } } menu_choice = 1; std::cin.ignore(); init_board(board); game_loop(board, white_player, black_player); disp_end_state(board); system("pause"); system("cls"); break; case 2: break; default: system("cls"); std::cout << "Please provide a valid choice!" << std::endl << std::endl; break; } } exit(EXIT_SUCCESS); } //////////////////////////////////////////////////////////////////////////////// - for debugging void testing_function() { unsigned int board[4]; unsigned int move_possibility[3]{}; //init_board(board); //draw_board(board); //test_get_move_possibility(board, move_possibility); //FLIP_TURN_FLAG(board); //test_get_move_possibility(board, move_possibility); //std::cout << std::endl; //std::cout << std::endl; ////test_get_idx_funs(board); ////std::cout << std::endl; //test_translate_cords_to_idx(); //test_translate_idx_to_cords(); //std::cout << std::endl; ////test_get_move_possibility_init_loop(board); ////std::cout << std::endl; ////test_get_piece_move_pos(board, move_possibility, 9, 6); init_board(board); board[0] = 1074020352; board[1] = 1178861808; board[2] = 102; board[3] = 419424; board[0] = 6569984; board[1] = 0; board[2] = 0; board[3] = 0; FLIP_TURN_FLAG(board); system("cls"); draw_board(board); std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" << BG_BLACK_FG_WHITE << std::endl << std::endl; get_move_possibility(board, move_possibility); disp_moveable_pieces(board, move_possibility); unsigned int idx = 5; disp_possible_dirs(board, move_possibility, idx); random_player(board, move_possibility); //move_piece(board, idx, &get_left_upper_idx); //move_piece(board, idx, &get_right_upper_idx); draw_board(board); //MCTS_GPU_player(board, move_possibility); //draw_board(board); //std::cout << std::endl << (GET_TURN_FLAG(board) ? BG_WHITE_FG_BLACK : BG_BLACK_FG_WHITE) << (GET_TURN_FLAG(board) ? "White" : "Black") << "'s turn!" 
<< BG_BLACK_FG_WHITE << std::endl << std::endl; //game_loop(board, MCTS_GPU_player, MCTS_GPU_player); //disp_end_state(board); system("pause"); //unsigned int game_count = 1000000; //std::chrono::steady_clock::time_point start, finish; //std::chrono::duration<double> elapsed; // //start = std::chrono::high_resolution_clock::now(); //for (unsigned int i = 0; i < game_count; ++i) //{ // init_board(board); // game_loop(board, &random_player, &random_player); // get_end_state(board); // //disp_end_state(board); //} //finish = std::chrono::high_resolution_clock::now(); //elapsed = (finish - start); //std::cout << "Games played: " << game_count << std::endl; //std::cout << "Elapsed time: " << elapsed.count() << std::endl; //std::cout << "Average time: " << elapsed.count() / game_count << std::endl; exit(EXIT_SUCCESS); } void test_get_idx_funs(unsigned int board[4]) { //test top unsigned int tmp = 0; std::cout << (32 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (32 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (4 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (5 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 1; std::cout << std::endl << (32 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (32 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (5 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (6 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 3; std::cout << std::endl << (32 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (32 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (7 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (32 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; // test even tmp = 4; std::cout << std::endl << (32 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (0 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (32 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (8 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 5; std::cout << std::endl << (0 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (1 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (8 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (9 == 
get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 7; std::cout << std::endl << (2 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (3 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (10 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (11 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; //test odd tmp = 8; std::cout << std::endl << (4 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (5 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (12 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (13 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 9; std::cout << std::endl << (5 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (6 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (13 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (14 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 11; std::cout << std::endl << (7 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (32 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (15 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (32 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; //test bottom tmp = 28; std::cout << std::endl << (32 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (24 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (32 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (32 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 29; std::cout << std::endl << (24 == get_left_upper_idx(tmp)) << ": " << "Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (25 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (32 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (32 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; tmp = 31; std::cout << std::endl << (26 == get_left_upper_idx(tmp)) << ": " << 
"Left upper to " << tmp << ": " << get_left_upper_idx(tmp); std::cout << std::endl << (27 == get_right_upper_idx(tmp)) << ": " << "Right upper to " << tmp << ": " << get_right_upper_idx(tmp); std::cout << std::endl << (32 == get_left_lower_idx(tmp)) << ": " << "Left lower to " << tmp << ": " << get_left_lower_idx(tmp); std::cout << std::endl << (32 == get_right_lower_idx(tmp)) << ": " << "Right lower to " << tmp << ": " << get_right_lower_idx(tmp); std::cout << std::endl; } void test_get_move_possibility(unsigned int board[4], unsigned int move_pos[4]) { get_move_possibility(board, move_pos); std::cout << std::endl << "Possible moves " << (GET_TURN_FLAG(board) ? "for white: " : "for black: ") << (GET_NUM_OF_MOVES(move_pos)) << std::endl; std::cout << "Indices of pawns possible to move: "; for (unsigned int i = 0; i < GET_NUM_OF_MOVES(move_pos); ++i) { std::cout << (GET_VAL_MOVE_POS(i, move_pos)) << ' '; } std::cout << std::endl; } void test_get_move_possibility_board_init(unsigned int board[4], unsigned int test_choice) { init_board(board); switch (test_choice) { case 0: // black bottom - outdated board[0] = 1717986918; //1st 2nd rows board[1] = 26214; //3rd 4th rows board[2] = 1145307136; //5th 6th rows board[3] = 1145324612; //7th 8th rows break; case 1: // test 1 - white forward beating // expected - white = 2 moves, idx : 22 23 // expected - black = 4 moves, idx : 8 9 10 11 board[2] = 1717986304; //5th 6th rows break; case 2: // test 2 - white no backward beating, black forward beating // expected - white = 2 moves, idx: 19 23 // expected - black = 2 moves, idx: 5 18 board[1] = 1078198368; board[2] = 1717986304; board[3] = 1717986822; break; case 3: // test 3 - black no backward beating // expected - white = 5 moves, idx: 9 20 21 22 23 // expected - black = 1 move, idx: 5 board[0] = 1078215748; board[1] = 1078198368; break; case 4: // test 4 // expected - white = 5 moves, idx: 9 20 21 22 23 // expected - black = 8 moves, idx: 0 1 4 6 7 12 13 15 board[0] = 1141130308; board[1] = 1078198368; break; case 5: // test 5 - black King backward beating // expected - white = 5 moves, idx: 9 20 21 22 23 // expected - black = 1 move, idx: 5 13 board[0] = 1078215748; board[1] = 1079246944; break; case 6: // test 6 - white King backward beating // expected - white = 1 move, idx: 9 // expected - black = 8 moves, idx: 0 1 4 6 7 12 13 15 board[0] = 1141130308; board[1] = 1078198384; break; case 7: // test 7 - promotion switch turn board[0] = 1073759296; board[1] = 17412; board[2] = 1617168128; board[3] = 1711695462; default: break; } } void test_get_move_possibility_init_loop(unsigned int board[4], int test_choice_lower_bound, int test_choice_upper_bound) { for (int i = test_choice_lower_bound; i < test_choice_upper_bound; ++i) { system("pause"); test_get_move_possibility_board_init(board, i); system("cls"); draw_board(board); std::cout << "Running test " << i << std::endl; unsigned int move_possibility[3]{}; test_get_move_possibility(board, move_possibility); FLIP_TURN_FLAG(board); test_get_move_possibility(board, move_possibility); FLIP_TURN_FLAG(board); std::cout << std::endl; std::cout << std::endl; test_translate_cords_to_idx(); std::cout << std::endl; } } void test_get_piece_move_pos(unsigned int board[4], unsigned int move_pos[4], unsigned int idx) { char cords[2]; translate_idx_to_cords(idx, cords); system("cls"); draw_board(board); test_translate_cords_to_idx(); test_translate_idx_to_cords(); std::cout << std::endl; test_get_move_possibility(board, move_pos); FLIP_TURN_FLAG(board); 
test_get_move_possibility(board, move_pos); FLIP_TURN_FLAG(board); std::cout << std::endl; get_piece_move_pos(board, move_pos, idx); if (GET_NUM_OF_MOVES(move_pos)) { std::cout << "Moves possible for piece on " << cords[0] << cords[1] << " - " << (GET_NUM_OF_MOVES(move_pos)) << std::endl; if (GET_BEATING_POS_FLAG(move_pos)) std::cout << "BEATING POSSIBLE!" << std::endl; std::cout << "List of tiles to choose from: "; unsigned int (*get_dir_idx_ptr)(unsigned int&); for (unsigned int dir = 0; dir < 4; ++dir) { switch (dir) { case 0: get_dir_idx_ptr = &get_left_upper_idx; break; case 1: get_dir_idx_ptr = &get_right_upper_idx; break; case 2: get_dir_idx_ptr = &get_left_lower_idx; break; case 3: get_dir_idx_ptr = &get_right_lower_idx; break; default: break; } translate_idx_to_cords(get_dir_idx_ptr(idx), cords); if (GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_BEATING_FLAG(dir, move_pos)) std::cout << cords[0] << cords[1] << ' '; else if (!GET_BEATING_POS_FLAG(move_pos) && GET_PIECE_NONBEATING_FLAG(dir, move_pos)) std::cout << cords[0] << cords[1] << ' '; } std::cout << std::endl; } else std::cout << "Movement not possible for piece on " << cords[0] << cords[1] << std::endl; } void test_translate_cords_to_idx() { char cords[2] = { 'A', '1' }; for (char c2 = '1'; c2 < '9'; ++c2) { cords[1] = c2; for (char c1 = 'A'; c1 < 'I'; ++c1) { cords[0] = c1; unsigned int idx = translate_cords_to_idx(cords); std::cout << cords[0] << cords[1] << ": " << (32 == idx ? "--" : std::to_string(idx)) << '\t'; } std::cout << std::endl; } std::cout << std::endl; } void test_translate_idx_to_cords() { char cords[2] = { '-', '-' }; std::cout << '\t'; for (unsigned int idx = 0; idx < 32; ++idx) { translate_idx_to_cords(idx, cords); std::cout << (idx > 9 ? '\0' : ' ') << idx << ": " << cords[0] << cords[1] << "\t\t"; if ((idx & 3) == 3) std::cout << std::endl; if ((idx & 7) == 7) std::cout << '\t'; } std::cout << std::endl; } //void bench(unsigned int board[4]) //{ // std::chrono::steady_clock::time_point start, finish, start2, finish2; // std::chrono::duration<double> elapsed, elapsed2; // // start = std::chrono::high_resolution_clock::now(); // for (unsigned int i = 0; i < 1000000; ++i) // { // for (unsigned int idx = 0; idx < 32; ++idx) // { // // old - GET_VAL_BOARD_S(idx, board); // int tmp = GET_VAL_BOARD_S(idx, board) & 3; // //int tmp = GET_VAL_BOARD_S(idx, board) << 2; // //int tmp = GET_VAL_BOARD_S(idx, board) >> 2; // //int tmp = GET_VAL_BOARD_S(idx, board); // //int tmp = 16 | 123; // } // } // finish = std::chrono::high_resolution_clock::now(); // elapsed = (finish - start) / 1000000; // // start2 = std::chrono::high_resolution_clock::now(); // for (unsigned int i = 0; i < 1000000; ++i) // { // for (unsigned int idx = 0; idx < 32; ++idx) // { // // old - GET_VAL_BOARD_S2(idx, board); // int tmp = GET_VAL_BOARD_S(idx, board) % 4; // //int tmp = GET_VAL_BOARD_S(idx, board) * 4; // //int tmp = GET_VAL_BOARD_S(idx, board) / 4; // //int tmp = GET_VAL_BOARD_S(idx, board) / 4; // //int tmp = 16 ^ 123; // } // } // finish2 = std::chrono::high_resolution_clock::now(); // elapsed2 = (finish2 - start2) / 1000000; // // //old - std::cout << "Average time for GET_VAL_BOARD_S: " << elapsed.count() << std::endl; // //old - std::cout << "Average time for GET_VAL_BOARD_S2: " << elapsed2.count() << std::endl << std::endl; // std::cout << "Average time for & 3:\t" << elapsed.count() << std::endl; // std::cout << "Average time for % 4:\t" << elapsed2.count() << std::endl << std::endl; // //std::cout << "Average time for << 2:\t" 
<< elapsed.count() << std::endl; // //std::cout << "Average time for * 4:\t" << elapsed2.count() << std::endl << std::endl; // //std::cout << "Average time for >> 2:\t" << elapsed.count() << std::endl; // //std::cout << "Average time for / 4:\t" << elapsed2.count() << std::endl << std::endl; // //std::cout << "Average time for get:\t" << elapsed.count() << std::endl; // //std::cout << "Average time for get/4:\t" << elapsed2.count() << std::endl << std::endl; // //std::cout << "Average time for | :\t" << elapsed.count() << std::endl; // //std::cout << "Average time for ^ :\t" << elapsed2.count() << std::endl << std::endl; //}
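// --- Illustrative sketch (not part of the original sources) ------------------
// The comment above get_end_state() explains how the game result is packed into
// board[0]: bit 2048 marks a white win, bit 128 a black win, both bits together a
// draw, and neither bit an error. The snippet below shows one way to decode that
// word on the host side; the helper name decode_end_state and the sample values in
// main are hypothetical and exist only for this example.
#include <cstdio>

static const char* decode_end_state(unsigned int board0)
{
    // Same extraction as in the source comment: 0 - error, 1 - black win,
    // 2 - white win, 3 - draw.
    unsigned int const result = (board0 & 2048 ? 2u : 0u) | (board0 & 128 ? 1u : 0u);
    switch (result)
    {
    case 1:  return "Black won";
    case 2:  return "White won";
    case 3:  return "Draw";
    default: return "Error";
    }
}

int main()
{
    std::printf("%s\n", decode_end_state(2048u));        // only the white-win bit set
    std::printf("%s\n", decode_end_state(2048u | 128u)); // both bits set decodes to a draw
    return 0;
}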
81242019bab7b7d8f2d7d0387657691c8ddee8c1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __device__ unsigned int getGid3d3d(){ int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.y * blockDim.x) + (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x; return threadId; } __global__ void energy_sum(double2 *in1, double2 *in2, double *out){ int gid = getGid3d3d(); out[gid] = in1[gid].x + in2[gid].x; }
81242019bab7b7d8f2d7d0387657691c8ddee8c1.cu
#include "includes.h" __device__ unsigned int getGid3d3d(){ int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.y * blockDim.x) + (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x; return threadId; } __global__ void energy_sum(double2 *in1, double2 *in2, double *out){ int gid = getGid3d3d(); out[gid] = in1[gid].x + in2[gid].x; }
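// --- Illustrative sketch (not part of the original file) ---------------------
// Minimal host driver showing how energy_sum might be launched with a 3-D grid and
// a 3-D block, which is the layout getGid3d3d's index arithmetic assumes. It is
// meant to be compiled together with the kernel above; the grid/block dimensions
// and the element count n are arbitrary example values, not taken from the project.
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

__global__ void energy_sum(double2 *in1, double2 *in2, double *out); // defined above

int main()
{
    dim3 const block(8, 4, 2); // 64 threads per block
    dim3 const grid(4, 2, 2);  // 16 blocks
    size_t const n = (size_t)block.x * block.y * block.z * grid.x * grid.y * grid.z;

    std::vector<double2> h_in1(n), h_in2(n);
    for (size_t i = 0; i < n; ++i) { h_in1[i] = make_double2(1.0, 0.0); h_in2[i] = make_double2(2.0, 0.0); }

    double2 *d_in1 = 0, *d_in2 = 0; double *d_out = 0;
    cudaMalloc(&d_in1, n * sizeof(double2));
    cudaMalloc(&d_in2, n * sizeof(double2));
    cudaMalloc(&d_out, n * sizeof(double));
    cudaMemcpy(d_in1, h_in1.data(), n * sizeof(double2), cudaMemcpyHostToDevice);
    cudaMemcpy(d_in2, h_in2.data(), n * sizeof(double2), cudaMemcpyHostToDevice);

    // One thread per element: each thread writes out[gid] = in1[gid].x + in2[gid].x.
    energy_sum<<<grid, block>>>(d_in1, d_in2, d_out);

    std::vector<double> h_out(n);
    cudaMemcpy(h_out.data(), d_out, n * sizeof(double), cudaMemcpyDeviceToHost);
    std::printf("out[0] = %f\n", h_out[0]); // expected 3.0 with the inputs above

    cudaFree(d_in1); cudaFree(d_in2); cudaFree(d_out);
    return 0;
}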
aa5cf430a34782f2d4839986cdd2c3d610cfb2a2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void BinaryErosion (unsigned int *dst, int imageW, int imageH, int mask_w, int mask_h) { const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; if(ix < imageW && iy < imageH){ int match = 0; for (int m = ix - mask_w ; m < ix + mask_w && !match; m++){ for (int n = iy - mask_h ; n < iy + mask_h && !match; n++){ float4 fresult = tex2D(texUCHAR, m, n); if (fresult.x == 1.f && fresult.y == 1.f && fresult.z == 1.f ) match = 1; } } if(!match) dst[imageW * iy + ix] = make_color(0.f, 0.f, 0.f , 1.f); else dst[imageW * iy + ix] = make_color(1.f, 1.f, 1.f , 1.f); } } extern "C" float binaryErosionWrapper (unsigned int *dst, int imageW, int imageH, int threshold, int iteration, float brightness, float contrast, int mask_w, int mask_h, int adjust) { //for more effective kernel execution dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); unsigned int timer; float runtime; cutCreateTimer(&timer); cutStartTimer(timer); if(adjust) hipLaunchKernelGGL(( Grayscale), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, brightness, contrast); else hipLaunchKernelGGL(( Grayscale2), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH); hipLaunchKernelGGL(( Binarize), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, threshold); for(int i=0; i<iteration; i++) { hipMemcpyToArray( d_tempArray, 0, 0, dst, imageW * imageH * sizeof(unsigned int), hipMemcpyDeviceToDevice); hipBindTextureToArray(texUCHAR, d_tempArray); hipLaunchKernelGGL(( BinaryErosion), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, mask_w, mask_h); } hipUnbindTexture(texUCHAR); hipDeviceSynchronize(); cutStopTimer(timer); runtime = cutGetTimerValue(timer)/1000; cutDeleteTimer(timer); return runtime; }
aa5cf430a34782f2d4839986cdd2c3d610cfb2a2.cu
__global__ void BinaryErosion (unsigned int *dst, int imageW, int imageH, int mask_w, int mask_h) { const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; if(ix < imageW && iy < imageH){ int match = 0; for (int m = ix - mask_w ; m < ix + mask_w && !match; m++){ for (int n = iy - mask_h ; n < iy + mask_h && !match; n++){ float4 fresult = tex2D(texUCHAR, m, n); if (fresult.x == 1.f && fresult.y == 1.f && fresult.z == 1.f ) match = 1; } } if(!match) dst[imageW * iy + ix] = make_color(0.f, 0.f, 0.f , 1.f); else dst[imageW * iy + ix] = make_color(1.f, 1.f, 1.f , 1.f); } } extern "C" float binaryErosionWrapper (unsigned int *dst, int imageW, int imageH, int threshold, int iteration, float brightness, float contrast, int mask_w, int mask_h, int adjust) { //for more effective kernel execution dim3 threads(BLOCKDIM_X, BLOCKDIM_Y); dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y)); unsigned int timer; float runtime; cutCreateTimer(&timer); cutStartTimer(timer); if(adjust) Grayscale<<<grid, threads>>>(dst, imageW, imageH, brightness, contrast); else Grayscale2<<<grid, threads>>>(dst, imageW, imageH); Binarize<<<grid, threads>>>(dst, imageW, imageH, threshold); for(int i=0; i<iteration; i++) { cudaMemcpyToArray( d_tempArray, 0, 0, dst, imageW * imageH * sizeof(unsigned int), cudaMemcpyDeviceToDevice); cudaBindTextureToArray(texUCHAR, d_tempArray); BinaryErosion<<<grid, threads>>>(dst, imageW, imageH, mask_w, mask_h); } cudaUnbindTexture(texUCHAR); cudaThreadSynchronize(); cutStopTimer(timer); runtime = cutGetTimerValue(timer)/1000; cutDeleteTimer(timer); return runtime; }
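// --- Illustrative sketch (not part of the original file) ---------------------
// binaryErosionWrapper sizes its grid as iDivUp(imageW, BLOCKDIM_X) by
// iDivUp(imageH, BLOCKDIM_Y), i.e. a ceiling division so that a partial tile at the
// image border still gets its own block. The helper below is the common CUDA-SDK
// style definition of iDivUp; it is an assumption here, since the project's real
// definition (and the BLOCKDIM_X/BLOCKDIM_Y values) live in headers not shown.
#include <cstdio>

static int iDivUp_example(int a, int b)
{
    return (a % b != 0) ? (a / b + 1) : (a / b); // ceiling of a / b for positive ints
}

int main()
{
    int const imageW = 1000, imageH = 750;
    int const blockDimX = 16, blockDimY = 12; // stand-ins for BLOCKDIM_X / BLOCKDIM_Y
    std::printf("grid = %d x %d blocks\n",
                iDivUp_example(imageW, blockDimX),  // 63 blocks cover 1000 columns
                iDivUp_example(imageH, blockDimY)); // 63 blocks cover 750 rows
    return 0;
}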
cusublayer.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /**************************************************************************//** * * \file cusublayer.cu * \author Daniel Strigl, Klaus Kofler * \date Jun 09 2009 * * $Id: cusublayer.cu 3558 2010-11-22 11:04:51Z klaus $ * * \brief Implementation of cnnplus::CuSubLayer. * *****************************************************************************/ #include "cudautils.hh" /////////////////////////////////////////////////////////////////////////////// // CUDA kernels __global__ void subsample_kernel1(float const * in, size_t const strideIn, float const * weights, float const * biases, float * inSub, size_t const strideInSub, float * sum, size_t const strideSum, size_t const mapsInH, size_t const mapsInW, size_t const mapsOutH, size_t const mapsOutW, size_t const sampleH, size_t const sampleW) { size_t const numMap = blockIdx.y; float const * mapIn = in + CNN_UIMUL(numMap, strideIn); size_t const r = threadIdx.y; size_t const c = threadIdx.x; size_t const top = CNN_UIMUL(r, sampleH); size_t const left = CNN_UIMUL(c, sampleW); size_t const bottom = top + sampleH; size_t const right = left + sampleW; size_t const i = CNN_UIMUL(r, mapsOutW) + c; float tmp = 0; for (size_t y = top; y < bottom; ++y) { for (size_t x = left; x < right; ++x) { tmp += mapIn[CNN_UIMUL(y, mapsInW) + x]; } } inSub[CNN_UIMUL(numMap, strideInSub) + i] = tmp; sum [CNN_UIMUL(numMap, strideSum ) + i] = tmp * weights[numMap] + biases[numMap]; } __global__ void subsample_kernel2(float const * in, size_t const strideIn, float const * weights, float const * biases, float * inSub, size_t const strideInSub, float * sum, size_t const strideSum, size_t const mapsInH, size_t const mapsInW, size_t const mapsOutH, size_t const mapsOutW, size_t const sampleH, size_t const sampleW, size_t const numMaps) { size_t const numMap = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y; size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x; if (numMap >= numMaps || i >= CNN_UIMUL(mapsOutH, mapsOutW)) return; float const * mapIn = in + CNN_UIMUL(numMap, strideIn); size_t const r = i / mapsOutW; size_t const c = i % mapsOutW; size_t const top = CNN_UIMUL(r, sampleH); size_t const left = CNN_UIMUL(c, sampleW); size_t const bottom = top + sampleH; size_t const right = left + sampleW; float tmp = 0; for (size_t y = top; y < bottom; ++y) { for (size_t x = left; x < right; ++x) { tmp += mapIn[CNN_UIMUL(y, mapsInW) + x]; } } inSub[CNN_UIMUL(numMap, strideInSub) + i] = tmp; sum [CNN_UIMUL(numMap, strideSum ) + i] = tmp * weights[numMap] + biases[numMap]; } __global__ void upsample_kernel1(float const * delta, size_t const strideDelta, float const * weights, float * in, size_t const strideIn, size_t const mapsInH, size_t const mapsInW, size_t const mapsOutH, size_t const mapsOutW, size_t const sampleH, size_t const sampleW) { size_t const numMap = blockIdx.y; float * mapIn = in + CNN_UIMUL(numMap, strideIn); size_t const r = threadIdx.y; size_t const c = threadIdx.x; size_t const top = CNN_UIMUL(r, sampleH); size_t const left = CNN_UIMUL(c, sampleW); size_t const bottom = top + sampleH; size_t const right = left + sampleW; size_t const i = CNN_UIMUL(r, mapsOutW) + c; float const tmp = delta[CNN_UIMUL(numMap, strideDelta) + i] * weights[numMap]; for (size_t y = top; y < bottom; ++y) { for (size_t x = left; x < right; ++x) { mapIn[CNN_UIMUL(y, mapsInW) + x] = tmp; } } } __global__ void upsample_kernel2(float const * delta, size_t const strideDelta, float const * 
weights, float * in, size_t const strideIn, size_t const mapsInH, size_t const mapsInW, size_t const mapsOutH, size_t const mapsOutW, size_t const sampleH, size_t const sampleW, size_t const numMaps) { size_t const numMap = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y; size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x; if (numMap >= numMaps || i >= CNN_UIMUL(mapsOutH, mapsOutW)) return; float * mapIn = in + CNN_UIMUL(numMap, strideIn); size_t const r = i / mapsOutW; size_t const c = i % mapsOutW; size_t const top = CNN_UIMUL(r, sampleH); size_t const left = CNN_UIMUL(c, sampleW); size_t const bottom = top + sampleH; size_t const right = left + sampleW; float const tmp = delta[CNN_UIMUL(numMap, strideDelta) + i] * weights[numMap]; for (size_t y = top; y < bottom; ++y) { for (size_t x = left; x < right; ++x) { mapIn[CNN_UIMUL(y, mapsInW) + x] = tmp; } } } __global__ void upsample_kernel3(float const * delta, size_t const strideDelta, float const * weights, float * in, size_t const strideIn, size_t const mapsInH, size_t const mapsInW, size_t const mapsOutH, size_t const mapsOutW, size_t const sampleH, size_t const sampleW, size_t const numMaps) { size_t const numMap = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y; size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x; if (numMap >= numMaps || i >= CNN_UIMUL(mapsInH, mapsInW)) return; float * mapIn = in + CNN_UIMUL(numMap, strideIn); size_t const r = i / mapsInW; size_t const c = i % mapsInW; size_t const y = r / sampleH; size_t const x = c / sampleW; mapIn[i] = delta[CNN_UIMUL(numMap, strideDelta) + CNN_UIMUL(y, mapsOutW) + x] * weights[numMap]; } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// #include "cusublayer.hh" #include "cumvli.hh" #include "matvecli.hh" #include "mathli.hh" #include <sstream> CNNPLUS_NS_BEGIN /////////////////////////////////////////////////////////////////////////////// // CUDA kernel calls template<typename T> void subsample(T const * in, size_t const strideIn, T const * weights, T const * biases, T * inSub, size_t const strideInSub, T * sum, size_t const strideSum, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps); template<> void subsample<float>(float const * in, size_t const strideIn, float const * weights, float const * biases, float * inSub, size_t const strideInSub, float * sum, size_t const strideSum, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps) { CNNPLUS_ASSERT(in && strideIn >= sizeMapsIn.area() ); CNNPLUS_ASSERT(inSub && strideInSub >= sizeMapsOut.area()); CNNPLUS_ASSERT(sum && strideSum >= sizeMapsOut.area()); CNNPLUS_ASSERT(sizeMapsIn.area() > 0); CNNPLUS_ASSERT(sizeMapsOut.area() > 0); CNNPLUS_ASSERT(sizeSample.area() > 0); CNNPLUS_ASSERT(sizeSample.height() <= sizeMapsIn.height() && sizeSample.width() <= sizeMapsIn.width()); CNNPLUS_ASSERT( sizeMapsIn.height() == (sizeMapsOut.height() * sizeSample.height()) && sizeMapsIn.width() == (sizeMapsOut.width() * sizeSample.width())); CNNPLUS_ASSERT(numMaps > 0); CNNPLUS_ASSERT(weights && biases); #if 0 if (sizeMapsOut.area() <= MAX_THREADS) { dim3 const dimGrid(1, numMaps); dim3 const dimBlock(sizeMapsOut.width(), sizeMapsOut.height()); CNNPLUS_ASSERT(dimGrid.x == 1); hipLaunchKernelGGL(( subsample_kernel1), dim3(dimGrid), dim3(dimBlock), 0, 0, in, strideIn, weights, biases, inSub, strideInSub, sum, strideSum, 
sizeMapsIn.height(), sizeMapsIn.width(), sizeMapsOut.height(), sizeMapsOut.width(), sizeSample.height(), sizeSample.width()); CUDA_CHECK_ERROR("Kernel call 'subsample_kernel1' failed"); return; } #endif dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); dim3 const dimGrid((sizeMapsOut.area() + dimBlock.x - 1) / dimBlock.x, (numMaps + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( subsample_kernel2), dim3(dimGrid), dim3(dimBlock), 0, 0, in, strideIn, weights, biases, inSub, strideInSub, sum, strideSum, sizeMapsIn.height(), sizeMapsIn.width(), sizeMapsOut.height(), sizeMapsOut.width(), sizeSample.height(), sizeSample.width(), numMaps); CUDA_CHECK_ERROR("Kernel call 'subsample_kernel2' failed"); } template<> void subsample<double>(double const * in, size_t const strideIn, double const * weights, double const * biases, double * inSub, size_t const strideInSub, double * sum, size_t const strideSum, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps) { throw NotImplementedError("Not yet supported [CUDA<double>]."); } template<typename T> void upsample(T const * delta, size_t const strideDelta, T const * weights, T * in, size_t const strideIn, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps); template<> void upsample<float>(float const * delta, size_t const strideDelta, float const * weights, float * in, size_t const strideIn, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps) { CNNPLUS_ASSERT(delta && strideDelta >= sizeMapsOut.area()); CNNPLUS_ASSERT(in && strideIn >= sizeMapsIn.area() ); CNNPLUS_ASSERT(sizeMapsIn.area() > 0); CNNPLUS_ASSERT(sizeMapsOut.area() > 0); CNNPLUS_ASSERT(sizeSample.area() > 0); CNNPLUS_ASSERT(sizeSample.height() <= sizeMapsIn.height() && sizeSample.width() <= sizeMapsIn.width()); CNNPLUS_ASSERT( sizeMapsIn.height() == (sizeMapsOut.height() * sizeSample.height()) && sizeMapsIn.width() == (sizeMapsOut.width() * sizeSample.width())); CNNPLUS_ASSERT(numMaps > 0); CNNPLUS_ASSERT(weights); #if 0 if (sizeMapsOut.area() <= MAX_THREADS) { dim3 const dimGrid(1, numMaps); dim3 const dimBlock(sizeMapsOut.width(), sizeMapsOut.height()); CNNPLUS_ASSERT(dimGrid.x == 1); hipLaunchKernelGGL(( upsample_kernel1), dim3(dimGrid), dim3(dimBlock), 0, 0, delta, strideDelta, weights, in, strideIn, sizeMapsIn.height(), sizeMapsIn.width(), sizeMapsOut.height(), sizeMapsOut.width(), sizeSample.height(), sizeSample.width()); CUDA_CHECK_ERROR("Kernel call 'upsample_kernel1' failed"); return; } #endif #if 0 dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); dim3 const dimGrid((sizeMapsOut.area() + dimBlock.x - 1) / dimBlock.x, (numMaps + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( upsample_kernel2), dim3(dimGrid), dim3(dimBlock), 0, 0, delta, strideDelta, weights, in, strideIn, sizeMapsIn.height(), sizeMapsIn.width(), sizeMapsOut.height(), sizeMapsOut.width(), sizeSample.height(), sizeSample.width(), numMaps); CUDA_CHECK_ERROR("Kernel call 'upsample_kernel2' failed"); #else dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); dim3 const dimGrid((sizeMapsIn.area() + dimBlock.x - 1) / dimBlock.x, (numMaps + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( upsample_kernel3), dim3(dimGrid), dim3(dimBlock), 0, 0, delta, strideDelta, weights, in, strideIn, sizeMapsIn.height(), sizeMapsIn.width(), sizeMapsOut.height(), sizeMapsOut.width(), sizeSample.height(), sizeSample.width(), numMaps); CUDA_CHECK_ERROR("Kernel call 'upsample_kernel3' failed"); #endif } 
template<> void upsample<double>(double const * delta, size_t const strideDelta, double const * weights, double * in, size_t const strideIn, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps) { throw NotImplementedError("Not yet supported [CUDA<double>]."); } /////////////////////////////////////////////////////////////////////////////// // CuSubLayer implementation //! Computes the size of the output feature maps /*! \param sizeMapsIn size of input feature maps \param sizeSample sample size */ inline Size outputMapsSize(Size const & sizeMapsIn, Size const & sizeSample) { // Check parameters if (!(sizeSample.height() > 0 && sizeSample.height() <= sizeMapsIn.height() ) || !(sizeSample.width() > 0 && sizeSample.width() <= sizeMapsIn.width() ) || !(sizeMapsIn.height() > 0 && sizeMapsIn.height() % sizeSample.height() == 0) || !(sizeMapsIn.width() > 0 && sizeMapsIn.width() % sizeSample.width() == 0)) { throw ParameterError("Inconsistent input size and sample size."); } return Size(sizeMapsIn.height() / sizeSample.height(), sizeMapsIn.width() / sizeSample.width()); } template<typename T, class SquFnc> CuSubLayer<T, SquFnc>::CuSubLayer(Size const & sizeMapsIn, size_t const numMaps, Size const & sizeSample) : sizeMapsIn_(sizeMapsIn), numMaps_(numMaps), sizeSample_(sizeSample), sizeMapsOut_(outputMapsSize(sizeMapsIn_, sizeSample_)), squasher_(Size(numMaps_, sizeMapsOut_.area())) { // Allocate memory (GPU) d_inSat_ = cumvli::allocv<T>((sizeMapsIn_ + Size(1, 1)).area()); cumvli::zerov<T>(d_inSat_, (sizeMapsIn_ + Size(1, 1)).area()); d_inSub_ = cumvli::allocm<T>(numMaps, sizeMapsOut_.area(), d_strideInSub_); d_tmp_ = cumvli::allocv<T>(sizeMapsOut_.area()); d_weights_ = cumvli::allocv<T>(numMaps_); d_dWeights_ = cumvli::allocv<T>(numMaps_); d_biases_ = cumvli::allocv<T>(numMaps_); d_dBiases_ = cumvli::allocv<T>(numMaps_); d_sum_ = cumvli::allocm<T>(numMaps_, sizeMapsOut_.area(), d_strideSum_); d_delta_ = cumvli::allocm<T>(numMaps_, sizeMapsOut_.area(), d_strideDelta_); // Allocate memory (CPU) h_weights_ = matvecli::allocv<T>(numMaps_); h_biases_ = matvecli::allocv<T>(numMaps_); reset(); // Reset gradients to zero } template<typename T, class SquFnc> CuSubLayer<T, SquFnc>::~CuSubLayer() { // Deallocate memory (GPU) cumvli::free<T>(d_inSat_); cumvli::free<T>(d_inSub_); cumvli::free<T>(d_tmp_); cumvli::free<T>(d_weights_); cumvli::free<T>(d_dWeights_); cumvli::free<T>(d_biases_); cumvli::free<T>(d_dBiases_); cumvli::free<T>(d_sum_); cumvli::free<T>(d_delta_); // Deallocate memory (CPU) matvecli::free<T>(h_weights_); matvecli::free<T>(h_biases_); } template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::forget(T const sigma, bool scale) { // Initialize weights and biases with random values matvecli::randv<T>(h_weights_, numMaps_, sigma); if (scale) { matvecli::divvc<T>(h_weights_, static_cast<T>(sizeSample_.area()), numMaps_); } cumvli::copyv_h2d<T>(h_weights_, d_weights_, numMaps_); matvecli::randv<T>(h_biases_, numMaps_, sigma); cumvli::copyv_h2d<T>(h_biases_, d_biases_, numMaps_); } //! 
Initializes weights template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::forget() { // Set weights to 'coeff = m^(-1/2)', where 'm' is the fan-in // (the number of connections feeding into the node) //T const coeff = mathli::pow(static_cast<T>(sizeSample_.area()), T(-.5)); T const coeff = 1 / mathli::sqrt(static_cast<T>(sizeSample_.area())); matvecli::setv<T>(h_weights_, numMaps_, coeff); cumvli::copyv_h2d<T>(h_weights_, d_weights_, numMaps_); // Set biases to zero (as in lush 1.2.1, \gblearn2\gb-modules-nn.lsh) matvecli::zerov<T>(h_biases_, numMaps_); cumvli::copyv_h2d<T>(h_biases_, d_biases_, numMaps_); } template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::reset() { cumvli::zerov<T>(d_dWeights_, numMaps_); cumvli::zerov<T>(d_dBiases_, numMaps_); } template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::update(T const eta) { // Compute: weights_ += eta * dWeights_ cumvli::axpy<T>(d_dWeights_, numMaps_, d_weights_, eta); // Compute: biases_ += eta * dBiases_ cumvli::axpy<T>(d_dBiases_, numMaps_, d_biases_, eta); } template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::fprop(T const * in, size_t const strideIn, T * out, size_t const strideOut) { CNNPLUS_ASSERT(in && strideIn >= sizeMapsIn_.area()); CNNPLUS_ASSERT(!out || (out && strideOut >= sizeMapsOut_.area())); // TODO doc subsample<T>(in, strideIn, d_weights_, d_biases_, d_inSub_, d_strideInSub_, d_sum_, d_strideSum_, sizeMapsIn_, sizeMapsOut_, sizeSample_, numMaps_); if (out) { // Compute: out = f(sum_) squasher_.fprop(d_sum_, d_strideSum_, out, strideOut); } } template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::bprop(T * in, size_t const strideIn, T const * out, size_t const strideOut, bool accumGradients) { CNNPLUS_ASSERT(!in || (in && strideIn >= sizeMapsIn_.area())); CNNPLUS_ASSERT(out && strideOut >= sizeMapsOut_.area()); // Compute: delta_ = f'(sum_) .* out cumvli::copymm<T>(d_sum_, d_strideSum_, d_delta_, d_strideDelta_, numMaps_, sizeMapsOut_.area()); squasher_.bprop(d_delta_, d_strideDelta_, out, strideOut); if (in) { // TODO doc upsample<T>(d_delta_, d_strideDelta_, d_weights_, in, strideIn, sizeMapsIn_, sizeMapsOut_, sizeSample_, numMaps_); } if (accumGradients) { // Compute: dBiases_ += sums of row vectors in delta_ cumvli::sumrowacc<T>(d_delta_, d_strideDelta_, d_dBiases_, numMaps_, sizeMapsOut_.area()); // Compute: delta_ = delta_ .* inSub_ cumvli::pmulmm<T>(d_delta_, d_strideDelta_, d_inSub_, d_strideInSub_, numMaps_, sizeMapsOut_.area()); // Compute: dWeights_ += sums of row vectors in delta_ cumvli::sumrowacc<T>(d_delta_, d_strideDelta_, d_dWeights_, numMaps_, sizeMapsOut_.area()); } else { // Compute: dBiases_ = sums of row vectors in delta_ cumvli::sumrow<T>(d_delta_, d_strideDelta_, d_dBiases_, numMaps_, sizeMapsOut_.area()); // Compute: delta_ = delta_ .* inSub_ cumvli::pmulmm<T>(d_delta_, d_strideDelta_, d_inSub_, d_strideInSub_, numMaps_, sizeMapsOut_.area()); // Compute: dWeights_ = sums of row vectors in delta_ cumvli::sumrow<T>(d_delta_, d_strideDelta_, d_dWeights_, numMaps_, sizeMapsOut_.area()); } } #ifdef CNNPLUS_MATLAB_FOUND template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::load(mxArray const * arr) { if (!arr || !mxIsStruct(arr) || !this->checkType(arr, "s")) throw MatlabError("Failed to read subsampling layer."); // Read Matlab array with weight values mxArray const * arrW = mxGetField(arr, 0, "weights"); { mwSize const dims[] = { numMaps_, 1 }; if (!this->checkArr(arrW, countof(dims), dims)) throw MatlabError("Failed to read 'weights'."); } // 
Read Matlab array with bias values mxArray const * arrB = mxGetField(arr, 0, "biases"); { mwSize const dims[] = { numMaps_, 1 }; if (!this->checkArr(arrB, countof(dims), dims)) throw MatlabError("Failed to read 'biases'."); } double const * pArrW = static_cast<double const *>(mxGetData(arrW)); double const * pArrB = static_cast<double const *>(mxGetData(arrB)); // Read weight and bias values from Matlab array for (size_t i = 0; i < numMaps_; ++i) { h_weights_[i] = static_cast<T>(pArrW[i]); h_biases_[i] = static_cast<T>(pArrB[i]); } cumvli::copyv_h2d<T>(h_weights_, d_weights_, numMaps_); cumvli::copyv_h2d<T>(h_biases_, d_biases_, numMaps_); } template<typename T, class SquFnc> mxArray * CuSubLayer<T, SquFnc>::save() const { char const * fieldnames[] = { "type", "weights", "biases" }; mxArray * arr = mxCreateStructMatrix(1, 1, countof(fieldnames), fieldnames); if (!arr) throw MatlabError("Failed to create array."); mxArray * arrW = NULL, * arrB = NULL; try { // Create Matlab arrays arrW = mxCreateDoubleMatrix(numMaps_, 1, mxREAL); if (!arrW) throw MatlabError("Failed to create array."); arrB = mxCreateDoubleMatrix(numMaps_, 1, mxREAL); if (!arrB) throw MatlabError("Failed to create array."); } catch (...) { if (arrW) mxDestroyArray(arrW); if (arrB) mxDestroyArray(arrB); throw; } double * pArrW = static_cast<double *>(mxGetData(arrW)); double * pArrB = static_cast<double *>(mxGetData(arrB)); // Copy weight and bias values to Matlab array cumvli::copyv_d2h<T>(d_weights_, h_weights_, numMaps_); cumvli::copyv_d2h<T>(d_biases_, h_biases_, numMaps_); for (size_t i = 0; i < numMaps_; ++i) { pArrW[i] = h_weights_[i]; pArrB[i] = h_biases_[i]; } // Write Matlab arrays to Matlab structure mxSetField(arr, 0, "type", mxCreateString("s")); mxSetField(arr, 0, "weights", arrW); mxSetField(arr, 0, "biases", arrB); return arr; } #endif // CNNPLUS_MATLAB_FOUND template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::trainableParam(typename Layer<T>::TrainableParam & param) { // Weights param.weights.val = d_weights_; param.weights.dVal = d_dWeights_; param.weights.mask = NULL; param.weights.strideVal = numMaps_; param.weights.strideDVal = numMaps_; param.weights.strideMask = 0; param.weights.rows = 1; param.weights.cols = numMaps_; // Biases param.biases.val = d_biases_; param.biases.dVal = d_dBiases_; param.biases.len = numMaps_; } template<typename T, class SquFnc> std::string CuSubLayer<T, SquFnc>::toString() const { std::stringstream ss; ss << "CuSubLayer" << "<" << numMaps_ << "x" << sizeMapsIn_.toString() << "," << numMaps_ << "x" << sizeMapsOut_.toString() << "," << sizeSample_.toString() << ";" << squasher_.toString() << ">"; return ss.str(); } template<typename T, class SquFnc> size_t CuSubLayer<T, SquFnc>::numTrainableParam() const { return (numMaps_ * 2); } template<typename T, class SquFnc> size_t CuSubLayer<T, SquFnc>::numConnections() const { return (numMaps_ * sizeMapsIn_.area() + numMaps_ * sizeMapsOut_.area()); } /////////////////////////////////////////////////////////////////////////////// /*! 
\addtogroup eti_grp Explicit Template Instantiation @{ */ template class CuSubLayer< float, CuTanh<float> >; template class CuSubLayer< float, CuStdSigmoid<float> >; template class CuSubLayer< float, CuLogSigmoid<float> >; template class CuSubLayer< float, CuIdentity<float> >; template class CuSubLayer< double, CuTanh<double> >; template class CuSubLayer< double, CuStdSigmoid<double> >; template class CuSubLayer< double, CuLogSigmoid<double> >; template class CuSubLayer< double, CuIdentity<double> >; /*! @} */ CNNPLUS_NS_END
cusublayer.cu
/**************************************************************************//** * * \file cusublayer.cu * \author Daniel Strigl, Klaus Kofler * \date Jun 09 2009 * * $Id: cusublayer.cu 3558 2010-11-22 11:04:51Z klaus $ * * \brief Implementation of cnnplus::CuSubLayer. * *****************************************************************************/ #include "cudautils.hh" /////////////////////////////////////////////////////////////////////////////// // CUDA kernels __global__ void subsample_kernel1(float const * in, size_t const strideIn, float const * weights, float const * biases, float * inSub, size_t const strideInSub, float * sum, size_t const strideSum, size_t const mapsInH, size_t const mapsInW, size_t const mapsOutH, size_t const mapsOutW, size_t const sampleH, size_t const sampleW) { size_t const numMap = blockIdx.y; float const * mapIn = in + CNN_UIMUL(numMap, strideIn); size_t const r = threadIdx.y; size_t const c = threadIdx.x; size_t const top = CNN_UIMUL(r, sampleH); size_t const left = CNN_UIMUL(c, sampleW); size_t const bottom = top + sampleH; size_t const right = left + sampleW; size_t const i = CNN_UIMUL(r, mapsOutW) + c; float tmp = 0; for (size_t y = top; y < bottom; ++y) { for (size_t x = left; x < right; ++x) { tmp += mapIn[CNN_UIMUL(y, mapsInW) + x]; } } inSub[CNN_UIMUL(numMap, strideInSub) + i] = tmp; sum [CNN_UIMUL(numMap, strideSum ) + i] = tmp * weights[numMap] + biases[numMap]; } __global__ void subsample_kernel2(float const * in, size_t const strideIn, float const * weights, float const * biases, float * inSub, size_t const strideInSub, float * sum, size_t const strideSum, size_t const mapsInH, size_t const mapsInW, size_t const mapsOutH, size_t const mapsOutW, size_t const sampleH, size_t const sampleW, size_t const numMaps) { size_t const numMap = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y; size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x; if (numMap >= numMaps || i >= CNN_UIMUL(mapsOutH, mapsOutW)) return; float const * mapIn = in + CNN_UIMUL(numMap, strideIn); size_t const r = i / mapsOutW; size_t const c = i % mapsOutW; size_t const top = CNN_UIMUL(r, sampleH); size_t const left = CNN_UIMUL(c, sampleW); size_t const bottom = top + sampleH; size_t const right = left + sampleW; float tmp = 0; for (size_t y = top; y < bottom; ++y) { for (size_t x = left; x < right; ++x) { tmp += mapIn[CNN_UIMUL(y, mapsInW) + x]; } } inSub[CNN_UIMUL(numMap, strideInSub) + i] = tmp; sum [CNN_UIMUL(numMap, strideSum ) + i] = tmp * weights[numMap] + biases[numMap]; } __global__ void upsample_kernel1(float const * delta, size_t const strideDelta, float const * weights, float * in, size_t const strideIn, size_t const mapsInH, size_t const mapsInW, size_t const mapsOutH, size_t const mapsOutW, size_t const sampleH, size_t const sampleW) { size_t const numMap = blockIdx.y; float * mapIn = in + CNN_UIMUL(numMap, strideIn); size_t const r = threadIdx.y; size_t const c = threadIdx.x; size_t const top = CNN_UIMUL(r, sampleH); size_t const left = CNN_UIMUL(c, sampleW); size_t const bottom = top + sampleH; size_t const right = left + sampleW; size_t const i = CNN_UIMUL(r, mapsOutW) + c; float const tmp = delta[CNN_UIMUL(numMap, strideDelta) + i] * weights[numMap]; for (size_t y = top; y < bottom; ++y) { for (size_t x = left; x < right; ++x) { mapIn[CNN_UIMUL(y, mapsInW) + x] = tmp; } } } __global__ void upsample_kernel2(float const * delta, size_t const strideDelta, float const * weights, float * in, size_t const strideIn, size_t const mapsInH, size_t const mapsInW, 
size_t const mapsOutH, size_t const mapsOutW, size_t const sampleH, size_t const sampleW, size_t const numMaps) { size_t const numMap = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y; size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x; if (numMap >= numMaps || i >= CNN_UIMUL(mapsOutH, mapsOutW)) return; float * mapIn = in + CNN_UIMUL(numMap, strideIn); size_t const r = i / mapsOutW; size_t const c = i % mapsOutW; size_t const top = CNN_UIMUL(r, sampleH); size_t const left = CNN_UIMUL(c, sampleW); size_t const bottom = top + sampleH; size_t const right = left + sampleW; float const tmp = delta[CNN_UIMUL(numMap, strideDelta) + i] * weights[numMap]; for (size_t y = top; y < bottom; ++y) { for (size_t x = left; x < right; ++x) { mapIn[CNN_UIMUL(y, mapsInW) + x] = tmp; } } } __global__ void upsample_kernel3(float const * delta, size_t const strideDelta, float const * weights, float * in, size_t const strideIn, size_t const mapsInH, size_t const mapsInW, size_t const mapsOutH, size_t const mapsOutW, size_t const sampleH, size_t const sampleW, size_t const numMaps) { size_t const numMap = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y; size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x; if (numMap >= numMaps || i >= CNN_UIMUL(mapsInH, mapsInW)) return; float * mapIn = in + CNN_UIMUL(numMap, strideIn); size_t const r = i / mapsInW; size_t const c = i % mapsInW; size_t const y = r / sampleH; size_t const x = c / sampleW; mapIn[i] = delta[CNN_UIMUL(numMap, strideDelta) + CNN_UIMUL(y, mapsOutW) + x] * weights[numMap]; } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// #include "cusublayer.hh" #include "cumvli.hh" #include "matvecli.hh" #include "mathli.hh" #include <sstream> CNNPLUS_NS_BEGIN /////////////////////////////////////////////////////////////////////////////// // CUDA kernel calls template<typename T> void subsample(T const * in, size_t const strideIn, T const * weights, T const * biases, T * inSub, size_t const strideInSub, T * sum, size_t const strideSum, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps); template<> void subsample<float>(float const * in, size_t const strideIn, float const * weights, float const * biases, float * inSub, size_t const strideInSub, float * sum, size_t const strideSum, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps) { CNNPLUS_ASSERT(in && strideIn >= sizeMapsIn.area() ); CNNPLUS_ASSERT(inSub && strideInSub >= sizeMapsOut.area()); CNNPLUS_ASSERT(sum && strideSum >= sizeMapsOut.area()); CNNPLUS_ASSERT(sizeMapsIn.area() > 0); CNNPLUS_ASSERT(sizeMapsOut.area() > 0); CNNPLUS_ASSERT(sizeSample.area() > 0); CNNPLUS_ASSERT(sizeSample.height() <= sizeMapsIn.height() && sizeSample.width() <= sizeMapsIn.width()); CNNPLUS_ASSERT( sizeMapsIn.height() == (sizeMapsOut.height() * sizeSample.height()) && sizeMapsIn.width() == (sizeMapsOut.width() * sizeSample.width())); CNNPLUS_ASSERT(numMaps > 0); CNNPLUS_ASSERT(weights && biases); #if 0 if (sizeMapsOut.area() <= MAX_THREADS) { dim3 const dimGrid(1, numMaps); dim3 const dimBlock(sizeMapsOut.width(), sizeMapsOut.height()); CNNPLUS_ASSERT(dimGrid.x == 1); subsample_kernel1<<<dimGrid, dimBlock>>>( in, strideIn, weights, biases, inSub, strideInSub, sum, strideSum, sizeMapsIn.height(), sizeMapsIn.width(), sizeMapsOut.height(), sizeMapsOut.width(), sizeSample.height(), sizeSample.width()); 
CUDA_CHECK_ERROR("Kernel call 'subsample_kernel1' failed"); return; } #endif dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); dim3 const dimGrid((sizeMapsOut.area() + dimBlock.x - 1) / dimBlock.x, (numMaps + dimBlock.y - 1) / dimBlock.y); subsample_kernel2<<<dimGrid, dimBlock>>>( in, strideIn, weights, biases, inSub, strideInSub, sum, strideSum, sizeMapsIn.height(), sizeMapsIn.width(), sizeMapsOut.height(), sizeMapsOut.width(), sizeSample.height(), sizeSample.width(), numMaps); CUDA_CHECK_ERROR("Kernel call 'subsample_kernel2' failed"); } template<> void subsample<double>(double const * in, size_t const strideIn, double const * weights, double const * biases, double * inSub, size_t const strideInSub, double * sum, size_t const strideSum, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps) { throw NotImplementedError("Not yet supported [CUDA<double>]."); } template<typename T> void upsample(T const * delta, size_t const strideDelta, T const * weights, T * in, size_t const strideIn, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps); template<> void upsample<float>(float const * delta, size_t const strideDelta, float const * weights, float * in, size_t const strideIn, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps) { CNNPLUS_ASSERT(delta && strideDelta >= sizeMapsOut.area()); CNNPLUS_ASSERT(in && strideIn >= sizeMapsIn.area() ); CNNPLUS_ASSERT(sizeMapsIn.area() > 0); CNNPLUS_ASSERT(sizeMapsOut.area() > 0); CNNPLUS_ASSERT(sizeSample.area() > 0); CNNPLUS_ASSERT(sizeSample.height() <= sizeMapsIn.height() && sizeSample.width() <= sizeMapsIn.width()); CNNPLUS_ASSERT( sizeMapsIn.height() == (sizeMapsOut.height() * sizeSample.height()) && sizeMapsIn.width() == (sizeMapsOut.width() * sizeSample.width())); CNNPLUS_ASSERT(numMaps > 0); CNNPLUS_ASSERT(weights); #if 0 if (sizeMapsOut.area() <= MAX_THREADS) { dim3 const dimGrid(1, numMaps); dim3 const dimBlock(sizeMapsOut.width(), sizeMapsOut.height()); CNNPLUS_ASSERT(dimGrid.x == 1); upsample_kernel1<<<dimGrid, dimBlock>>>( delta, strideDelta, weights, in, strideIn, sizeMapsIn.height(), sizeMapsIn.width(), sizeMapsOut.height(), sizeMapsOut.width(), sizeSample.height(), sizeSample.width()); CUDA_CHECK_ERROR("Kernel call 'upsample_kernel1' failed"); return; } #endif #if 0 dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); dim3 const dimGrid((sizeMapsOut.area() + dimBlock.x - 1) / dimBlock.x, (numMaps + dimBlock.y - 1) / dimBlock.y); upsample_kernel2<<<dimGrid, dimBlock>>>( delta, strideDelta, weights, in, strideIn, sizeMapsIn.height(), sizeMapsIn.width(), sizeMapsOut.height(), sizeMapsOut.width(), sizeSample.height(), sizeSample.width(), numMaps); CUDA_CHECK_ERROR("Kernel call 'upsample_kernel2' failed"); #else dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT); dim3 const dimGrid((sizeMapsIn.area() + dimBlock.x - 1) / dimBlock.x, (numMaps + dimBlock.y - 1) / dimBlock.y); upsample_kernel3<<<dimGrid, dimBlock>>>( delta, strideDelta, weights, in, strideIn, sizeMapsIn.height(), sizeMapsIn.width(), sizeMapsOut.height(), sizeMapsOut.width(), sizeSample.height(), sizeSample.width(), numMaps); CUDA_CHECK_ERROR("Kernel call 'upsample_kernel3' failed"); #endif } template<> void upsample<double>(double const * delta, size_t const strideDelta, double const * weights, double * in, size_t const strideIn, Size const & sizeMapsIn, Size const & sizeMapsOut, Size const & sizeSample, size_t const numMaps) { throw NotImplementedError("Not 
yet supported [CUDA<double>]."); } /////////////////////////////////////////////////////////////////////////////// // CuSubLayer implementation //! Computes the size of the output feature maps /*! \param sizeMapsIn size of input feature maps \param sizeSample sample size */ inline Size outputMapsSize(Size const & sizeMapsIn, Size const & sizeSample) { // Check parameters if (!(sizeSample.height() > 0 && sizeSample.height() <= sizeMapsIn.height() ) || !(sizeSample.width() > 0 && sizeSample.width() <= sizeMapsIn.width() ) || !(sizeMapsIn.height() > 0 && sizeMapsIn.height() % sizeSample.height() == 0) || !(sizeMapsIn.width() > 0 && sizeMapsIn.width() % sizeSample.width() == 0)) { throw ParameterError("Inconsistent input size and sample size."); } return Size(sizeMapsIn.height() / sizeSample.height(), sizeMapsIn.width() / sizeSample.width()); } template<typename T, class SquFnc> CuSubLayer<T, SquFnc>::CuSubLayer(Size const & sizeMapsIn, size_t const numMaps, Size const & sizeSample) : sizeMapsIn_(sizeMapsIn), numMaps_(numMaps), sizeSample_(sizeSample), sizeMapsOut_(outputMapsSize(sizeMapsIn_, sizeSample_)), squasher_(Size(numMaps_, sizeMapsOut_.area())) { // Allocate memory (GPU) d_inSat_ = cumvli::allocv<T>((sizeMapsIn_ + Size(1, 1)).area()); cumvli::zerov<T>(d_inSat_, (sizeMapsIn_ + Size(1, 1)).area()); d_inSub_ = cumvli::allocm<T>(numMaps, sizeMapsOut_.area(), d_strideInSub_); d_tmp_ = cumvli::allocv<T>(sizeMapsOut_.area()); d_weights_ = cumvli::allocv<T>(numMaps_); d_dWeights_ = cumvli::allocv<T>(numMaps_); d_biases_ = cumvli::allocv<T>(numMaps_); d_dBiases_ = cumvli::allocv<T>(numMaps_); d_sum_ = cumvli::allocm<T>(numMaps_, sizeMapsOut_.area(), d_strideSum_); d_delta_ = cumvli::allocm<T>(numMaps_, sizeMapsOut_.area(), d_strideDelta_); // Allocate memory (CPU) h_weights_ = matvecli::allocv<T>(numMaps_); h_biases_ = matvecli::allocv<T>(numMaps_); reset(); // Reset gradients to zero } template<typename T, class SquFnc> CuSubLayer<T, SquFnc>::~CuSubLayer() { // Deallocate memory (GPU) cumvli::free<T>(d_inSat_); cumvli::free<T>(d_inSub_); cumvli::free<T>(d_tmp_); cumvli::free<T>(d_weights_); cumvli::free<T>(d_dWeights_); cumvli::free<T>(d_biases_); cumvli::free<T>(d_dBiases_); cumvli::free<T>(d_sum_); cumvli::free<T>(d_delta_); // Deallocate memory (CPU) matvecli::free<T>(h_weights_); matvecli::free<T>(h_biases_); } template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::forget(T const sigma, bool scale) { // Initialize weights and biases with random values matvecli::randv<T>(h_weights_, numMaps_, sigma); if (scale) { matvecli::divvc<T>(h_weights_, static_cast<T>(sizeSample_.area()), numMaps_); } cumvli::copyv_h2d<T>(h_weights_, d_weights_, numMaps_); matvecli::randv<T>(h_biases_, numMaps_, sigma); cumvli::copyv_h2d<T>(h_biases_, d_biases_, numMaps_); } //! 
Initializes weights template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::forget() { // Set weights to 'coeff = m^(-1/2)', where 'm' is the fan-in // (the number of connections feeding into the node) //T const coeff = mathli::pow(static_cast<T>(sizeSample_.area()), T(-.5)); T const coeff = 1 / mathli::sqrt(static_cast<T>(sizeSample_.area())); matvecli::setv<T>(h_weights_, numMaps_, coeff); cumvli::copyv_h2d<T>(h_weights_, d_weights_, numMaps_); // Set biases to zero (as in lush 1.2.1, \gblearn2\gb-modules-nn.lsh) matvecli::zerov<T>(h_biases_, numMaps_); cumvli::copyv_h2d<T>(h_biases_, d_biases_, numMaps_); } template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::reset() { cumvli::zerov<T>(d_dWeights_, numMaps_); cumvli::zerov<T>(d_dBiases_, numMaps_); } template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::update(T const eta) { // Compute: weights_ += eta * dWeights_ cumvli::axpy<T>(d_dWeights_, numMaps_, d_weights_, eta); // Compute: biases_ += eta * dBiases_ cumvli::axpy<T>(d_dBiases_, numMaps_, d_biases_, eta); } template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::fprop(T const * in, size_t const strideIn, T * out, size_t const strideOut) { CNNPLUS_ASSERT(in && strideIn >= sizeMapsIn_.area()); CNNPLUS_ASSERT(!out || (out && strideOut >= sizeMapsOut_.area())); // TODO doc subsample<T>(in, strideIn, d_weights_, d_biases_, d_inSub_, d_strideInSub_, d_sum_, d_strideSum_, sizeMapsIn_, sizeMapsOut_, sizeSample_, numMaps_); if (out) { // Compute: out = f(sum_) squasher_.fprop(d_sum_, d_strideSum_, out, strideOut); } } template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::bprop(T * in, size_t const strideIn, T const * out, size_t const strideOut, bool accumGradients) { CNNPLUS_ASSERT(!in || (in && strideIn >= sizeMapsIn_.area())); CNNPLUS_ASSERT(out && strideOut >= sizeMapsOut_.area()); // Compute: delta_ = f'(sum_) .* out cumvli::copymm<T>(d_sum_, d_strideSum_, d_delta_, d_strideDelta_, numMaps_, sizeMapsOut_.area()); squasher_.bprop(d_delta_, d_strideDelta_, out, strideOut); if (in) { // TODO doc upsample<T>(d_delta_, d_strideDelta_, d_weights_, in, strideIn, sizeMapsIn_, sizeMapsOut_, sizeSample_, numMaps_); } if (accumGradients) { // Compute: dBiases_ += sums of row vectors in delta_ cumvli::sumrowacc<T>(d_delta_, d_strideDelta_, d_dBiases_, numMaps_, sizeMapsOut_.area()); // Compute: delta_ = delta_ .* inSub_ cumvli::pmulmm<T>(d_delta_, d_strideDelta_, d_inSub_, d_strideInSub_, numMaps_, sizeMapsOut_.area()); // Compute: dWeights_ += sums of row vectors in delta_ cumvli::sumrowacc<T>(d_delta_, d_strideDelta_, d_dWeights_, numMaps_, sizeMapsOut_.area()); } else { // Compute: dBiases_ = sums of row vectors in delta_ cumvli::sumrow<T>(d_delta_, d_strideDelta_, d_dBiases_, numMaps_, sizeMapsOut_.area()); // Compute: delta_ = delta_ .* inSub_ cumvli::pmulmm<T>(d_delta_, d_strideDelta_, d_inSub_, d_strideInSub_, numMaps_, sizeMapsOut_.area()); // Compute: dWeights_ = sums of row vectors in delta_ cumvli::sumrow<T>(d_delta_, d_strideDelta_, d_dWeights_, numMaps_, sizeMapsOut_.area()); } } #ifdef CNNPLUS_MATLAB_FOUND template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::load(mxArray const * arr) { if (!arr || !mxIsStruct(arr) || !this->checkType(arr, "s")) throw MatlabError("Failed to read subsampling layer."); // Read Matlab array with weight values mxArray const * arrW = mxGetField(arr, 0, "weights"); { mwSize const dims[] = { numMaps_, 1 }; if (!this->checkArr(arrW, countof(dims), dims)) throw MatlabError("Failed to read 'weights'."); } // 
Read Matlab array with bias values mxArray const * arrB = mxGetField(arr, 0, "biases"); { mwSize const dims[] = { numMaps_, 1 }; if (!this->checkArr(arrB, countof(dims), dims)) throw MatlabError("Failed to read 'biases'."); } double const * pArrW = static_cast<double const *>(mxGetData(arrW)); double const * pArrB = static_cast<double const *>(mxGetData(arrB)); // Read weight and bias values from Matlab array for (size_t i = 0; i < numMaps_; ++i) { h_weights_[i] = static_cast<T>(pArrW[i]); h_biases_[i] = static_cast<T>(pArrB[i]); } cumvli::copyv_h2d<T>(h_weights_, d_weights_, numMaps_); cumvli::copyv_h2d<T>(h_biases_, d_biases_, numMaps_); } template<typename T, class SquFnc> mxArray * CuSubLayer<T, SquFnc>::save() const { char const * fieldnames[] = { "type", "weights", "biases" }; mxArray * arr = mxCreateStructMatrix(1, 1, countof(fieldnames), fieldnames); if (!arr) throw MatlabError("Failed to create array."); mxArray * arrW = NULL, * arrB = NULL; try { // Create Matlab arrays arrW = mxCreateDoubleMatrix(numMaps_, 1, mxREAL); if (!arrW) throw MatlabError("Failed to create array."); arrB = mxCreateDoubleMatrix(numMaps_, 1, mxREAL); if (!arrB) throw MatlabError("Failed to create array."); } catch (...) { if (arrW) mxDestroyArray(arrW); if (arrB) mxDestroyArray(arrB); throw; } double * pArrW = static_cast<double *>(mxGetData(arrW)); double * pArrB = static_cast<double *>(mxGetData(arrB)); // Copy weight and bias values to Matlab array cumvli::copyv_d2h<T>(d_weights_, h_weights_, numMaps_); cumvli::copyv_d2h<T>(d_biases_, h_biases_, numMaps_); for (size_t i = 0; i < numMaps_; ++i) { pArrW[i] = h_weights_[i]; pArrB[i] = h_biases_[i]; } // Write Matlab arrays to Matlab structure mxSetField(arr, 0, "type", mxCreateString("s")); mxSetField(arr, 0, "weights", arrW); mxSetField(arr, 0, "biases", arrB); return arr; } #endif // CNNPLUS_MATLAB_FOUND template<typename T, class SquFnc> void CuSubLayer<T, SquFnc>::trainableParam(typename Layer<T>::TrainableParam & param) { // Weights param.weights.val = d_weights_; param.weights.dVal = d_dWeights_; param.weights.mask = NULL; param.weights.strideVal = numMaps_; param.weights.strideDVal = numMaps_; param.weights.strideMask = 0; param.weights.rows = 1; param.weights.cols = numMaps_; // Biases param.biases.val = d_biases_; param.biases.dVal = d_dBiases_; param.biases.len = numMaps_; } template<typename T, class SquFnc> std::string CuSubLayer<T, SquFnc>::toString() const { std::stringstream ss; ss << "CuSubLayer" << "<" << numMaps_ << "x" << sizeMapsIn_.toString() << "," << numMaps_ << "x" << sizeMapsOut_.toString() << "," << sizeSample_.toString() << ";" << squasher_.toString() << ">"; return ss.str(); } template<typename T, class SquFnc> size_t CuSubLayer<T, SquFnc>::numTrainableParam() const { return (numMaps_ * 2); } template<typename T, class SquFnc> size_t CuSubLayer<T, SquFnc>::numConnections() const { return (numMaps_ * sizeMapsIn_.area() + numMaps_ * sizeMapsOut_.area()); } /////////////////////////////////////////////////////////////////////////////// /*! 
\addtogroup eti_grp Explicit Template Instantiation @{ */ template class CuSubLayer< float, CuTanh<float> >; template class CuSubLayer< float, CuStdSigmoid<float> >; template class CuSubLayer< float, CuLogSigmoid<float> >; template class CuSubLayer< float, CuIdentity<float> >; template class CuSubLayer< double, CuTanh<double> >; template class CuSubLayer< double, CuStdSigmoid<double> >; template class CuSubLayer< double, CuLogSigmoid<double> >; template class CuSubLayer< double, CuIdentity<double> >; /*! @} */ CNNPLUS_NS_END
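The two cusublayer listings above differ only in headers and launch syntax: hipify replaces every CUDA triple-chevron launch with a hipLaunchKernelGGL(...) call and swaps in hip/hip_runtime.h, while the kernels and host logic stay identical. The short program below is a minimal sketch of that mapping, not code from cusublayer.cu; the kernel, buffer, and size (scale_kernel, d_data, n) are made-up names used only for illustration.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical kernel used only to illustrate the hipify launch rewrite.
__global__ void scale_kernel(float *data, float factor, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main()
{
    const int n = 256;
    float *d_data = NULL;
    cudaMalloc((void **) &d_data, n * sizeof(float));   // hipify: cudaMalloc -> hipMalloc
    cudaMemset(d_data, 0, n * sizeof(float));           // hipify: cudaMemset -> hipMemset
    dim3 block(128);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA launch; hipify rewrites this single line into
    //   hipLaunchKernelGGL(( scale_kernel), dim3(grid), dim3(block), 0, 0, d_data, 2.0f, n);
    scale_kernel<<<grid, block>>>(d_data, 2.0f, n);
    cudaDeviceSynchronize();                            // hipify: -> hipDeviceSynchronize
    cudaFree(d_data);                                   // hipify: -> hipFree
    return 0;
}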
neighborlist.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * neighborlist.cu * * Created on: Sep 4, 2016 * Author: uwe */ #include "neighborlist.h" #include "nativeTypesWrapper.h" #include "DeviceNLGrid.h" #include "DeviceProtein.h" #include "DeviceParamTable.h" #include "SimParam.h" #include "forcefield.h" #include "macros.h" namespace as { template<typename REAL> __global__ void d_NLPotForce( const d_NLGrid<REAL> grid, const d_Protein<REAL> rec, const d_Protein<REAL> lig, const d_ParamTable<REAL> table, const SimParam<REAL> simParam, const unsigned numDOFs, const REAL* LigPosX, const REAL* LigPosY, const REAL* LigPosZ, REAL* outLig_fx, REAL* outLig_fy, REAL* outLig_fz, REAL* outLigand_E) { using real3_t = typename TypeWrapper<REAL>::real3_t; const unsigned i = blockDim.x * blockIdx.x + threadIdx.x; const unsigned LigNumEl = lig.numAtoms; if (i < LigNumEl*numDOFs) { const unsigned LigAttrIdx = i % LigNumEl; const unsigned atomTypeLig = lig.type[LigAttrIdx]; if (atomTypeLig != 0) { const REAL posLigX = LigPosX[i]; const REAL posLigY = LigPosY[i]; const REAL posLigZ = LigPosZ[i]; /* test if particle is out of bounds and perform data fetch and neigbourlist calculations */ if (!( (posLigX < grid.minDim.x || posLigX > grid.maxDim.x) || (posLigY < grid.minDim.y || posLigY > grid.maxDim.y) || (posLigZ < grid.minDim.z || posLigZ > grid.maxDim.z) )) { const uint2 nDesc = tex3D<uint2>(grid.tex, (posLigX - grid.minDim.x) * grid.dVox_inv + 0.5, (posLigY - grid.minDim.y) * grid.dVox_inv + 0.5, (posLigZ - grid.minDim.z) * grid.dVox_inv + 0.5); /* numEl = x; idx = y */ real3_t fAcc = {0,0,0}; REAL eAcc = 0; for (unsigned j = 0; j < nDesc.x; ++j) { const unsigned nIdx = grid.neighborList[nDesc.y + j]; REAL dx = posLigX - rec.xPos[nIdx]; REAL dy = posLigY - rec.yPos[nIdx]; REAL dz = posLigZ - rec.zPos[nIdx]; const REAL dr2 = dx * dx + dy * dy + dz * dz; const REAL dPlateau2 = grid.dPlateau2; if ((dr2) > dPlateau2) { continue; } constexpr REAL one = static_cast<REAL>(1.0); const REAL dr2_inv = one/dr2; // inverse of dr2 // Scale distances dx *= dr2_inv; dy *= dr2_inv; dz *= dr2_inv; real3_t fVdW; REAL eVdW; const size_t atomTypeRec = rec.type[nIdx]; // calculate energy and potential/energy of LJ/VdW potential auto const params = table.getParams(atomTypeRec-1, atomTypeLig-1); LJPotForce(dr2, dr2_inv, dx, dy, dz, params, one, table.shape, fVdW.x, fVdW.y, fVdW.z, eVdW); fAcc.x += fVdW.x; fAcc.y += fVdW.y; fAcc.z += fVdW.z; eAcc += eVdW; const REAL chargeLig = lig.charge[LigAttrIdx]; const REAL chargeRec = rec.charge[nIdx]; const REAL chargeLigRec = chargeLig * chargeRec * simParam.ffelec; const bool calc_elec = abs(chargeLigRec) > 0.001; // evaluate electric potential REAL dPlateau2_inv = 1/grid.dPlateau2; const REAL ratio = sqrt(dr2*dPlateau2_inv); REAL rdx = ratio*dx; REAL rdy = ratio*dy; REAL rdz = ratio*dz; LJPotForce(dPlateau2, dPlateau2_inv, rdx, rdy, rdz, params, one, table.shape, fVdW.x, fVdW.y, fVdW.z, eVdW); fAcc.x -= fVdW.x; fAcc.y -= fVdW.y; fAcc.z -= fVdW.z; eAcc -= eVdW; if (calc_elec) { REAL eEl; real3_t fEl; // calculate energy and potential/energy of charge potential if (false) { printf("%u %f %f %f %u\n" , i, posLigX, posLigY, posLigZ, atomTypeLig); } ChargePotForce(dr2_inv, dx, dy, dz, chargeLigRec, one, simParam.dielec, fEl.x, fEl.y, fEl.z, eEl); fAcc.x += fEl.x; fAcc.y += fEl.y; fAcc.z += fEl.z; eAcc += eEl; ChargePotForce(dPlateau2_inv, rdx, rdy, rdz, chargeLigRec, one, simParam.dielec, fEl.x, fEl.y, fEl.z, eEl); fAcc.x -= fEl.x; fAcc.y -= fEl.y; fAcc.z -= 
fEl.z; eAcc -= eEl; } } /* store results back to global memory */ if (nDesc.x > 0) { outLig_fx[i] += fAcc.x; outLig_fy[i] += fAcc.y; outLig_fz[i] += fAcc.z; outLigand_E[i] += eAcc; } } } // if (atomtype != 0) } } template<typename REAL> void d_NLPotForce( unsigned blockSize, unsigned gridSize, const hipStream_t &stream, const d_NLGrid<REAL>& grid, const d_Protein<REAL>& rec, const d_Protein<REAL>& lig, const d_ParamTable<REAL>& table, const SimParam<REAL>& simParam, const unsigned& numDOFs, const REAL* LigPosX, const REAL* LigPosY, const REAL* LigPosZ, REAL* outLig_fx, REAL* outLig_fy, REAL* outLig_fz, REAL* outLigand_E) { cudaVerifyKernel(( hipLaunchKernelGGL(( d_NLPotForce), dim3(gridSize), dim3(blockSize), 0, stream, grid, rec, lig, table, simParam, numDOFs, LigPosX, LigPosY, LigPosZ, outLig_fx, outLig_fy, outLig_fz, outLigand_E ) )); } template void d_NLPotForce<float>( unsigned blockSize, unsigned gridSize, const hipStream_t &stream, const d_NLGrid<float>& grid, const d_Protein<float>& rec, const d_Protein<float>& lig, const d_ParamTable<float>& table, const SimParam<float>& simParam, const unsigned& numDOFs, const float* LigPosX, const float* LigPosY, const float* LigPosZ, float* outLig_fx, float* outLig_fy, float* outLig_fz, float* outLigand_E); template void d_NLPotForce<double>( unsigned blockSize, unsigned gridSize, const hipStream_t &stream, const d_NLGrid<double>& grid, const d_Protein<double>& rec, const d_Protein<double>& lig, const d_ParamTable<double>& table, const SimParam<double>& simParam, const unsigned& numDOFs, const double* LigPosX, const double* LigPosY, const double* LigPosZ, double* outLig_fx, double* outLig_fy, double* outLig_fz, double* outLigand_E); } // namespace as
neighborlist.cu
/* * neighborlist.cu * * Created on: Sep 4, 2016 * Author: uwe */ #include "neighborlist.h" #include "nativeTypesWrapper.h" #include "DeviceNLGrid.h" #include "DeviceProtein.h" #include "DeviceParamTable.h" #include "SimParam.h" #include "forcefield.h" #include "macros.h" namespace as { template<typename REAL> __global__ void d_NLPotForce( const d_NLGrid<REAL> grid, const d_Protein<REAL> rec, const d_Protein<REAL> lig, const d_ParamTable<REAL> table, const SimParam<REAL> simParam, const unsigned numDOFs, const REAL* LigPosX, const REAL* LigPosY, const REAL* LigPosZ, REAL* outLig_fx, REAL* outLig_fy, REAL* outLig_fz, REAL* outLigand_E) { using real3_t = typename TypeWrapper<REAL>::real3_t; const unsigned i = blockDim.x * blockIdx.x + threadIdx.x; const unsigned LigNumEl = lig.numAtoms; if (i < LigNumEl*numDOFs) { const unsigned LigAttrIdx = i % LigNumEl; const unsigned atomTypeLig = lig.type[LigAttrIdx]; if (atomTypeLig != 0) { const REAL posLigX = LigPosX[i]; const REAL posLigY = LigPosY[i]; const REAL posLigZ = LigPosZ[i]; /* test if particle is out of bounds and perform data fetch and neigbourlist calculations */ if (!( (posLigX < grid.minDim.x || posLigX > grid.maxDim.x) || (posLigY < grid.minDim.y || posLigY > grid.maxDim.y) || (posLigZ < grid.minDim.z || posLigZ > grid.maxDim.z) )) { const uint2 nDesc = tex3D<uint2>(grid.tex, (posLigX - grid.minDim.x) * grid.dVox_inv + 0.5, (posLigY - grid.minDim.y) * grid.dVox_inv + 0.5, (posLigZ - grid.minDim.z) * grid.dVox_inv + 0.5); /* numEl = x; idx = y */ real3_t fAcc = {0,0,0}; REAL eAcc = 0; for (unsigned j = 0; j < nDesc.x; ++j) { const unsigned nIdx = grid.neighborList[nDesc.y + j]; REAL dx = posLigX - rec.xPos[nIdx]; REAL dy = posLigY - rec.yPos[nIdx]; REAL dz = posLigZ - rec.zPos[nIdx]; const REAL dr2 = dx * dx + dy * dy + dz * dz; const REAL dPlateau2 = grid.dPlateau2; if ((dr2) > dPlateau2) { continue; } constexpr REAL one = static_cast<REAL>(1.0); const REAL dr2_inv = one/dr2; // inverse of dr2 // Scale distances dx *= dr2_inv; dy *= dr2_inv; dz *= dr2_inv; real3_t fVdW; REAL eVdW; const size_t atomTypeRec = rec.type[nIdx]; // calculate energy and potential/energy of LJ/VdW potential auto const params = table.getParams(atomTypeRec-1, atomTypeLig-1); LJPotForce(dr2, dr2_inv, dx, dy, dz, params, one, table.shape, fVdW.x, fVdW.y, fVdW.z, eVdW); fAcc.x += fVdW.x; fAcc.y += fVdW.y; fAcc.z += fVdW.z; eAcc += eVdW; const REAL chargeLig = lig.charge[LigAttrIdx]; const REAL chargeRec = rec.charge[nIdx]; const REAL chargeLigRec = chargeLig * chargeRec * simParam.ffelec; const bool calc_elec = abs(chargeLigRec) > 0.001; // evaluate electric potential REAL dPlateau2_inv = 1/grid.dPlateau2; const REAL ratio = sqrt(dr2*dPlateau2_inv); REAL rdx = ratio*dx; REAL rdy = ratio*dy; REAL rdz = ratio*dz; LJPotForce(dPlateau2, dPlateau2_inv, rdx, rdy, rdz, params, one, table.shape, fVdW.x, fVdW.y, fVdW.z, eVdW); fAcc.x -= fVdW.x; fAcc.y -= fVdW.y; fAcc.z -= fVdW.z; eAcc -= eVdW; if (calc_elec) { REAL eEl; real3_t fEl; // calculate energy and potential/energy of charge potential if (false) { printf("%u %f %f %f %u\n" , i, posLigX, posLigY, posLigZ, atomTypeLig); } ChargePotForce(dr2_inv, dx, dy, dz, chargeLigRec, one, simParam.dielec, fEl.x, fEl.y, fEl.z, eEl); fAcc.x += fEl.x; fAcc.y += fEl.y; fAcc.z += fEl.z; eAcc += eEl; ChargePotForce(dPlateau2_inv, rdx, rdy, rdz, chargeLigRec, one, simParam.dielec, fEl.x, fEl.y, fEl.z, eEl); fAcc.x -= fEl.x; fAcc.y -= fEl.y; fAcc.z -= fEl.z; eAcc -= eEl; } } /* store results back to global memory */ if (nDesc.x > 0) { 
outLig_fx[i] += fAcc.x; outLig_fy[i] += fAcc.y; outLig_fz[i] += fAcc.z; outLigand_E[i] += eAcc; } } } // if (atomtype != 0) } } template<typename REAL> void d_NLPotForce( unsigned blockSize, unsigned gridSize, const cudaStream_t &stream, const d_NLGrid<REAL>& grid, const d_Protein<REAL>& rec, const d_Protein<REAL>& lig, const d_ParamTable<REAL>& table, const SimParam<REAL>& simParam, const unsigned& numDOFs, const REAL* LigPosX, const REAL* LigPosY, const REAL* LigPosZ, REAL* outLig_fx, REAL* outLig_fy, REAL* outLig_fz, REAL* outLigand_E) { cudaVerifyKernel(( d_NLPotForce<<<gridSize, blockSize, 0, stream>>> ( grid, rec, lig, table, simParam, numDOFs, LigPosX, LigPosY, LigPosZ, outLig_fx, outLig_fy, outLig_fz, outLigand_E ) )); } template void d_NLPotForce<float>( unsigned blockSize, unsigned gridSize, const cudaStream_t &stream, const d_NLGrid<float>& grid, const d_Protein<float>& rec, const d_Protein<float>& lig, const d_ParamTable<float>& table, const SimParam<float>& simParam, const unsigned& numDOFs, const float* LigPosX, const float* LigPosY, const float* LigPosZ, float* outLig_fx, float* outLig_fy, float* outLig_fz, float* outLigand_E); template void d_NLPotForce<double>( unsigned blockSize, unsigned gridSize, const cudaStream_t &stream, const d_NLGrid<double>& grid, const d_Protein<double>& rec, const d_Protein<double>& lig, const d_ParamTable<double>& table, const SimParam<double>& simParam, const unsigned& numDOFs, const double* LigPosX, const double* LigPosY, const double* LigPosZ, double* outLig_fx, double* outLig_fy, double* outLig_fz, double* outLigand_E); } // namespace as
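The neighborlist pair applies the same rewrite to a templated kernel launched on an explicit stream: cudaStream_t becomes hipStream_t and the <<<gridSize, blockSize, 0, stream>>> launch becomes a hipLaunchKernelGGL call carrying the dynamic shared-memory size and the stream. The listing below is a hedged sketch of that host-wrapper-plus-explicit-instantiation pattern with a deliberately trivial kernel; axpy_kernel and d_axpy are invented names and do not appear in neighborlist.cu.

#include <cuda_runtime.h>

// Trivial templated kernel standing in for d_NLPotForce.
template<typename REAL>
__global__ void axpy_kernel(const REAL *x, REAL *y, REAL a, unsigned n)
{
    unsigned i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) y[i] += a * x[i];
}

// Host wrapper: forwards grid/block sizes and the caller's stream, as the
// d_NLPotForce wrapper does. hipify turns the launch below into
//   hipLaunchKernelGGL((axpy_kernel<REAL>), dim3(gridSize), dim3(blockSize), 0, stream, x, y, a, n);
template<typename REAL>
void d_axpy(unsigned gridSize, unsigned blockSize, const cudaStream_t &stream,
            const REAL *x, REAL *y, REAL a, unsigned n)
{
    axpy_kernel<REAL><<<gridSize, blockSize, 0, stream>>>(x, y, a, n);
}

// Explicit instantiations, mirroring the float/double pair at the end of neighborlist.cu.
template void d_axpy<float>(unsigned, unsigned, const cudaStream_t &,
                            const float *, float *, float, unsigned);
template void d_axpy<double>(unsigned, unsigned, const cudaStream_t &,
                             const double *, double *, double, unsigned);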
a143dae18bfa9d82e18934cd2d81e8d57f06b131.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void combine_kernel(int nPixels, int cuePitchInFloats, float* devBg, float* devCga, float* devCgb, float* devTg, float* devMpb, float* devCombinedg)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int orientation = threadIdx.y;
    int orientedIndex = orientation * cuePitchInFloats + index;
    if (index < nPixels) {
        float accumulant = 0.0;
        float accumulant2 = 0.0;
        float* pointer = &devBg[orientedIndex];
        accumulant += *pointer * coefficients[0]; accumulant2 += *pointer * weights[0];
        pointer += 8 * cuePitchInFloats;
        accumulant += *pointer * coefficients[1]; accumulant2 += *pointer * weights[1];
        pointer += 8 * cuePitchInFloats;
        accumulant += *pointer * coefficients[2]; accumulant2 += *pointer * weights[2];
        pointer = &devCga[orientedIndex];
        accumulant += *pointer * coefficients[3]; accumulant2 += *pointer * weights[3];
        pointer += 8 * cuePitchInFloats;
        accumulant += *pointer * coefficients[4]; accumulant2 += *pointer * weights[4];
        pointer += 8 * cuePitchInFloats;
        accumulant += *pointer * coefficients[5]; accumulant2 += *pointer * weights[5];
        pointer = &devCgb[orientedIndex];
        accumulant += *pointer * coefficients[6]; accumulant2 += *pointer * weights[6];
        pointer += 8 * cuePitchInFloats;
        accumulant += *pointer * coefficients[7]; accumulant2 += *pointer * weights[7];
        pointer += 8 * cuePitchInFloats;
        accumulant += *pointer * coefficients[8]; accumulant2 += *pointer * weights[8];
        pointer = &devTg[orientedIndex];
        accumulant += *pointer * coefficients[9]; accumulant2 += *pointer * weights[9];
        pointer += 8 * cuePitchInFloats;
        accumulant += *pointer * coefficients[10]; accumulant2 += *pointer * weights[10];
        pointer += 8 * cuePitchInFloats;
        accumulant += *pointer * coefficients[11]; accumulant2 += *pointer * weights[11];
        devMpb[orientedIndex] = accumulant;
        devCombinedg[orientedIndex] = accumulant2;
    }
}
a143dae18bfa9d82e18934cd2d81e8d57f06b131.cu
#include "includes.h" __global__ void combine_kernel(int nPixels, int cuePitchInFloats, float* devBg, float* devCga, float* devCgb, float* devTg, float* devMpb, float* devCombinedg) { int index = blockDim.x * blockIdx.x + threadIdx.x; int orientation = threadIdx.y; int orientedIndex = orientation * cuePitchInFloats + index; if (index < nPixels) { float accumulant = 0.0; float accumulant2=0.0; float* pointer = &devBg[orientedIndex]; accumulant += *pointer * coefficients[0]; accumulant2 += *pointer * weights[0]; pointer += 8 * cuePitchInFloats; accumulant += *pointer * coefficients[1]; accumulant2 += *pointer * weights[1]; pointer += 8 * cuePitchInFloats; accumulant += *pointer * coefficients[2]; accumulant2 += *pointer * weights[2]; pointer = &devCga[orientedIndex]; accumulant += *pointer * coefficients[3]; accumulant2 += *pointer * weights[3]; pointer += 8 * cuePitchInFloats; accumulant += *pointer * coefficients[4]; accumulant2 += *pointer * weights[4]; pointer += 8 * cuePitchInFloats; accumulant += *pointer * coefficients[5]; accumulant2 += *pointer * weights[5]; pointer = &devCgb[orientedIndex]; accumulant += *pointer * coefficients[6]; accumulant2 += *pointer * weights[6]; pointer += 8 * cuePitchInFloats; accumulant += *pointer * coefficients[7]; accumulant2 += *pointer * weights[7]; pointer += 8 * cuePitchInFloats; accumulant += *pointer * coefficients[8]; accumulant2 += *pointer * weights[8]; pointer = &devTg[orientedIndex]; accumulant += *pointer * coefficients[9]; accumulant2 += *pointer * weights[9]; pointer += 8 * cuePitchInFloats; accumulant += *pointer * coefficients[10]; accumulant2 += *pointer * weights[10]; pointer += 8 * cuePitchInFloats; accumulant += *pointer * coefficients[11]; accumulant2 += *pointer * weights[11]; devMpb[orientedIndex] = accumulant; devCombinedg[orientedIndex] = accumulant2; } }
ab6409952d3973da79c314097aa09aaca16116c8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <time.h> __global__ void matrixMult(int *a, int *b, int *c, int N) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; int index, i; if (row < N && col < N) { index = col + (row * N); for (i = 0; i < N; i++) { sum += a[i + (row * N)] * b[col + (i * N)]; } c[index] = sum; } } int main(void) { int N, T, B, repeat; repeat = 1; while (repeat == 1) { printf("Enter size of matrices: "); scanf("%d", &N); while (N <= 0) { printf( "Size of matrices must be greater than 0. Enter a valid size of matrices: "); scanf("%d", &N); } printf("Enter number of threads in a block: "); scanf("%d", &T); while (T <= 0) { printf( "Number of threads must be greater than 0. Enter number of threads in a block: "); scanf("%d", &T); } while (T > 1024) { printf( "Number of threads must not exceed the device bandwidth. Enter number of threads in a block: "); scanf("%d", &T); } printf("Enter number of blocks in a grid: "); scanf("%d", &B); while (B <= 0) { printf( "Number of blocks must be greater than 0. Enter number of blocks in a grid: "); scanf("%d", &B); } while (B > 65535) { printf( "Number of blocks must not exceed the device bandwidth. Enter number of blocks in a grid: "); scanf("%d", &B); } int *a, *b, *c, *deviceC; int *dev_a, *dev_b, *dev_c; int i, j, k; int ssd = 0; hipEvent_t start, stop; float elapsedTime; hipEventCreate(&start); hipEventCreate(&stop); hipMalloc((void**) &dev_a, (N * N) * sizeof(int)); hipMalloc((void**) &dev_b, (N * N) * sizeof(int)); hipMalloc((void**) &dev_c, (N * N) * sizeof(int)); a = (int *) malloc((N * N) * sizeof(int)); b = (int *) malloc((N * N) * sizeof(int)); c = (int *) malloc((N * N) * sizeof(int)); deviceC = (int *) malloc((N * N) * sizeof(int)); srand(time(NULL)); //loop will generate the matrix with random integers from 0 to 9 for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { a[j + (i * N)] = (int) rand() % 10; b[j + (i * N)] = (int) rand() % 10; } } /*******************begin host code*****************************/ clock_t begin, end; begin = clock(); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { c[j + i * N] = 0; for (k = 0; k < N; k++) { c[j + i * N] = c[j + i * N] + a[k + i * N] * b[j + k * N]; } } } end = clock(); printf("It took %f seconds for the host to do the matrix operation.\n", (float) (end - begin) / (CLOCKS_PER_SEC)); /*******************end host code*****************************/ /*******************begin device code*****************************/ hipMemcpy(dev_a, a, (N * N) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_b, b, (N * N) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_c, deviceC, (N * N) * sizeof(int), hipMemcpyHostToDevice); dim3 grid(B, B); dim3 block(T, T); hipEventRecord(start, 0); hipLaunchKernelGGL(( matrixMult), dim3(grid), dim3(block), 0, 0, dev_a, dev_b, dev_c, N); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipMemcpy(deviceC, dev_c, (N * N) * sizeof(int), hipMemcpyDeviceToHost); hipEventElapsedTime(&elapsedTime, start, stop); printf( "It took %f seconds for the device to do the matrix operation.\n", (elapsedTime / 1000)); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { ssd += deviceC[j + (i * N)] - c[j + (i * N)]; } } printf("The sum of square difference is %d.\n", ssd); printf("The speedup factor is %f.\n", ((float) (end - begin) / (CLOCKS_PER_SEC)) / (elapsedTime / 1000)); 
hipEventDestroy(start); hipEventDestroy(stop); free(a); free(b); free(c); free(deviceC); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); /*******************end device code*****************************/ printf("Enter 1 to continue: "); scanf("%d", &repeat); } return 0; }
ab6409952d3973da79c314097aa09aaca16116c8.cu
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <time.h>

// Each thread computes one element of c = a * b for N x N matrices.
__global__ void matrixMult(int *a, int *b, int *c, int N) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    int index, i;
    if (row < N && col < N) {
        index = col + (row * N);
        for (i = 0; i < N; i++) {
            sum += a[i + (row * N)] * b[col + (i * N)];
        }
        c[index] = sum;
    }
}

int main(void) {
    int N, T, B, repeat;
    repeat = 1;
    while (repeat == 1) {
        printf("Enter size of matrices: ");
        scanf("%d", &N);
        while (N <= 0) {
            printf("Size of matrices must be greater than 0. Enter a valid size of matrices: ");
            scanf("%d", &N);
        }
        printf("Enter number of threads in a block: ");
        scanf("%d", &T);
        while (T <= 0) {
            printf("Number of threads must be greater than 0. Enter number of threads in a block: ");
            scanf("%d", &T);
        }
        while (T > 1024) {
            printf("Number of threads must not exceed the device limit. Enter number of threads in a block: ");
            scanf("%d", &T);
        }
        printf("Enter number of blocks in a grid: ");
        scanf("%d", &B);
        while (B <= 0) {
            printf("Number of blocks must be greater than 0. Enter number of blocks in a grid: ");
            scanf("%d", &B);
        }
        while (B > 65535) {
            printf("Number of blocks must not exceed the device limit. Enter number of blocks in a grid: ");
            scanf("%d", &B);
        }

        int *a, *b, *c, *deviceC;
        int *dev_a, *dev_b, *dev_c;
        int i, j, k;
        int ssd = 0;
        cudaEvent_t start, stop;
        float elapsedTime;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);

        cudaMalloc((void**) &dev_a, (N * N) * sizeof(int));
        cudaMalloc((void**) &dev_b, (N * N) * sizeof(int));
        cudaMalloc((void**) &dev_c, (N * N) * sizeof(int));
        a = (int *) malloc((N * N) * sizeof(int));
        b = (int *) malloc((N * N) * sizeof(int));
        c = (int *) malloc((N * N) * sizeof(int));
        deviceC = (int *) malloc((N * N) * sizeof(int));

        srand(time(NULL));
        // Fill both input matrices with random integers from 0 to 9.
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                a[j + (i * N)] = (int) rand() % 10;
                b[j + (i * N)] = (int) rand() % 10;
            }
        }

        /******************* begin host code *****************************/
        clock_t begin, end;
        begin = clock();
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                c[j + i * N] = 0;
                for (k = 0; k < N; k++) {
                    c[j + i * N] = c[j + i * N] + a[k + i * N] * b[j + k * N];
                }
            }
        }
        end = clock();
        printf("It took %f seconds for the host to do the matrix operation.\n",
               (float) (end - begin) / (CLOCKS_PER_SEC));
        /******************* end host code *****************************/

        /******************* begin device code *****************************/
        cudaMemcpy(dev_a, a, (N * N) * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(dev_b, b, (N * N) * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(dev_c, deviceC, (N * N) * sizeof(int), cudaMemcpyHostToDevice);

        dim3 grid(B, B);
        dim3 block(T, T);
        cudaEventRecord(start, 0);
        matrixMult<<<grid, block>>>(dev_a, dev_b, dev_c, N);
        cudaThreadSynchronize();
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaMemcpy(deviceC, dev_c, (N * N) * sizeof(int), cudaMemcpyDeviceToHost);
        cudaEventElapsedTime(&elapsedTime, start, stop);
        printf("It took %f seconds for the device to do the matrix operation.\n",
               (elapsedTime / 1000));

        // Accumulate the squared difference between the device and host results.
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                int diff = deviceC[j + (i * N)] - c[j + (i * N)];
                ssd += diff * diff;
            }
        }
        printf("The sum of square difference is %d.\n", ssd);
        printf("The speedup factor is %f.\n",
               ((float) (end - begin) / (CLOCKS_PER_SEC)) / (elapsedTime / 1000));

        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        free(a);
        free(b);
        free(c);
        free(deviceC);
        cudaFree(dev_a);
        cudaFree(dev_b);
        cudaFree(dev_c);
        /******************* end device code *****************************/

        printf("Enter 1 to continue: ");
        scanf("%d", &repeat);
    }
    return 0;
}
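One caveat on the listing above: the result is only correct if the user-entered B and T actually cover the N x N output (B * T >= N in each dimension), which the input checks do not enforce. A minimal sketch, assuming the same T x T block shape, of deriving a covering grid from N and T instead of asking the user for B:

// Illustrative only (not part of the original file): size the grid from N and T
// so that every output element gets a thread, rather than trusting a user-supplied B.
dim3 block(T, T);
dim3 grid((N + T - 1) / T, (N + T - 1) / T);   // ceil(N / T) blocks per dimension
matrixMult<<<grid, block>>>(dev_a, dev_b, dev_c, N);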
62d997c88fe61402f8fc2bb4fbff596e6eb7aece.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <iostream> #include <fstream> #include <string> #include <string.h> #include <vector> #include <fstream> #include <stdio.h> #include <stdlib.h> #include <random> #define MAX_GRID_SIZE 4294967295 #define MAX_BLOCK_SIZE_X 32 // which points #define MAX_BLOCK_SIZE_Y 8 // which cluster/dimension (first one for clustering, second one for computing centroids) #define MAX_BLOCK_SIZE_Z 4 // which dimension #define MAX_THREADS_PER_BLOCK 1024 #define MAX_ITER 32 #define FILE_NAME "points.txt" using namespace std; __global__ void initialize(int *clusters, int n, int k); __global__ void compute_centroids(float *points, float *centroids, int *clusters, int *clusters_size, int n, int d, int k, int reset); __global__ void assign_cluster(float *points, float *centroids, int *clusters, int *clusters_size, float *distances, int n, int d, int k, int *converged, int reset); int main(){ /* Read FILE_NAME with the following format: First line specifies n: the number of points, d: the dimension of the points, and k: the number of clusters, in the same order. The following n lines will contain the value of the nodes in each d dimension in each line. */ int n = 1 << 20, d = 4, k = 8; // parameters float *points; // value of the points int *clusters; // each point's cluster a.k.a the final result int *converged; // whether the program is finished yet or not /* generate random numbers */ points = (float *) malloc(n * d * sizeof(float)); for (int i = 0; i < n; i++){ for (int j = 0; j < d; j++){ float f = (float) rand() / RAND_MAX; points[i * d + j] = f * 100.0; } } clusters = (int *) malloc(n * sizeof(int)); converged = (int *) malloc(sizeof(int)); /* Start overall timer */ hipEvent_t start_overall, stop_overall; hipEventCreate(&start_overall); hipEventCreate(&stop_overall); hipEventRecord(start_overall, 0); /* Allocate memory on device */ float *points_dev, *centroids_dev, *distances_dev; int *clusters_dev, *clusters_size_dev; int *converged_dev; hipMalloc((void **) &points_dev, n * d * sizeof(float)); hipMalloc((void **) &centroids_dev, k * d * sizeof(float)); hipMalloc((void **) &clusters_dev, n * sizeof(int)); hipMalloc((void **) &clusters_size_dev, k * sizeof(int)); hipMalloc((void **) &converged_dev, sizeof(int)); hipMalloc((void **) &distances_dev, n * k * sizeof(float)); /* Copy the points from host to device */ hipMemcpy(points_dev, points, n * d * sizeof(float), hipMemcpyHostToDevice); /* Compute the block size and grid size for initialization */ int block_size = (n > MAX_THREADS_PER_BLOCK) ? MAX_THREADS_PER_BLOCK : n; int temp_grid_size = (n + block_size - 1) / block_size; int grid_size = (temp_grid_size > MAX_GRID_SIZE) ? MAX_GRID_SIZE : temp_grid_size; /* Initialize the clusters */ hipLaunchKernelGGL(( initialize), dim3(grid_size), dim3(block_size), 0, 0, clusters_dev, n, k); /* Compute the block size for launching the 'compute_centroids' kernel */ int cc_block_size_x = (n > MAX_BLOCK_SIZE_X) ? MAX_BLOCK_SIZE_X : n; int cc_block_size_y = (d > MAX_BLOCK_SIZE_Y * MAX_BLOCK_SIZE_Z) ? (MAX_BLOCK_SIZE_Y * MAX_BLOCK_SIZE_Z) : d; int cc_temp_grid_size = (n + cc_block_size_x - 1) / cc_block_size_x; int cc_grid_size = (cc_temp_grid_size > MAX_GRID_SIZE) ? MAX_GRID_SIZE : cc_temp_grid_size; /* Compute the block size for launching the 'assign_cluster' kernel */ int ac_block_size_x = (n > MAX_BLOCK_SIZE_X) ? MAX_BLOCK_SIZE_X : n; int ac_block_size_y = (k > MAX_BLOCK_SIZE_Y) ? 
MAX_BLOCK_SIZE_Y : k; int ac_block_size_z = (d > MAX_BLOCK_SIZE_Z) ? MAX_BLOCK_SIZE_Z : d; int ac_temp_grid_size = (n + ac_block_size_x - 1) / ac_block_size_x; int ac_grid_size = (ac_temp_grid_size > MAX_GRID_SIZE) ? MAX_GRID_SIZE : ac_temp_grid_size; /* Final computation of sizes */ dim3 cc_block_dim(cc_block_size_x, cc_block_size_y); dim3 ac_block_dim(ac_block_size_x, ac_block_size_y, ac_block_size_z); /* because the first time, there is no need to reset centroids and clusters_size */ int reset = 0; hipDeviceSynchronize(); /* Start execution timer */ hipEvent_t start_exec, stop_exec; hipEventCreate(&start_exec); hipEventCreate(&stop_exec); hipEventRecord(start_exec, 0); int iter = 0; while (1){ iter++; if (iter > MAX_ITER) break; //initialize the convergence rate hipMemset(converged_dev, 0, sizeof(int)); // start the first kernel hipLaunchKernelGGL(( compute_centroids), dim3(cc_grid_size), dim3(cc_block_dim), 0, 0, points_dev, centroids_dev, clusters_dev, clusters_size_dev, n, d, k, reset); hipDeviceSynchronize(); // start the second kernel hipMemset(distances_dev, 0, n * k * sizeof(float)); hipLaunchKernelGGL(( assign_cluster), dim3(ac_grid_size), dim3(ac_block_dim), 0, 0, points_dev, centroids_dev, clusters_dev, clusters_size_dev, distances_dev, n, d, k, converged_dev, reset); hipDeviceSynchronize(); // check converged flag hipMemcpy(converged, converged_dev, sizeof(int), hipMemcpyDeviceToHost); if (*converged == 0) break; reset = 1; } hipEventRecord(stop_exec, 0); hipEventSynchronize(stop_exec); /* Copy the results back from device to host */ hipMemcpy(clusters, clusters_dev, n * sizeof(int), hipMemcpyDeviceToHost); hipEventRecord(stop_overall, 0); hipEventSynchronize(stop_overall); /* do whatever you want with the results :)) */ float overall_time, execution_time; hipEventElapsedTime(&overall_time, start_overall, stop_overall); hipEventElapsedTime(&execution_time, start_exec, stop_exec); cout << "n: " << n << "\tk: " << k << "\td: " << d << endl; cout << "converged after " << iter << " iterations" << endl; cout << "time for executing kmeans: " << execution_time << "ms" << endl; cout << "time for entire run (allocation, initialization, etc): " << overall_time << "ms" << endl; /* Free the memory */ hipFree(points_dev); hipFree(centroids_dev); hipFree(clusters_dev); hipFree(clusters_size_dev); hipFree(converged_dev); free(points); free(clusters); free(converged); hipEventDestroy(start_overall); hipEventDestroy(stop_overall); hipEventDestroy(start_exec); hipEventDestroy(stop_exec); } __global__ void initialize(int *clusters, int n, int k){ int start = threadIdx.x, stride = blockDim.x * gridDim.x; for (int i = start; i < n; i += stride){ clusters[i] = i % k; } } __global__ void compute_centroids(float *points, float *centroids, int *clusters, int *clusters_size, int n, int d, int k, int reset){ int t_point = threadIdx.x + blockDim.x * blockIdx.x, p_stride = blockDim.x * gridDim.x; int t_dim = threadIdx.y, d_stride = blockDim.y; /* reset centroids and their sizes */ if (reset){ if (threadIdx.x < k){ clusters_size[threadIdx.x] = 0; for (int i = t_dim; i < d; i += d_stride){ centroids[threadIdx.x * d + i] = 0.0; } } __syncthreads(); } /* Each thread handles one (or a few) dimensions of one (or a few) points */ for (int i = t_point; i < n; i += p_stride){ // one (or a few) points int t_cluster = clusters[i]; // this point's cluster atomicAdd(clusters_size + t_cluster, 1); // increase the size of this cluster for (int j = t_dim; j < d; j += d_stride){ // one (or a few) dimensions 
atomicAdd(centroids + t_cluster * d + j, points[i * d + j]); // this point's value added to this cluster's value } } } /* This function assigns clusters to points based on centroids */ __global__ void assign_cluster(float *points, float *centroids, int *clusters, int *clusters_size, float* distances, int n, int d, int k, int *converged, int reset){ int t_point = threadIdx.x + blockIdx.x * blockDim.x, t_cluster = threadIdx.y, t_dim = threadIdx.z; int p_stride = blockDim.x * gridDim.x, c_stride = blockDim.y, d_stride = blockDim.z; int t_converged = 0; __shared__ int block_converged; /* Each thread handles one (or a few) dimensions of one (or a few) points next to one (or a few) clusters */ for (int i = t_point; i < n; i += p_stride){ // one (or a few) points for (int j = t_cluster; j < k; j += c_stride){ // one (or a few) clusters for (int l = t_dim; l < d; l += d_stride){ // one (or a few) dimensions /* Calculate the <point, centroid> distances */ float t_dist = points[i * d + l] - centroids[j * d + l] / clusters_size[j]; atomicAdd(distances + i * k + j, t_dist * t_dist); } } __syncthreads(); /* Calculate minimum distancd for this point and assign (maybe) new cluster. */ if (t_cluster == 0 && t_dim == 0){ float min_distance = distances[i * k]; int min_cluster = 0; for (int j = 1; j < k; j++){ if (distances[i * k + j] < min_distance){ min_distance = distances[i * k + j]; min_cluster = j; } } if (min_cluster != clusters[i]){ clusters[i] = min_cluster; t_converged = -1; } } } /* handle convergence */ if (t_cluster == 0 && t_dim == 0){ if (t_converged == -1){ block_converged = -1; } } __syncthreads(); if (t_point == 0 && t_cluster == 0 && t_dim == 0){ if (block_converged == -1){ *converged = -1; block_converged = 0; } } }
62d997c88fe61402f8fc2bb4fbff596e6eb7aece.cu
#include <cstdlib> #include <iostream> #include <fstream> #include <string> #include <string.h> #include <vector> #include <fstream> #include <stdio.h> #include <stdlib.h> #include <random> #define MAX_GRID_SIZE 4294967295 #define MAX_BLOCK_SIZE_X 32 // which points #define MAX_BLOCK_SIZE_Y 8 // which cluster/dimension (first one for clustering, second one for computing centroids) #define MAX_BLOCK_SIZE_Z 4 // which dimension #define MAX_THREADS_PER_BLOCK 1024 #define MAX_ITER 32 #define FILE_NAME "points.txt" using namespace std; __global__ void initialize(int *clusters, int n, int k); __global__ void compute_centroids(float *points, float *centroids, int *clusters, int *clusters_size, int n, int d, int k, int reset); __global__ void assign_cluster(float *points, float *centroids, int *clusters, int *clusters_size, float *distances, int n, int d, int k, int *converged, int reset); int main(){ /* Read FILE_NAME with the following format: First line specifies n: the number of points, d: the dimension of the points, and k: the number of clusters, in the same order. The following n lines will contain the value of the nodes in each d dimension in each line. */ int n = 1 << 20, d = 4, k = 8; // parameters float *points; // value of the points int *clusters; // each point's cluster a.k.a the final result int *converged; // whether the program is finished yet or not /* generate random numbers */ points = (float *) malloc(n * d * sizeof(float)); for (int i = 0; i < n; i++){ for (int j = 0; j < d; j++){ float f = (float) rand() / RAND_MAX; points[i * d + j] = f * 100.0; } } clusters = (int *) malloc(n * sizeof(int)); converged = (int *) malloc(sizeof(int)); /* Start overall timer */ cudaEvent_t start_overall, stop_overall; cudaEventCreate(&start_overall); cudaEventCreate(&stop_overall); cudaEventRecord(start_overall, 0); /* Allocate memory on device */ float *points_dev, *centroids_dev, *distances_dev; int *clusters_dev, *clusters_size_dev; int *converged_dev; cudaMalloc((void **) &points_dev, n * d * sizeof(float)); cudaMalloc((void **) &centroids_dev, k * d * sizeof(float)); cudaMalloc((void **) &clusters_dev, n * sizeof(int)); cudaMalloc((void **) &clusters_size_dev, k * sizeof(int)); cudaMalloc((void **) &converged_dev, sizeof(int)); cudaMalloc((void **) &distances_dev, n * k * sizeof(float)); /* Copy the points from host to device */ cudaMemcpy(points_dev, points, n * d * sizeof(float), cudaMemcpyHostToDevice); /* Compute the block size and grid size for initialization */ int block_size = (n > MAX_THREADS_PER_BLOCK) ? MAX_THREADS_PER_BLOCK : n; int temp_grid_size = (n + block_size - 1) / block_size; int grid_size = (temp_grid_size > MAX_GRID_SIZE) ? MAX_GRID_SIZE : temp_grid_size; /* Initialize the clusters */ initialize<<<grid_size, block_size>>>(clusters_dev, n, k); /* Compute the block size for launching the 'compute_centroids' kernel */ int cc_block_size_x = (n > MAX_BLOCK_SIZE_X) ? MAX_BLOCK_SIZE_X : n; int cc_block_size_y = (d > MAX_BLOCK_SIZE_Y * MAX_BLOCK_SIZE_Z) ? (MAX_BLOCK_SIZE_Y * MAX_BLOCK_SIZE_Z) : d; int cc_temp_grid_size = (n + cc_block_size_x - 1) / cc_block_size_x; int cc_grid_size = (cc_temp_grid_size > MAX_GRID_SIZE) ? MAX_GRID_SIZE : cc_temp_grid_size; /* Compute the block size for launching the 'assign_cluster' kernel */ int ac_block_size_x = (n > MAX_BLOCK_SIZE_X) ? MAX_BLOCK_SIZE_X : n; int ac_block_size_y = (k > MAX_BLOCK_SIZE_Y) ? MAX_BLOCK_SIZE_Y : k; int ac_block_size_z = (d > MAX_BLOCK_SIZE_Z) ? 
MAX_BLOCK_SIZE_Z : d; int ac_temp_grid_size = (n + ac_block_size_x - 1) / ac_block_size_x; int ac_grid_size = (ac_temp_grid_size > MAX_GRID_SIZE) ? MAX_GRID_SIZE : ac_temp_grid_size; /* Final computation of sizes */ dim3 cc_block_dim(cc_block_size_x, cc_block_size_y); dim3 ac_block_dim(ac_block_size_x, ac_block_size_y, ac_block_size_z); /* because the first time, there is no need to reset centroids and clusters_size */ int reset = 0; cudaDeviceSynchronize(); /* Start execution timer */ cudaEvent_t start_exec, stop_exec; cudaEventCreate(&start_exec); cudaEventCreate(&stop_exec); cudaEventRecord(start_exec, 0); int iter = 0; while (1){ iter++; if (iter > MAX_ITER) break; //initialize the convergence rate cudaMemset(converged_dev, 0, sizeof(int)); // start the first kernel compute_centroids<<<cc_grid_size, cc_block_dim>>>(points_dev, centroids_dev, clusters_dev, clusters_size_dev, n, d, k, reset); cudaDeviceSynchronize(); // start the second kernel cudaMemset(distances_dev, 0, n * k * sizeof(float)); assign_cluster<<<ac_grid_size, ac_block_dim>>>(points_dev, centroids_dev, clusters_dev, clusters_size_dev, distances_dev, n, d, k, converged_dev, reset); cudaDeviceSynchronize(); // check converged flag cudaMemcpy(converged, converged_dev, sizeof(int), cudaMemcpyDeviceToHost); if (*converged == 0) break; reset = 1; } cudaEventRecord(stop_exec, 0); cudaEventSynchronize(stop_exec); /* Copy the results back from device to host */ cudaMemcpy(clusters, clusters_dev, n * sizeof(int), cudaMemcpyDeviceToHost); cudaEventRecord(stop_overall, 0); cudaEventSynchronize(stop_overall); /* do whatever you want with the results :)) */ float overall_time, execution_time; cudaEventElapsedTime(&overall_time, start_overall, stop_overall); cudaEventElapsedTime(&execution_time, start_exec, stop_exec); cout << "n: " << n << "\tk: " << k << "\td: " << d << endl; cout << "converged after " << iter << " iterations" << endl; cout << "time for executing kmeans: " << execution_time << "ms" << endl; cout << "time for entire run (allocation, initialization, etc): " << overall_time << "ms" << endl; /* Free the memory */ cudaFree(points_dev); cudaFree(centroids_dev); cudaFree(clusters_dev); cudaFree(clusters_size_dev); cudaFree(converged_dev); free(points); free(clusters); free(converged); cudaEventDestroy(start_overall); cudaEventDestroy(stop_overall); cudaEventDestroy(start_exec); cudaEventDestroy(stop_exec); } __global__ void initialize(int *clusters, int n, int k){ int start = threadIdx.x, stride = blockDim.x * gridDim.x; for (int i = start; i < n; i += stride){ clusters[i] = i % k; } } __global__ void compute_centroids(float *points, float *centroids, int *clusters, int *clusters_size, int n, int d, int k, int reset){ int t_point = threadIdx.x + blockDim.x * blockIdx.x, p_stride = blockDim.x * gridDim.x; int t_dim = threadIdx.y, d_stride = blockDim.y; /* reset centroids and their sizes */ if (reset){ if (threadIdx.x < k){ clusters_size[threadIdx.x] = 0; for (int i = t_dim; i < d; i += d_stride){ centroids[threadIdx.x * d + i] = 0.0; } } __syncthreads(); } /* Each thread handles one (or a few) dimensions of one (or a few) points */ for (int i = t_point; i < n; i += p_stride){ // one (or a few) points int t_cluster = clusters[i]; // this point's cluster atomicAdd(clusters_size + t_cluster, 1); // increase the size of this cluster for (int j = t_dim; j < d; j += d_stride){ // one (or a few) dimensions atomicAdd(centroids + t_cluster * d + j, points[i * d + j]); // this point's value added to this cluster's value } } } /* This 
function assigns clusters to points based on centroids */ __global__ void assign_cluster(float *points, float *centroids, int *clusters, int *clusters_size, float* distances, int n, int d, int k, int *converged, int reset){ int t_point = threadIdx.x + blockIdx.x * blockDim.x, t_cluster = threadIdx.y, t_dim = threadIdx.z; int p_stride = blockDim.x * gridDim.x, c_stride = blockDim.y, d_stride = blockDim.z; int t_converged = 0; __shared__ int block_converged; /* Each thread handles one (or a few) dimensions of one (or a few) points next to one (or a few) clusters */ for (int i = t_point; i < n; i += p_stride){ // one (or a few) points for (int j = t_cluster; j < k; j += c_stride){ // one (or a few) clusters for (int l = t_dim; l < d; l += d_stride){ // one (or a few) dimensions /* Calculate the <point, centroid> distances */ float t_dist = points[i * d + l] - centroids[j * d + l] / clusters_size[j]; atomicAdd(distances + i * k + j, t_dist * t_dist); } } __syncthreads(); /* Calculate minimum distancd for this point and assign (maybe) new cluster. */ if (t_cluster == 0 && t_dim == 0){ float min_distance = distances[i * k]; int min_cluster = 0; for (int j = 1; j < k; j++){ if (distances[i * k + j] < min_distance){ min_distance = distances[i * k + j]; min_cluster = j; } } if (min_cluster != clusters[i]){ clusters[i] = min_cluster; t_converged = -1; } } } /* handle convergence */ if (t_cluster == 0 && t_dim == 0){ if (t_converged == -1){ block_converged = -1; } } __syncthreads(); if (t_point == 0 && t_cluster == 0 && t_dim == 0){ if (block_converged == -1){ *converged = -1; block_converged = 0; } } }
e762a947fc772bb2a51081f81f1d0501ee7dd473.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * The MIT License (MIT) * * Copyright (c) 2014 Kyle Hollins Wray, University of Massachusetts * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <epic/harmonic/harmonic.h> #include <epic/harmonic/harmonic_gpu.h> #include <epic/harmonic/harmonic_model_gpu.h> #include <epic/error_codes.h> #include <epic/constants.h> #include <iostream> #include <stdio.h> #include <math.h> #include <cmath> namespace epic { __global__ void harmonic_update_2d_gpu(unsigned int *m, float *u, unsigned int *locked, unsigned int currentIteration) { for (unsigned int x0 = blockIdx.x; x0 < m[0]; x0 += gridDim.x) { unsigned int offset = (unsigned int)((currentIteration % 2) != (x0 % 2)); for (unsigned int x1 = 2 * threadIdx.x + offset; x1 < m[1]; x1 += 2 * blockDim.x) { // If this is locked, then wait for the other threads to finish in the warp, then continue. if (locked[x0 * m[1] + x1]) { __syncthreads(); continue; } // Update the value at this location with the log-sum-exp trick. float maxVal = EPIC_FLT_MIN; maxVal = fmaxf(u[(x0 - 1) * m[1] + x1], u[(x0 + 1) * m[1] + x1]); maxVal = fmaxf(maxVal, u[x0 * m[1] + (x1 - 1)]); maxVal = fmaxf(maxVal, u[x0 * m[1] + (x1 + 1)]); u[x0 * m[1] + x1] = maxVal + __logf(__expf(u[(x0 - 1) * m[1] + x1] - maxVal) + __expf(u[(x0 + 1) * m[1] + x1] - maxVal) + __expf(u[x0 * m[1] + (x1 - 1)] - maxVal) + __expf(u[x0 * m[1] + (x1 + 1)] - maxVal)) - 1.38629436f; //This is equivalent to ln(2.0 * n) for n = 2. __syncthreads(); } } } __global__ void harmonic_update_and_check_2d_gpu(unsigned int *m, float *u, unsigned int *locked, unsigned int currentIteration, float *delta) { // Since float and unsigned int are 4 bytes each, and we need each array to be the size of // the number of threads, we will need to call this with: sizeof(float) * numThreads. // Note: blockDim.x == numThreads extern __shared__ float sdata[]; float *deltaLocalMax = (float *)sdata; deltaLocalMax[threadIdx.x] = 0.0f; __syncthreads(); for (unsigned int x0 = blockIdx.x; x0 < m[0]; x0 += gridDim.x) { unsigned int offset = (unsigned int)((currentIteration % 2) != (x0 % 2)); for (unsigned int x1 = 2 * threadIdx.x + offset; x1 < m[1]; x1 += 2 * blockDim.x) { // If this is locked, then wait for the other threads to finish in the warp, then continue. if (locked[x0 * m[1] + x1]) { __syncthreads(); continue; } float uPrevious = u[x0 * m[1] + x1]; // Update the value at this location with the log-sum-exp trick. 
float maxVal = EPIC_FLT_MIN; maxVal = fmaxf(u[(x0 - 1) * m[1] + x1], u[(x0 + 1) * m[1] + x1]); maxVal = fmaxf(maxVal, u[x0 * m[1] + (x1 - 1)]); maxVal = fmaxf(maxVal, u[x0 * m[1] + (x1 + 1)]); u[x0 * m[1] + x1] = maxVal + __logf(__expf(u[(x0 - 1) * m[1] + x1] - maxVal) + __expf(u[(x0 + 1) * m[1] + x1] - maxVal) + __expf(u[x0 * m[1] + (x1 - 1)] - maxVal) + __expf(u[x0 * m[1] + (x1 + 1)] - maxVal)) - 1.38629436f; //This is equivalent to ln(2.0 * n) for n = 2. // Compute the updated delta. deltaLocalMax[threadIdx.x] = fmaxf(deltaLocalMax[threadIdx.x], fabs(uPrevious - u[x0 * m[1] + x1])); __syncthreads(); } } // At the end, perform a reduction to efficiently compute the maximal delta for this thread block. for (unsigned int index = blockDim.x / 2; index > 0; index >>= 1) { if (threadIdx.x < index && threadIdx.x + index < blockDim.x) { if (deltaLocalMax[threadIdx.x] < deltaLocalMax[threadIdx.x + index]) { deltaLocalMax[threadIdx.x] = deltaLocalMax[threadIdx.x + index]; } } __syncthreads(); } // Store the maximal delta in the array for delta values. We will use another kernel to quickly // do a reduction over this to find the max delta. if (threadIdx.x == 0) { delta[blockIdx.x] = deltaLocalMax[0]; } } __global__ void harmonic_compute_max_delta_gpu(unsigned int numBlocks, float *delta) { // Stride this thread to compute its individual max. for (unsigned int i = threadIdx.x; i < numBlocks; i += blockDim.x) { delta[threadIdx.x] = fmaxf(delta[threadIdx.x], delta[i]); } __syncthreads(); // Do a final reduction on these values to efficiently compute the true maximal delta. // Note: Afterwards, delta[0] will hold the max delta over all cells. for (unsigned int index = blockDim.x / 2; index > 0; index >>= 1) { if (threadIdx.x < index && threadIdx.x + index < numBlocks) { //delta[threadIdx.x] = fmaxf(delta[threadIdx.x], delta[threadIdx.x + index]); if (delta[threadIdx.x] < delta[threadIdx.x + index]) { delta[threadIdx.x] = delta[threadIdx.x + index]; } } __syncthreads(); } } unsigned int harmonic_compute_num_blocks_gpu(Harmonic *harmonic, unsigned int numThreads) { if (harmonic->n == 2) { return harmonic->m[0]; } else if (harmonic->n == 3) { } else if (harmonic->n == 4) { } return 0; } int harmonic_complete_gpu(Harmonic *harmonic, unsigned int numThreads) { int result; result = harmonic_initialize_dimension_size_gpu(harmonic); if (result != EPIC_SUCCESS) { return result; } result = harmonic_initialize_potential_values_gpu(harmonic); if (result != EPIC_SUCCESS) { return result; } result = harmonic_initialize_locked_gpu(harmonic); if (result != EPIC_SUCCESS) { return result; } result = harmonic_execute_gpu(harmonic, numThreads); if (result != EPIC_SUCCESS) { return result; } result = EPIC_SUCCESS; if (harmonic_uninitialize_dimension_size_gpu(harmonic) != EPIC_SUCCESS) { result = EPIC_ERROR_DEVICE_FREE; } if (harmonic_uninitialize_potential_values_gpu(harmonic) != EPIC_SUCCESS) { result = EPIC_ERROR_DEVICE_FREE; } if (harmonic_uninitialize_locked_gpu(harmonic) != EPIC_SUCCESS) { result = EPIC_ERROR_DEVICE_FREE; } return result; } int harmonic_initialize_gpu(Harmonic *harmonic, unsigned int numThreads) { // Ensure the data is valid. if (harmonic->n == 0 || harmonic->m == nullptr || harmonic->d_delta != nullptr) { fprintf(stderr, "Error[harmonic_initialize_gpu]: %s\n", "Invalid input."); return EPIC_ERROR_INVALID_DATA; } unsigned int numBlocks = harmonic_compute_num_blocks_gpu(harmonic, numThreads); // Allocate the memory on the device. 
if (hipMalloc(&harmonic->d_delta, numBlocks * sizeof(float)) != hipSuccess) { fprintf(stderr, "Error[harmonic_initialize_gpu]: %s\n", "Failed to allocate device-side memory for delta."); return EPIC_ERROR_DEVICE_MALLOC; } return EPIC_SUCCESS; } int harmonic_execute_gpu(Harmonic *harmonic, unsigned int numThreads) { // The result from calling other functions. int result; // Ensure data is valid before we begin. if (harmonic == nullptr || harmonic->m == nullptr || harmonic->u == nullptr || harmonic->locked == nullptr || harmonic->epsilon <= 0.0 || harmonic->d_m == nullptr || harmonic->d_u == nullptr || harmonic->d_locked == nullptr) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Invalid data."); return EPIC_ERROR_INVALID_DATA; } if (numThreads % 32 != 0) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Must specficy a number of threads divisible by 32 (the number of threads in a warp)."); return EPIC_ERROR_INVALID_CUDA_PARAM; } // Reset the current iteration. harmonic->currentIteration = 0; result = harmonic_initialize_gpu(harmonic, numThreads); if (result != EPIC_SUCCESS) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Failed to initialize GPU variables."); return result; } // Make sure 'information' can at least be propagated throughout the entire grid. unsigned int mMax = 0; for (unsigned int i = 0; i < harmonic->n; i++) { mMax = ::max(mMax, harmonic->m[i]); } harmonic->delta = harmonic->epsilon + 1.0f; result = EPIC_SUCCESS; // Keep going until a threshold is reached. while (result != EPIC_SUCCESS_AND_CONVERGED || harmonic->currentIteration < mMax) { // We check for convergence on a staggered number of iterations. if (harmonic->currentIteration % harmonic->numIterationsToStaggerCheck == 0) { result = harmonic_update_and_check_gpu(harmonic, numThreads); if (result != EPIC_SUCCESS && result != EPIC_SUCCESS_AND_CONVERGED) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Failed to perform the Gauss-Seidel update and check step."); return result; } } else { result = harmonic_update_gpu(harmonic, numThreads); if (result != EPIC_SUCCESS) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Failed to perform the Gauss-Seidel update step."); return result; } } /* *** DEBUG *** if (harmonic->currentIteration % harmonic->numIterationsToStaggerCheck == 0) { printf("Iteration %i --- %e\n", harmonic->currentIteration, harmonic->delta); fflush(stdout); } //*/ } result = harmonic_get_potential_values_gpu(harmonic); if (result != EPIC_SUCCESS) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Failed to get all the potential values."); return result; } result = harmonic_uninitialize_gpu(harmonic); if (result != EPIC_SUCCESS) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Failed to uninitialize GPU variables."); return result; } return EPIC_SUCCESS; } int harmonic_uninitialize_gpu(Harmonic *harmonic) { int result; result = EPIC_SUCCESS; if (harmonic->d_delta != nullptr) { if (hipFree(harmonic->d_delta) != hipSuccess) { fprintf(stderr, "Error[harmonic_uninitialize_gpu]: %s\n", "Failed to free device-side memory for delta."); result = EPIC_ERROR_DEVICE_FREE; } } harmonic->d_delta = nullptr; return result; } int harmonic_update_gpu(Harmonic *harmonic, unsigned int numThreads) { unsigned int numBlocks = harmonic_compute_num_blocks_gpu(harmonic, numThreads); if (harmonic->n == 2) { hipLaunchKernelGGL(( harmonic_update_2d_gpu), dim3(numBlocks), dim3(numThreads) , 0, 0, harmonic->d_m, harmonic->d_u, harmonic->d_locked, harmonic->currentIteration); } else if 
(harmonic->n == 3) { } else if (harmonic->n == 4) { } // Check if there was an error executing the kernel. if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Error[harmonic_update_gpu]: %s\n", "Failed to execute the 'Gauss-Seidel update' kernel."); return EPIC_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "Error[harmonic_update_gpu]: %s\n", "Failed to synchronize the device after 'Gauss-Seidel update' kernel."); return EPIC_ERROR_DEVICE_SYNCHRONIZE; } harmonic->currentIteration++; return EPIC_SUCCESS; } int harmonic_update_and_check_gpu(Harmonic *harmonic, unsigned int numThreads) { // The number of blocks depends on n, m, and the number of threads. unsigned int numBlocks = harmonic_compute_num_blocks_gpu(harmonic, numThreads); if (harmonic->n == 2) { hipLaunchKernelGGL(( harmonic_update_and_check_2d_gpu), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(float) , 0, harmonic->d_m, harmonic->d_u, harmonic->d_locked, harmonic->currentIteration, harmonic->d_delta); } else if (harmonic->n == 3) { } else if (harmonic->n == 4) { } // Check if there was an error executing the kernel. if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Error[harmonic_update_and_check_gpu]: %s\n", "Failed to execute the 'Gauss-Seidel update' kernel."); return EPIC_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "Error[harmonic_update_and_check_gpu]: %s\n", "Failed to synchronize the device after 'Gauss-Seidel update' kernel."); return EPIC_ERROR_DEVICE_SYNCHRONIZE; } hipLaunchKernelGGL(( harmonic_compute_max_delta_gpu), dim3(1), dim3(numThreads) , 0, 0, numBlocks, harmonic->d_delta); // Check if there was an error executing the kernel. if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Error[harmonic_update_and_check_gpu]: %s\n", "Failed to execute the 'delta check update' kernel."); return EPIC_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "Error[harmonic_update_and_check_gpu]: %s\n", "Failed to synchronize the device after 'delta check update' kernel."); return EPIC_ERROR_DEVICE_SYNCHRONIZE; } // Retrieve the max delta value to check for convergence. Note: The first value in d_delta holds the maximal value. if (hipMemcpy(&harmonic->delta, harmonic->d_delta, 1 * sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Error[harmonic_update_and_check_gpu]: %s\n", "Failed to copy memory from device to host for the max delta."); return EPIC_ERROR_MEMCPY_TO_HOST; } harmonic->currentIteration++; if (harmonic->delta < harmonic->epsilon) { return EPIC_SUCCESS_AND_CONVERGED; } else { return EPIC_SUCCESS; } } int harmonic_get_potential_values_gpu(Harmonic *harmonic) { // Compute the number of cells. unsigned int numCells = 1; for (unsigned int i = 0; i < harmonic->n; i++) { numCells *= harmonic->m[i]; } // Copy the final (or intermediate) result from device to host. if (hipMemcpy(harmonic->u, harmonic->d_u, numCells * sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Error[harmonic_get_potential_values_gpu]: %s\n", "Failed to copy memory from device to host for the potential values."); return EPIC_ERROR_MEMCPY_TO_HOST; } return EPIC_SUCCESS; } }; // namespace epic
e762a947fc772bb2a51081f81f1d0501ee7dd473.cu
/** * The MIT License (MIT) * * Copyright (c) 2014 Kyle Hollins Wray, University of Massachusetts * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <epic/harmonic/harmonic.h> #include <epic/harmonic/harmonic_gpu.h> #include <epic/harmonic/harmonic_model_gpu.h> #include <epic/error_codes.h> #include <epic/constants.h> #include <iostream> #include <stdio.h> #include <math.h> #include <cmath> namespace epic { __global__ void harmonic_update_2d_gpu(unsigned int *m, float *u, unsigned int *locked, unsigned int currentIteration) { for (unsigned int x0 = blockIdx.x; x0 < m[0]; x0 += gridDim.x) { unsigned int offset = (unsigned int)((currentIteration % 2) != (x0 % 2)); for (unsigned int x1 = 2 * threadIdx.x + offset; x1 < m[1]; x1 += 2 * blockDim.x) { // If this is locked, then wait for the other threads to finish in the warp, then continue. if (locked[x0 * m[1] + x1]) { __syncthreads(); continue; } // Update the value at this location with the log-sum-exp trick. float maxVal = EPIC_FLT_MIN; maxVal = fmaxf(u[(x0 - 1) * m[1] + x1], u[(x0 + 1) * m[1] + x1]); maxVal = fmaxf(maxVal, u[x0 * m[1] + (x1 - 1)]); maxVal = fmaxf(maxVal, u[x0 * m[1] + (x1 + 1)]); u[x0 * m[1] + x1] = maxVal + __logf(__expf(u[(x0 - 1) * m[1] + x1] - maxVal) + __expf(u[(x0 + 1) * m[1] + x1] - maxVal) + __expf(u[x0 * m[1] + (x1 - 1)] - maxVal) + __expf(u[x0 * m[1] + (x1 + 1)] - maxVal)) - 1.38629436f; //This is equivalent to ln(2.0 * n) for n = 2. __syncthreads(); } } } __global__ void harmonic_update_and_check_2d_gpu(unsigned int *m, float *u, unsigned int *locked, unsigned int currentIteration, float *delta) { // Since float and unsigned int are 4 bytes each, and we need each array to be the size of // the number of threads, we will need to call this with: sizeof(float) * numThreads. // Note: blockDim.x == numThreads extern __shared__ float sdata[]; float *deltaLocalMax = (float *)sdata; deltaLocalMax[threadIdx.x] = 0.0f; __syncthreads(); for (unsigned int x0 = blockIdx.x; x0 < m[0]; x0 += gridDim.x) { unsigned int offset = (unsigned int)((currentIteration % 2) != (x0 % 2)); for (unsigned int x1 = 2 * threadIdx.x + offset; x1 < m[1]; x1 += 2 * blockDim.x) { // If this is locked, then wait for the other threads to finish in the warp, then continue. if (locked[x0 * m[1] + x1]) { __syncthreads(); continue; } float uPrevious = u[x0 * m[1] + x1]; // Update the value at this location with the log-sum-exp trick. 
float maxVal = EPIC_FLT_MIN; maxVal = fmaxf(u[(x0 - 1) * m[1] + x1], u[(x0 + 1) * m[1] + x1]); maxVal = fmaxf(maxVal, u[x0 * m[1] + (x1 - 1)]); maxVal = fmaxf(maxVal, u[x0 * m[1] + (x1 + 1)]); u[x0 * m[1] + x1] = maxVal + __logf(__expf(u[(x0 - 1) * m[1] + x1] - maxVal) + __expf(u[(x0 + 1) * m[1] + x1] - maxVal) + __expf(u[x0 * m[1] + (x1 - 1)] - maxVal) + __expf(u[x0 * m[1] + (x1 + 1)] - maxVal)) - 1.38629436f; //This is equivalent to ln(2.0 * n) for n = 2. // Compute the updated delta. deltaLocalMax[threadIdx.x] = fmaxf(deltaLocalMax[threadIdx.x], fabs(uPrevious - u[x0 * m[1] + x1])); __syncthreads(); } } // At the end, perform a reduction to efficiently compute the maximal delta for this thread block. for (unsigned int index = blockDim.x / 2; index > 0; index >>= 1) { if (threadIdx.x < index && threadIdx.x + index < blockDim.x) { if (deltaLocalMax[threadIdx.x] < deltaLocalMax[threadIdx.x + index]) { deltaLocalMax[threadIdx.x] = deltaLocalMax[threadIdx.x + index]; } } __syncthreads(); } // Store the maximal delta in the array for delta values. We will use another kernel to quickly // do a reduction over this to find the max delta. if (threadIdx.x == 0) { delta[blockIdx.x] = deltaLocalMax[0]; } } __global__ void harmonic_compute_max_delta_gpu(unsigned int numBlocks, float *delta) { // Stride this thread to compute its individual max. for (unsigned int i = threadIdx.x; i < numBlocks; i += blockDim.x) { delta[threadIdx.x] = fmaxf(delta[threadIdx.x], delta[i]); } __syncthreads(); // Do a final reduction on these values to efficiently compute the true maximal delta. // Note: Afterwards, delta[0] will hold the max delta over all cells. for (unsigned int index = blockDim.x / 2; index > 0; index >>= 1) { if (threadIdx.x < index && threadIdx.x + index < numBlocks) { //delta[threadIdx.x] = fmaxf(delta[threadIdx.x], delta[threadIdx.x + index]); if (delta[threadIdx.x] < delta[threadIdx.x + index]) { delta[threadIdx.x] = delta[threadIdx.x + index]; } } __syncthreads(); } } unsigned int harmonic_compute_num_blocks_gpu(Harmonic *harmonic, unsigned int numThreads) { if (harmonic->n == 2) { return harmonic->m[0]; } else if (harmonic->n == 3) { } else if (harmonic->n == 4) { } return 0; } int harmonic_complete_gpu(Harmonic *harmonic, unsigned int numThreads) { int result; result = harmonic_initialize_dimension_size_gpu(harmonic); if (result != EPIC_SUCCESS) { return result; } result = harmonic_initialize_potential_values_gpu(harmonic); if (result != EPIC_SUCCESS) { return result; } result = harmonic_initialize_locked_gpu(harmonic); if (result != EPIC_SUCCESS) { return result; } result = harmonic_execute_gpu(harmonic, numThreads); if (result != EPIC_SUCCESS) { return result; } result = EPIC_SUCCESS; if (harmonic_uninitialize_dimension_size_gpu(harmonic) != EPIC_SUCCESS) { result = EPIC_ERROR_DEVICE_FREE; } if (harmonic_uninitialize_potential_values_gpu(harmonic) != EPIC_SUCCESS) { result = EPIC_ERROR_DEVICE_FREE; } if (harmonic_uninitialize_locked_gpu(harmonic) != EPIC_SUCCESS) { result = EPIC_ERROR_DEVICE_FREE; } return result; } int harmonic_initialize_gpu(Harmonic *harmonic, unsigned int numThreads) { // Ensure the data is valid. if (harmonic->n == 0 || harmonic->m == nullptr || harmonic->d_delta != nullptr) { fprintf(stderr, "Error[harmonic_initialize_gpu]: %s\n", "Invalid input."); return EPIC_ERROR_INVALID_DATA; } unsigned int numBlocks = harmonic_compute_num_blocks_gpu(harmonic, numThreads); // Allocate the memory on the device. 
if (cudaMalloc(&harmonic->d_delta, numBlocks * sizeof(float)) != cudaSuccess) { fprintf(stderr, "Error[harmonic_initialize_gpu]: %s\n", "Failed to allocate device-side memory for delta."); return EPIC_ERROR_DEVICE_MALLOC; } return EPIC_SUCCESS; } int harmonic_execute_gpu(Harmonic *harmonic, unsigned int numThreads) { // The result from calling other functions. int result; // Ensure data is valid before we begin. if (harmonic == nullptr || harmonic->m == nullptr || harmonic->u == nullptr || harmonic->locked == nullptr || harmonic->epsilon <= 0.0 || harmonic->d_m == nullptr || harmonic->d_u == nullptr || harmonic->d_locked == nullptr) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Invalid data."); return EPIC_ERROR_INVALID_DATA; } if (numThreads % 32 != 0) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Must specficy a number of threads divisible by 32 (the number of threads in a warp)."); return EPIC_ERROR_INVALID_CUDA_PARAM; } // Reset the current iteration. harmonic->currentIteration = 0; result = harmonic_initialize_gpu(harmonic, numThreads); if (result != EPIC_SUCCESS) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Failed to initialize GPU variables."); return result; } // Make sure 'information' can at least be propagated throughout the entire grid. unsigned int mMax = 0; for (unsigned int i = 0; i < harmonic->n; i++) { mMax = std::max(mMax, harmonic->m[i]); } harmonic->delta = harmonic->epsilon + 1.0f; result = EPIC_SUCCESS; // Keep going until a threshold is reached. while (result != EPIC_SUCCESS_AND_CONVERGED || harmonic->currentIteration < mMax) { // We check for convergence on a staggered number of iterations. if (harmonic->currentIteration % harmonic->numIterationsToStaggerCheck == 0) { result = harmonic_update_and_check_gpu(harmonic, numThreads); if (result != EPIC_SUCCESS && result != EPIC_SUCCESS_AND_CONVERGED) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Failed to perform the Gauss-Seidel update and check step."); return result; } } else { result = harmonic_update_gpu(harmonic, numThreads); if (result != EPIC_SUCCESS) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Failed to perform the Gauss-Seidel update step."); return result; } } /* *** DEBUG *** if (harmonic->currentIteration % harmonic->numIterationsToStaggerCheck == 0) { printf("Iteration %i --- %e\n", harmonic->currentIteration, harmonic->delta); fflush(stdout); } //*/ } result = harmonic_get_potential_values_gpu(harmonic); if (result != EPIC_SUCCESS) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Failed to get all the potential values."); return result; } result = harmonic_uninitialize_gpu(harmonic); if (result != EPIC_SUCCESS) { fprintf(stderr, "Error[harmonic_execute_gpu]: %s\n", "Failed to uninitialize GPU variables."); return result; } return EPIC_SUCCESS; } int harmonic_uninitialize_gpu(Harmonic *harmonic) { int result; result = EPIC_SUCCESS; if (harmonic->d_delta != nullptr) { if (cudaFree(harmonic->d_delta) != cudaSuccess) { fprintf(stderr, "Error[harmonic_uninitialize_gpu]: %s\n", "Failed to free device-side memory for delta."); result = EPIC_ERROR_DEVICE_FREE; } } harmonic->d_delta = nullptr; return result; } int harmonic_update_gpu(Harmonic *harmonic, unsigned int numThreads) { unsigned int numBlocks = harmonic_compute_num_blocks_gpu(harmonic, numThreads); if (harmonic->n == 2) { harmonic_update_2d_gpu<<< numBlocks, numThreads >>>(harmonic->d_m, harmonic->d_u, harmonic->d_locked, harmonic->currentIteration); } else if (harmonic->n == 3) { } else if 
(harmonic->n == 4) { } // Check if there was an error executing the kernel. if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "Error[harmonic_update_gpu]: %s\n", "Failed to execute the 'Gauss-Seidel update' kernel."); return EPIC_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (cudaDeviceSynchronize() != cudaSuccess) { fprintf(stderr, "Error[harmonic_update_gpu]: %s\n", "Failed to synchronize the device after 'Gauss-Seidel update' kernel."); return EPIC_ERROR_DEVICE_SYNCHRONIZE; } harmonic->currentIteration++; return EPIC_SUCCESS; } int harmonic_update_and_check_gpu(Harmonic *harmonic, unsigned int numThreads) { // The number of blocks depends on n, m, and the number of threads. unsigned int numBlocks = harmonic_compute_num_blocks_gpu(harmonic, numThreads); if (harmonic->n == 2) { harmonic_update_and_check_2d_gpu<<< numBlocks, numThreads, numThreads * sizeof(float) >>>( harmonic->d_m, harmonic->d_u, harmonic->d_locked, harmonic->currentIteration, harmonic->d_delta); } else if (harmonic->n == 3) { } else if (harmonic->n == 4) { } // Check if there was an error executing the kernel. if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "Error[harmonic_update_and_check_gpu]: %s\n", "Failed to execute the 'Gauss-Seidel update' kernel."); return EPIC_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (cudaDeviceSynchronize() != cudaSuccess) { fprintf(stderr, "Error[harmonic_update_and_check_gpu]: %s\n", "Failed to synchronize the device after 'Gauss-Seidel update' kernel."); return EPIC_ERROR_DEVICE_SYNCHRONIZE; } harmonic_compute_max_delta_gpu<<< 1, numThreads >>>(numBlocks, harmonic->d_delta); // Check if there was an error executing the kernel. if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "Error[harmonic_update_and_check_gpu]: %s\n", "Failed to execute the 'delta check update' kernel."); return EPIC_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (cudaDeviceSynchronize() != cudaSuccess) { fprintf(stderr, "Error[harmonic_update_and_check_gpu]: %s\n", "Failed to synchronize the device after 'delta check update' kernel."); return EPIC_ERROR_DEVICE_SYNCHRONIZE; } // Retrieve the max delta value to check for convergence. Note: The first value in d_delta holds the maximal value. if (cudaMemcpy(&harmonic->delta, harmonic->d_delta, 1 * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Error[harmonic_update_and_check_gpu]: %s\n", "Failed to copy memory from device to host for the max delta."); return EPIC_ERROR_MEMCPY_TO_HOST; } harmonic->currentIteration++; if (harmonic->delta < harmonic->epsilon) { return EPIC_SUCCESS_AND_CONVERGED; } else { return EPIC_SUCCESS; } } int harmonic_get_potential_values_gpu(Harmonic *harmonic) { // Compute the number of cells. unsigned int numCells = 1; for (unsigned int i = 0; i < harmonic->n; i++) { numCells *= harmonic->m[i]; } // Copy the final (or intermediate) result from device to host. if (cudaMemcpy(harmonic->u, harmonic->d_u, numCells * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Error[harmonic_get_potential_values_gpu]: %s\n", "Failed to copy memory from device to host for the potential values."); return EPIC_ERROR_MEMCPY_TO_HOST; } return EPIC_SUCCESS; } }; // namespace epic
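The cell update in the harmonic_update_2d_gpu and harmonic_update_and_check_2d_gpu kernels above (in both the HIP and CUDA copies) is a numerically stable log-sum-exp over the four axis neighbours; the constant 1.38629436f is ln(2n) = ln(4) for n = 2. A minimal standalone sketch of that pattern, for illustration only (this helper is not part of the library):

// Illustrative device helper: stable log-sum-exp of four values,
// logSumExp4(a, b, c, d) = log(exp(a) + exp(b) + exp(c) + exp(d)).
// Subtracting the maximum first avoids overflow in the exponentials.
__device__ float logSumExp4(float a, float b, float c, float d)
{
    float m = fmaxf(fmaxf(a, b), fmaxf(c, d));
    return m + __logf(__expf(a - m) + __expf(b - m) + __expf(c - m) + __expf(d - m));
}
// The kernel update is then u[cell] = logSumExp4(up, down, left, right) - logf(2.0f * n), with n = 2.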
5d8e75e1fd92865bb9fa4c3e39a9b3c39f5a1f1c.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>

template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
    return __ldg(ptr);
#else
    return *ptr;
#endif
}

extern "C" __global__ void zSmooth(
      int nz
    , int ny
    , int nx
    , float alpha
    , float * data   // data (in/out)
) {
    int kx = blockIdx.x*blockDim.x + threadIdx.x;
    int ky = blockIdx.y*blockDim.y + threadIdx.y;
    if (kx < nx && ky < ny) {
        int k_0 = nx*ky + kx;
        int k_1 = nx*ny + nx*ky + kx;
        for (int i = 0; i + 1 < nz; i++, k_0 += nx*ny, k_1 += nx*ny) {
            data[k_1] += data[k_0] * alpha;
        }
        k_0 -= nx*ny;
        k_1 -= nx*ny;
        for (int i = 0; i + 1 < nz && k_0 >= 0 && k_1 >= 0; i++, k_0 -= nx*ny, k_1 -= nx*ny) {
            data[k_0] += data[k_1] * alpha;
        }
    }
}
5d8e75e1fd92865bb9fa4c3e39a9b3c39f5a1f1c.cu
#include <cuda.h>
#include <cufft.h>
#include <cuda_profiler_api.h>
#include <stdio.h>

template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
    return __ldg(ptr);
#else
    return *ptr;
#endif
}

extern "C" __global__ void zSmooth(
      int nz
    , int ny
    , int nx
    , float alpha
    , float * data   // data (in/out)
) {
    int kx = blockIdx.x*blockDim.x + threadIdx.x;
    int ky = blockIdx.y*blockDim.y + threadIdx.y;
    if (kx < nx && ky < ny) {
        int k_0 = nx*ky + kx;
        int k_1 = nx*ny + nx*ky + kx;
        for (int i = 0; i + 1 < nz; i++, k_0 += nx*ny, k_1 += nx*ny) {
            data[k_1] += data[k_0] * alpha;
        }
        k_0 -= nx*ny;
        k_1 -= nx*ny;
        for (int i = 0; i + 1 < nz && k_0 >= 0 && k_1 >= 0; i++, k_0 -= nx*ny, k_1 -= nx*ny) {
            data[k_0] += data[k_1] * alpha;
        }
    }
}
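The zSmooth kernel above runs one thread per (kx, ky) position and sweeps that column of the nz*ny*nx volume forward and then backward along z, accumulating the neighbouring slice scaled by alpha. A minimal host-side launch sketch (illustrative only; the block shape and the assumption that data already resides on the device are not part of the original file):

// Illustrative launch for zSmooth: cover the (nx, ny) plane with 2D blocks,
// one thread per column of the volume.
dim3 block(32, 8);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
zSmooth<<<grid, block>>>(nz, ny, nx, alpha, data);   // data: device pointer to nz*ny*nx floats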
8d6667efee51a9eebe73743abb0eddba46f22fe0.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>

// Matrix-vector product: out[tid] = sum_i vec[i] * mat[i*M + tid], where vec has N entries and mat is N x M.
__global__ void kernel(float *vec, float *mat, float *out, const int N, const int M) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    float sum = 0;
    if (tid < M) {
        // Accumulate in a register and write the result once; the output buffer (dev_c)
        // is never initialised on the device, so reading out[tid] before writing would use garbage.
        for (int i = 0; i < N; i++)
            sum += vec[i] * mat[(i * M) + tid];
        out[tid] = sum;
    }
}

__global__ void sum_mat(float* A, float* B, float* C, int rows, int cols) {
    int i = (blockDim.x * blockIdx.x) + threadIdx.x;
    if (i < (rows * cols))
        A[i] = B[i] + C[i];
}

__global__ void sum_mat_r(float* A, float* B, float* C, int rows, int cols) {
    int index_rows = (blockDim.x * blockIdx.x) + threadIdx.x;
    if (index_rows < rows) {
        for (unsigned int i = 0; i < cols; ++i)
            A[index_rows * cols + i] = B[index_rows * cols + i] + C[index_rows * cols + i];
    }
}

__global__ void sum_mat_c(float* A, float* B, float* C, int rows, int cols) {
    int index_columns = (blockDim.x * blockIdx.x) + threadIdx.x;
    if (index_columns < cols) {
        for (unsigned int i = 0; i < rows; ++i)
            A[cols * i + index_columns] = B[cols * i + index_columns] + C[cols * i + index_columns];
    }
}

// debugging functions
void init_array(float *a, const int N);
void init_mat(float *a, const int N, const int M);
void print_array(float *a, const int N, char *d);
void print_mat(float *a, const int N, const int M, char *d);

int main(void) {
    srand(time(NULL));
    float *a, *b, *c;
    float *dev_a, *dev_b, *dev_c;
    int N = 5;
    int M = 7;

    a = (float*) malloc(sizeof(float) * N);
    b = (float*) malloc(sizeof(float) * N * M);
    c = (float*) malloc(sizeof(float) * M);
    init_array(a, N);
    init_mat(b, N, M);
    init_array(c, M);

    printf("<<<<<<<<<< initial data:\n");
    print_array(a, N, "in-vector");
    print_mat(b, N, M, "matrix");
    print_array(c, M, "out-vector");

    hipMalloc((void**) &dev_a, sizeof(float) * N);
    hipMalloc((void**) &dev_b, sizeof(float) * N * M);
    hipMalloc((void**) &dev_c, sizeof(float) * M);
    hipMemcpy(dev_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, sizeof(float) * N * M, hipMemcpyHostToDevice);

    printf("\n\nRunning Kernel...\n\n");
    hipLaunchKernelGGL(( kernel), dim3(M/256+1), dim3(256), 0, 0, dev_a, dev_b, dev_c, N, M);
    //printf("error code: %s\n", hipGetErrorString(hipGetLastError()));
    hipMemcpy(c, dev_c, sizeof(float) * M, hipMemcpyDeviceToHost);

    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);

    printf(">>>>>>>>>> final data:\n");
    print_array(c, M, "out-vector");
    return 0;
}

void init_array(float *a, const int N) {
    int i;
    for (i = 0; i < N; i++)
        a[i] = rand() % 4 + 1;
}

void init_mat(float *a, const int N, const int M) {
    int i, j;
    for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
            a[i * M + j] = rand() % 4 + 1;
}

void print_array(float *a, const int N, char *d) {
    int i;
    for (i = 0; i < N; i++)
        printf("\n%s[%d]: %f", d, i, a[i]);
    printf("\n");
}

void print_mat(float *a, const int N, const int M, char *d) {
    int i, j;
    for (i = 0; i < N; i++) {
        printf("\n%s[%d]:", d, i);
        for (j = 0; j < M; j++)
            printf("\t%6.4f", a[i * M + j]);
    }
    printf("\n");
}
8d6667efee51a9eebe73743abb0eddba46f22fe0.cu
#include <stdio.h>
#include <cuda.h>
#include <time.h>

// Matrix-vector product: out[tid] = sum_i vec[i] * mat[i*M + tid], where vec has N entries and mat is N x M.
__global__ void kernel(float *vec, float *mat, float *out, const int N, const int M) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    float sum = 0;
    if (tid < M) {
        // Accumulate in a register and write the result once; the output buffer (dev_c)
        // is never initialised on the device, so reading out[tid] before writing would use garbage.
        for (int i = 0; i < N; i++)
            sum += vec[i] * mat[(i * M) + tid];
        out[tid] = sum;
    }
}

__global__ void sum_mat(float* A, float* B, float* C, int rows, int cols) {
    int i = (blockDim.x * blockIdx.x) + threadIdx.x;
    if (i < (rows * cols))
        A[i] = B[i] + C[i];
}

__global__ void sum_mat_r(float* A, float* B, float* C, int rows, int cols) {
    int index_rows = (blockDim.x * blockIdx.x) + threadIdx.x;
    if (index_rows < rows) {
        for (unsigned int i = 0; i < cols; ++i)
            A[index_rows * cols + i] = B[index_rows * cols + i] + C[index_rows * cols + i];
    }
}

__global__ void sum_mat_c(float* A, float* B, float* C, int rows, int cols) {
    int index_columns = (blockDim.x * blockIdx.x) + threadIdx.x;
    if (index_columns < cols) {
        for (unsigned int i = 0; i < rows; ++i)
            A[cols * i + index_columns] = B[cols * i + index_columns] + C[cols * i + index_columns];
    }
}

// debugging functions
void init_array(float *a, const int N);
void init_mat(float *a, const int N, const int M);
void print_array(float *a, const int N, char *d);
void print_mat(float *a, const int N, const int M, char *d);

int main(void) {
    srand(time(NULL));
    float *a, *b, *c;
    float *dev_a, *dev_b, *dev_c;
    int N = 5;
    int M = 7;

    a = (float*) malloc(sizeof(float) * N);
    b = (float*) malloc(sizeof(float) * N * M);
    c = (float*) malloc(sizeof(float) * M);
    init_array(a, N);
    init_mat(b, N, M);
    init_array(c, M);

    printf("<<<<<<<<<< initial data:\n");
    print_array(a, N, "in-vector");
    print_mat(b, N, M, "matrix");
    print_array(c, M, "out-vector");

    cudaMalloc((void**) &dev_a, sizeof(float) * N);
    cudaMalloc((void**) &dev_b, sizeof(float) * N * M);
    cudaMalloc((void**) &dev_c, sizeof(float) * M);
    cudaMemcpy(dev_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(float) * N * M, cudaMemcpyHostToDevice);

    printf("\n\nRunning Kernel...\n\n");
    kernel<<<M/256+1, 256>>>(dev_a, dev_b, dev_c, N, M);
    //printf("error code: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaMemcpy(c, dev_c, sizeof(float) * M, cudaMemcpyDeviceToHost);

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    printf(">>>>>>>>>> final data:\n");
    print_array(c, M, "out-vector");
    return 0;
}

void init_array(float *a, const int N) {
    int i;
    for (i = 0; i < N; i++)
        a[i] = rand() % 4 + 1;
}

void init_mat(float *a, const int N, const int M) {
    int i, j;
    for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
            a[i * M + j] = rand() % 4 + 1;
}

void print_array(float *a, const int N, char *d) {
    int i;
    for (i = 0; i < N; i++)
        printf("\n%s[%d]: %f", d, i, a[i]);
    printf("\n");
}

void print_mat(float *a, const int N, const int M, char *d) {
    int i, j;
    for (i = 0; i < N; i++) {
        printf("\n%s[%d]:", d, i);
        for (j = 0; j < M; j++)
            printf("\t%6.4f", a[i * M + j]);
    }
    printf("\n");
}
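For completeness, a small host-side reference check could be appended after the device result is copied back; this is a sketch only (not part of either file above), reusing the host arrays a, b, c and the sizes N and M from the listing:

// Illustrative check: recompute the product on the CPU and compare with the GPU result.
for (int col = 0; col < M; col++) {
    float ref = 0;
    for (int i = 0; i < N; i++)
        ref += a[i] * b[i * M + col];
    printf("col %d: gpu %f, cpu %f\n", col, c[col], ref);
}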
756b96cf0394c4152e64b1353b2575fee887ca40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Distributed under MIT licence. See https://github.com/aniabrown/QuEST_GPU/blob/master/LICENCE.txt for details /** @file * An implementation of the backend in ../QuEST_internal.h for a GPU environment. */ # include "QuEST.h" # include "QuEST_precision.h" # include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey # include "mt19937ar.h" # include <stdlib.h> # include <stdio.h> # include <math.h> # define REDUCE_SHARED_SIZE 512 # define DEBUG 0 static __device__ int extractBit (int locationOfBitFromRight, long long int theEncodedNumber) { return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight; } #ifdef __cplusplus extern "C" { #endif void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) { hipDeviceSynchronize(); hipMemcpy( qureg.deviceStateVec.real + startInd, reals, numAmps * sizeof(*(qureg.deviceStateVec.real)), hipMemcpyHostToDevice); hipMemcpy( qureg.deviceStateVec.imag + startInd, imags, numAmps * sizeof(*(qureg.deviceStateVec.real)), hipMemcpyHostToDevice); } /** works for both statevectors and density matrices */ void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) { // copy copyQureg's GPU statevec to targetQureg's GPU statevec hipDeviceSynchronize(); hipMemcpy( targetQureg.deviceStateVec.real, copyQureg.deviceStateVec.real, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.real)), hipMemcpyDeviceToDevice); hipMemcpy( targetQureg.deviceStateVec.imag, copyQureg.deviceStateVec.imag, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.imag)), hipMemcpyDeviceToDevice); } __global__ void densmatr_initPureStateKernel( long long int numPureAmps, qreal *targetVecReal, qreal *targetVecImag, qreal *copyVecReal, qreal *copyVecImag) { // this is a particular index of the pure copyQureg long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=numPureAmps) return; qreal realRow = copyVecReal[index]; qreal imagRow = copyVecImag[index]; for (long long int col=0; col < numPureAmps; col++) { qreal realCol = copyVecReal[col]; qreal imagCol = - copyVecImag[col]; // minus for conjugation targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol; targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol; } } void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_initPureStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, copyQureg.numAmpsPerChunk, targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag, copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag); } __global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = probFactor; stateVecImag[index] = 0.0; } void densmatr_initPlusState(Qureg qureg) { qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented)); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, probFactor, 
qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void densmatr_initClassicalStateKernel( long long int densityNumElems, qreal *densityReal, qreal *densityImag, long long int densityInd) { // initialise the state to all zeros long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= densityNumElems) return; densityReal[index] = 0.0; densityImag[index] = 0.0; if (index==densityInd){ // classical state has probability 1 densityReal[densityInd] = 1.0; densityImag[densityInd] = 0.0; } } void densmatr_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); // index of the desired state in the flat density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int densityInd = (densityDim + 1)*stateInd; // identical to pure version hipLaunchKernelGGL(( densmatr_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, densityInd); } void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env) { // allocate CPU memory long long int numAmps = 1L << numQubits; long long int numAmpsPerRank = numAmps/env.numRanks; qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.real)); qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.imag)); if (env.numRanks>1){ qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.real)); qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.imag)); } // check cpu memory allocation was successful if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag)) && numAmpsPerRank ) { printf("Could not allocate memory!\n"); exit (EXIT_FAILURE); } if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag)) && numAmpsPerRank ) { printf("Could not allocate memory!\n"); exit (EXIT_FAILURE); } qureg->numQubitsInStateVec = numQubits; qureg->numAmpsPerChunk = numAmpsPerRank; qureg->numAmpsTotal = numAmps; qureg->chunkId = env.rank; qureg->numChunks = env.numRanks; qureg->isDensityMatrix = 0; // allocate GPU memory hipMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real))); hipMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag))); hipMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal)); hipMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))* sizeof(qreal)); // check gpu memory allocation was successful if (!(qureg->deviceStateVec.real) || !(qureg->deviceStateVec.imag)){ printf("Could not allocate memory on GPU!\n"); exit (EXIT_FAILURE); } } void statevec_destroyQureg(Qureg qureg, QuESTEnv env) { // Free CPU memory free(qureg.stateVec.real); free(qureg.stateVec.imag); if (env.numRanks>1){ free(qureg.pairStateVec.real); free(qureg.pairStateVec.imag); } // Free GPU memory hipFree(qureg.deviceStateVec.real); hipFree(qureg.deviceStateVec.imag); } int GPUExists(void){ int deviceCount, device; int gpuDeviceCount = 0; struct hipDeviceProp_t properties; hipError_t cudaResultCode = hipGetDeviceCount(&deviceCount); if (cudaResultCode != hipSuccess) deviceCount = 0; /* machines with no GPUs can still report one emulation device */ for (device = 0; device < 
deviceCount; ++device) {
        hipGetDeviceProperties(&properties, device);
        if (properties.major != 9999) { /* 9999 means emulation only */
            ++gpuDeviceCount;
        }
    }
    if (gpuDeviceCount) return 1;
    else return 0;
}

QuESTEnv createQuESTEnv(void) {
    // init MPI environment
    if (!GPUExists()){
        printf("Trying to run GPU code with no GPU available\n");
        exit(EXIT_FAILURE);
    }

    QuESTEnv env;
    env.rank=0;
    env.numRanks=1;

    seedQuESTDefault();

    return env;
}

void syncQuESTEnv(QuESTEnv env){
    hipDeviceSynchronize();
}

int syncQuESTSuccess(int successCode){
    return successCode;
}

void destroyQuESTEnv(QuESTEnv env){
    // MPI finalize goes here in MPI version. Call this function anyway for consistency
}

void reportQuESTEnv(QuESTEnv env){
    printf("EXECUTION ENVIRONMENT:\n");
    printf("Running locally on one node with GPU\n");
    printf("Number of ranks is %d\n", env.numRanks);
# ifdef _OPENMP
    printf("OpenMP enabled\n");
    printf("Number of threads available is %d\n", omp_get_max_threads());
# else
    printf("OpenMP disabled\n");
# endif
}

void getEnvironmentString(QuESTEnv env, Qureg qureg, char str[200]){
    sprintf(str, "%dqubits_GPU_noMpi_noOMP", qureg.numQubitsInStateVec);
}

void copyStateToGPU(Qureg qureg)
{
    if (DEBUG) printf("Copying data to GPU\n");
    hipMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real,
            qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyHostToDevice);
    hipMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag,
            qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyHostToDevice);
    if (DEBUG) printf("Finished copying data to GPU\n");
}

void copyStateFromGPU(Qureg qureg)
{
    hipDeviceSynchronize();
    if (DEBUG) printf("Copying data from GPU\n");
    hipMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real,
            qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyDeviceToHost);
    hipMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag,
            qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyDeviceToHost);
    if (DEBUG) printf("Finished copying data from GPU\n");
}

/** Print the current state vector of probability amplitudes for a set of qubits to standard out.
 * For debugging purposes. Each rank should print output serially.
Only print output for systems <= 5 qubits */ void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){ long long int index; int rank; copyStateFromGPU(qureg); if (qureg.numQubitsInStateVec<=5){ for (rank=0; rank<qureg.numChunks; rank++){ if (qureg.chunkId==rank){ if (reportRank) { printf("Reporting state from rank %d [\n", qureg.chunkId); //printf("\trank, index, real, imag\n"); printf("real, imag\n"); } else if (rank==0) { printf("Reporting state [\n"); printf("real, imag\n"); } for(index=0; index<qureg.numAmpsPerChunk; index++){ printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]); } if (reportRank || rank==qureg.numChunks-1) printf("]\n"); } syncQuESTEnv(env); } } } qreal statevec_getRealAmp(Qureg qureg, long long int index){ qreal el=0; hipMemcpy(&el, &(qureg.deviceStateVec.real[index]), sizeof(*(qureg.deviceStateVec.real)), hipMemcpyDeviceToHost); return el; } qreal statevec_getImagAmp(Qureg qureg, long long int index){ qreal el=0; hipMemcpy(&el, &(qureg.deviceStateVec.imag[index]), sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyDeviceToHost); return el; } __global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; // initialise the state to |0000..0000> index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; if (index==0){ // zero state |0000..0000> has probability 1 stateVecReal[0] = 1.0; stateVecImag[0] = 0.0; } } void statevec_initZeroState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initZeroStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize); stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } void statevec_initPlusState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){ long long int index; // initialise the state to |stateInd> index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; if (index==stateInd){ // classical state has probability 1 stateVecReal[stateInd] = 1.0; stateVecImag[stateInd] = 0.0; } } void statevec_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, stateInd); } __global__ void statevec_initStateDebugKernel(long long int 
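/* Worked example for statevec_initPlusState (above): every amplitude is set to
 * normFactor = 1/sqrt(stateVecSize). For a 2-qubit register (4 amplitudes)
 * this gives 0.5 + 0i in every slot, i.e. the uniform superposition
 * (|00> + |01> + |10> + |11>) / 2. */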
stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = (index*2.0)/10.0; stateVecImag[index] = (index*2.0+1.0)/10.0; } void statevec_initStateDebug(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initStateDebugKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){ long long int index; int bit; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2); bit = extractBit(qubitId, index); if (bit==outcome) { stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } else { stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; } } void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initStateOfSingleQubitKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome); } // returns 1 if successful, else 0 int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){ long long int chunkSize, stateVecSize; long long int indexInChunk, totalIndex; chunkSize = qureg->numAmpsPerChunk; stateVecSize = chunkSize*qureg->numChunks; qreal *stateVecReal = qureg->stateVec.real; qreal *stateVecImag = qureg->stateVec.imag; FILE *fp; char line[200]; fp = fopen(filename, "r"); if (fp == NULL) return 0; indexInChunk = 0; totalIndex = 0; while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){ if (line[0]!='#'){ int chunkId = totalIndex/chunkSize; if (chunkId==qureg->chunkId){ # if QuEST_PREC==1 sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # elif QuEST_PREC==2 sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # elif QuEST_PREC==4 sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # endif indexInChunk += 1; } totalIndex += 1; } } fclose(fp); copyStateToGPU(*qureg); // indicate success return 1; } int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){ qreal diff; int chunkSize = mq1.numAmpsPerChunk; copyStateFromGPU(mq1); copyStateFromGPU(mq2); for (int i=0; i<chunkSize; i++){ diff = mq1.stateVec.real[i] - mq2.stateVec.real[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; } return 1; } __global__ void statevec_compactUnitaryKernel (Qureg qureg, const int rotQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int 
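/* Worked example of the block indexing used by this kernel (and by most
 * single-target kernels below): each thread updates the amplitude pair whose
 * indices differ only in bit `rotQubit`. For rotQubit = 1:
 *     sizeHalfBlock = 2, sizeBlock = 4
 *     thisTask = 0 -> (indexUp, indexLo) = (0, 2)
 *     thisTask = 1 -> (1, 3)
 *     thisTask = 2 -> (4, 6)
 *     thisTask = 3 -> (5, 7)
 */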
thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << rotQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } void statevec_compactUnitary(Qureg qureg, const int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_compactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, alpha, beta); } __global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
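/* Usage sketch for statevec_compactUnitary (above), which applies
 *     [ alpha        -conj(beta) ]
 *     [ beta          conj(alpha)]
 * to the target qubit. Assuming Complex is brace-initialisable as {real, imag}
 * (the kernel only relies on those two fields), an Rx(theta) rotation would be
 *     Complex alpha = { cos(theta/2), 0 };
 *     Complex beta  = { 0, -sin(theta/2) };
 *     statevec_compactUnitary(qureg, targetQubit, alpha, beta);
 * since [[cos(t/2), -i sin(t/2)], [-i sin(t/2), cos(t/2)]] is exactly Rx(theta).
 */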
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } } void statevec_controlledCompactUnitary(Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledCompactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, alpha, beta); } __global__ void statevec_unitaryKernel(Qureg qureg, const int targetQubit, ComplexMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } void statevec_unitary(Qureg qureg, const int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_unitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, u); } __global__ void statevec_controlledUnitaryKernel(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
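/* Usage sketch for statevec_unitary (above). ComplexMatrix2 holds the four
 * entries r0c0, r0c1, r1c0, r1c1, each with .real/.imag fields, as the
 * kernel's accesses show. A Hadamard, for instance, could be applied with:
 *     qreal r = 1.0/sqrt(2.0);
 *     ComplexMatrix2 h;
 *     h.r0c0.real = r;  h.r0c0.imag = 0;
 *     h.r0c1.real = r;  h.r0c1.imag = 0;
 *     h.r1c0.real = r;  h.r1c0.imag = 0;
 *     h.r1c1.real = -r; h.r1c1.imag = 0;
 *     statevec_unitary(qureg, targetQubit, h);
 */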
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_controlledUnitary(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, u); } __global__ void statevec_multiControlledUnitaryKernel(Qureg qureg, long long int mask, const int targetQubit, ComplexMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; if (mask == (mask & indexUp) ){ // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_multiControlledUnitary(Qureg qureg, int *controlQubits, int numControlQubits, const int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; long long int mask=0; for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]); threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiControlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, targetQubit, u); } __global__ void statevec_pauliXKernel(Qureg qureg, const int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
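/* Worked example of the control mask test used in
 * statevec_multiControlledUnitary (above): an amplitude is updated only when
 * every control bit of its index is 1, i.e. when mask == (mask & index).
 * For controls {0, 2}:
 *     mask = (1LL<<0) | (1LL<<2) = 0b101 = 5
 *     index 13 = 0b1101 -> 13 & 5 == 5 -> updated
 *     index 12 = 0b1100 -> 12 & 5 == 4 -> left unchanged
 */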
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateVecReal[indexUp] = stateVecReal[indexLo]; stateVecImag[indexUp] = stateVecImag[indexLo]; stateVecReal[indexLo] = stateRealUp; stateVecImag[indexLo] = stateImagUp; } void statevec_pauliX(Qureg qureg, const int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_pauliXKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit); } __global__ void statevec_pauliYKernel(Qureg qureg, const int targetQubit, const int conjFac){ long long int sizeHalfBlock = 1LL << targetQubit; long long int sizeBlock = 2LL * sizeHalfBlock; long long int numTasks = qureg.numAmpsPerChunk >> 1; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; long long int thisBlock = thisTask / sizeHalfBlock; long long int indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; long long int indexLo = indexUp + sizeHalfBlock; qreal stateRealUp, stateImagUp; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; // update under +-{{0, -i}, {i, 0}} stateVecReal[indexUp] = conjFac * stateVecImag[indexLo]; stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo]; stateVecReal[indexLo] = conjFac * -stateImagUp; stateVecImag[indexLo] = conjFac * stateRealUp; } void statevec_pauliY(Qureg qureg, const int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, 1); } void statevec_pauliYConj(Qureg qureg, const int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, -1); } __global__ void statevec_controlledPauliYKernel(Qureg qureg, const int controlQubit, const int targetQubit, const int conjFac) { long long int index; long long int sizeBlock, sizeHalfBlock; long long int stateVecSize; int controlBit; qreal stateRealUp, stateImagUp; long long int thisBlock, indexUp, indexLo; sizeHalfBlock = 1LL << targetQubit; sizeBlock = 2LL * sizeHalfBlock; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=(stateVecSize>>1)) return; thisBlock = index / sizeHalfBlock; indexUp = thisBlock*sizeBlock + index%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; // update under +-{{0, -i}, {i, 0}} stateVecReal[indexUp] = conjFac * stateVecImag[indexLo]; stateVecImag[indexUp] = conjFac * 
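/* Worked example: the update here applies conjFac * [[0, -i], [i, 0]] to the
 * target qubit (conjFac = +1 for Pauli-Y, -1 for its complex conjugate).
 * Acting on |0> (indexUp amplitude 1, indexLo amplitude 0) with conjFac = +1
 * leaves indexUp at 0 and sets indexLo to 0 + 1i, i.e. Y|0> = i|1>. */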
-stateVecReal[indexLo]; stateVecReal[indexLo] = conjFac * -stateImagUp; stateVecImag[indexLo] = conjFac * stateRealUp; } } void statevec_controlledPauliY(Qureg qureg, const int controlQubit, const int targetQubit) { int conjFactor = 1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor); } void statevec_controlledPauliYConj(Qureg qureg, const int controlQubit, const int targetQubit) { int conjFactor = -1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor); } __global__ void statevec_phaseShiftByTermKernel(Qureg qureg, const int targetQubit, qreal cosAngle, qreal sinAngle) { long long int sizeBlock, sizeHalfBlock; long long int thisBlock, indexUp,indexLo; qreal stateRealLo, stateImagLo; long long int thisTask; const long long int numTasks = qureg.numAmpsPerChunk >> 1; sizeHalfBlock = 1LL << targetQubit; sizeBlock = 2LL * sizeHalfBlock; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo; } void statevec_phaseShiftByTerm(Qureg qureg, const int targetQubit, Complex term) { qreal cosAngle = term.real; qreal sinAngle = term.imag; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_phaseShiftByTermKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, cosAngle, sinAngle); } __global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, const int idQubit1, const int idQubit2, qreal cosAngle, qreal sinAngle) { long long int index; long long int stateVecSize; int bit1, bit2; qreal stateRealLo, stateImagLo; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_controlledPhaseShift(Qureg qureg, const int idQubit1, const int idQubit2, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2, cosAngle, sinAngle); } __global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long 
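/* Usage sketch for statevec_phaseShiftByTerm (above): only the |1> amplitudes
 * of the target qubit are multiplied by `term`, so the gate is diag(1, term).
 * Passing term = { cos(angle), sin(angle) } applies diag(1, e^{i*angle});
 * for example a T gate (angle = pi/4) would be
 *     Complex t;
 *     t.real = cos(M_PI/4);  t.imag = sin(M_PI/4);
 *     statevec_phaseShiftByTerm(qureg, targetQubit, t);
 */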
long int mask, qreal cosAngle, qreal sinAngle) { qreal stateRealLo, stateImagLo; long long int index; long long int stateVecSize; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; if (mask == (mask & index) ){ stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); long long int mask=0; for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiControlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, cosAngle, sinAngle); } qreal densmatr_calcTotalProb(Qureg qureg) { // computes the trace using Kahan summation qreal pTotal=0; qreal y, t, c; c = 0; long long int numCols = 1LL << qureg.numQubitsRepresented; long long diagIndex; copyStateFromGPU(qureg); for (int col=0; col< numCols; col++) { diagIndex = col*(numCols + 1); y = qureg.stateVec.real[diagIndex] - c; t = pTotal + y; c = ( t - pTotal ) - y; // brackets are important pTotal = t; } return pTotal; } qreal statevec_calcTotalProb(Qureg qureg){ /* IJB - implemented using Kahan summation for greater accuracy at a slight floating point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */ /* Don't change the bracketing in this routine! 
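// Worked example for densmatr_calcTotalProb (above): the trace walks the real
// diagonal of the row-major density matrix via diagIndex = col*(numCols + 1).
// With 2 represented qubits the matrix is 4x4, so it visits flat indices
// 0, 5, 10 and 15.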
*/ qreal pTotal=0; qreal y, t, c; long long int index; long long int numAmpsPerRank = qureg.numAmpsPerChunk; copyStateFromGPU(qureg); c = 0.0; for (index=0; index<numAmpsPerRank; index++){ /* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */ // pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; /* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */ //pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; } return pTotal; } __global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, const int idQubit1, const int idQubit2) { long long int index; long long int stateVecSize; int bit1, bit2; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_controlledPhaseFlip(Qureg qureg, const int idQubit1, const int idQubit2) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2); } __global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask) { long long int index; long long int stateVecSize; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; if (mask == (mask & index) ){ stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits) { int threadsPerCUDABlock, CUDABlocks; long long int mask=0; for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]); threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiControlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask); } __global__ void statevec_hadamardKernel (Qureg qureg, const int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
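/* Worked example: statevec_controlledPhaseFlip (above) negates exactly those
 * amplitudes whose index has both idQubit1 and idQubit2 set, i.e. it is the
 * controlled-Z gate. In a 2-qubit register only the |11> amplitude (flat
 * index 3) changes sign. statevec_multiControlledPhaseFlip generalises this to
 * any set of qubits via the same mask == (mask & index) test. */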
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal recRoot2 = 1.0/sqrt(2.0); thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo); stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo); stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo); stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo); } void statevec_hadamard(Qureg qureg, const int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_hadamardKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit); } __global__ void statevec_controlledNotKernel(Qureg qureg, const int controlQubit, const int targetQubit) { long long int index; long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved long long int stateVecSize; int controlBit; // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=(stateVecSize>>1)) return; thisBlock = index / sizeHalfBlock; indexUp = thisBlock*sizeBlock + index%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateVecReal[indexUp] = stateVecReal[indexLo]; stateVecImag[indexUp] = stateVecImag[indexLo]; stateVecReal[indexLo] = stateRealUp; stateVecImag[indexLo] = stateImagUp; } } void statevec_controlledNot(Qureg qureg, const int controlQubit, const int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledNotKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit); } __device__ __host__ unsigned int log2Int( unsigned int x ) { unsigned int ans = 0 ; while( x>>=1 ) ans++; return ans ; } __device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){ int i, l, r; int threadMax, maxDepth; threadMax = length/2; maxDepth = log2Int(length/2); for (i=0; i<maxDepth+1; i++){ if (threadIdx.x<threadMax){ l = threadIdx.x; r = l + threadMax; arrayIn[l] = arrayIn[r] + arrayIn[l]; } threadMax = threadMax >> 1; __syncthreads(); // optimise -- use warp shuffle instead } if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0]; } __global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){ extern __shared__ qreal tempReductionArray[]; int blockOffset = blockIdx.x*length; tempReductionArray[threadIdx.x*2] = 
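/* Worked example for reduceBlock (above): it tree-sums `length` values held in
 * shared memory, halving the number of active threads each pass. For
 * length = 8, threadMax goes 4 -> 2 -> 1 over three iterations, after which
 * arrayIn[0] holds the total and thread 0 writes it to
 * reducedArray[blockIdx.x]. */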
arrayIn[blockOffset + threadIdx.x*2]; tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1]; __syncthreads(); reduceBlock(tempReductionArray, reducedArray, length); } __global__ void densmatr_findProbabilityOfZeroKernel( Qureg qureg, const int measureQubit, qreal *reducedArray ) { // run by each thread // use of block here refers to contiguous amplitudes where measureQubit = 0, // (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numTasks = densityDim >> 1; long long int sizeHalfBlock = 1LL << (measureQubit); long long int sizeBlock = 2LL * sizeHalfBlock; long long int thisBlock; // which block this thread is processing long long int thisTask; // which part of the block this thread is processing long long int basisIndex; // index of this thread's computational basis state long long int densityIndex; // " " index of |basis><basis| in the flat density matrix // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; // figure out which density matrix prob that this thread is assigned thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock; densityIndex = (densityDim + 1) * basisIndex; // record the probability in the CUDA-BLOCK-wide array qreal prob = qureg.deviceStateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0 tempReductionArray[threadIdx.x] = prob; // sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } __global__ void statevec_findProbabilityOfZeroKernel( Qureg qureg, const int measureQubit, qreal *reducedArray ) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; // (good for shared memory parallelism) extern __shared__ qreal tempReductionArray[]; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // ---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; qreal realVal, imagVal; realVal = stateVecReal[index]; imagVal = stateVecImag[index]; tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal; __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){ int levels=0; 
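/* Worked example of the multi-pass reduction used throughout this file: each
 * kernel launch reduces at most REDUCE_SHARED_SIZE = 512 values per CUDA
 * block, so summing 2^20 partial probabilities takes three launches
 * (2^20 -> 2048 -> 4 -> 1), ping-ponging between firstLevelReduction and
 * secondLevelReduction via swapDouble (defined just below). */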
while (numValuesToReduce){ numValuesToReduce = numValuesToReduce/numReducedPerLevel; levels++; } return levels; } void swapDouble(qreal **a, qreal **b){ qreal *temp; temp = *a; *a = *b; *b = temp; } qreal densmatr_findProbabilityOfZero(Qureg qureg, const int measureQubit) { long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0 int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block if (firstTime) { hipLaunchKernelGGL(( densmatr_findProbabilityOfZeroKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg, measureQubit, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal zeroProb; hipMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return zeroProb; } qreal statevec_findProbabilityOfZero(Qureg qureg, const int measureQubit) { long long int numValuesToReduce = qureg.numAmpsPerChunk>>1; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; qreal stateProb=0; int firstTime=1; int maxReducedPerLevel = REDUCE_SHARED_SIZE; while(numValuesToReduce>1){ if (numValuesToReduce<maxReducedPerLevel){ // Need less than one CUDA block to reduce values valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { // Use full CUDA blocks, with block size constrained by shared mem usage valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime){ hipLaunchKernelGGL(( statevec_findProbabilityOfZeroKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg, measureQubit, qureg.firstLevelReduction); firstTime=0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return stateProb; } qreal statevec_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome) { qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - outcomeProb; return outcomeProb; } qreal densmatr_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome) { qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - 
outcomeProb; return outcomeProb; } /** computes either a real or imag term in the inner product */ __global__ void statevec_calcInnerProductKernel( int getRealComp, qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2, long long int numTermsToSum, qreal* reducedArray) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; // choose whether to calculate the real or imaginary term of the inner product qreal innerProdTerm; if (getRealComp) innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index]; else innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index]; // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = innerProdTerm; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the * inner product, so as to not have to worry about keeping the sums separated during reduction. * Truly disgusting, probably doubles runtime, please fix. * @TODO could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc? */ Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) { qreal innerProdReal, innerProdImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, bra.deviceStateVec.real, bra.deviceStateVec.imag, ket.deviceStateVec.real, ket.deviceStateVec.imag, numValuesToReduce, bra.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, bra.deviceStateVec.real, bra.deviceStateVec.imag, ket.deviceStateVec.real, ket.deviceStateVec.imag, numValuesToReduce, bra.firstLevelReduction); 
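/* Sanity check for statevec_calcInnerProduct: the kernel accumulates the two
 * components of <bra|ket> = sum_i conj(bra_i)*ket_i, namely
 *     real: sum_i (bra.re*ket.re + bra.im*ket.im)
 *     imag: sum_i (bra.re*ket.im - bra.im*ket.re)
 * so calling it with bra == ket on a normalised state should return {1, 0}
 * up to floating-point error. */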
firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // return complex Complex innerProd; innerProd.real = innerProdReal; innerProd.imag = innerProdImag; return innerProd; } /** computes one term of (vec^*T) dens * vec */ __global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) { // figure out which density matrix row to consider long long int col; long long int row = blockIdx.x*blockDim.x + threadIdx.x; if (row >= dim) return; qreal* densReal = dens.deviceStateVec.real; qreal* densImag = dens.deviceStateVec.imag; qreal* vecReal = vec.deviceStateVec.real; qreal* vecImag = vec.deviceStateVec.imag; // compute the row-th element of the product dens*vec qreal prodReal = 0; qreal prodImag = 0; for (col=0LL; col < dim; col++) { qreal densElemReal = densReal[dim*col + row]; qreal densElemImag = densImag[dim*col + row]; prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col]; prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col]; } // multiply with row-th elem of (vec^*) qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row]; // imag of every term should be zero, because each is a valid fidelity calc of an eigenstate //qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row]; extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = termReal; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } // @TODO implement qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) { // we're summing the square of every term in the density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block // store the reduction in the pureState array if (firstTime) { hipLaunchKernelGGL(( densmatr_calcFidelityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg, pureState, densityDim, pureState.firstLevelReduction); firstTime = 0; // sum the block probs } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, pureState.firstLevelReduction, pureState.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal fidelity; hipMemcpy(&fidelity, pureState.firstLevelReduction, 
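/* Note on densmatr_calcFidelity (above): the kernel forms one row of
 * rho * |psi> and multiplies it by the conjugated entry of |psi> for that row,
 * so the reduced sum is <psi| rho |psi>. For rho = |psi><psi| (a normalised
 * pure state) the result is 1, which makes a convenient self-test. The
 * reduction reuses pureState's firstLevelReduction/secondLevelReduction
 * scratch buffers. */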
sizeof(qreal), hipMemcpyDeviceToHost); return fidelity; } __global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) { // figure out which density matrix term this thread is assigned long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numAmpsToSum) return; qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index]; // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = term; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Computes the trace of the density matrix squared */ qreal densmatr_calcPurity(Qureg qureg) { // we're summing the square of every term in the density matrix long long int numValuesToReduce = qureg.numAmpsPerChunk; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block if (firstTime) { hipLaunchKernelGGL(( densmatr_calcPurityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal traceDensSquared; hipMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return traceDensSquared; } __global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- measured probability qreal renorm; // probability (returned) value // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity // (good for shared memory parallelism) long long int numTasks=qureg.numAmpsPerChunk>>1; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // ---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // 
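/* Worked example for densmatr_calcPurity (above): it returns
 * Tr(rho^2) = sum_ij |rho_ij|^2, accumulated as re^2 + im^2 over every stored
 * element. A pure state gives 1.0; the maximally mixed single-qubit state
 * diag(0.5, 0.5) gives 0.25 + 0.25 = 0.5. */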
renorm=1/sqrt(totalProbability); qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; if (outcome==0){ stateVecReal[index]=stateVecReal[index]*renorm; stateVecImag[index]=stateVecImag[index]*renorm; stateVecReal[index+sizeHalfBlock]=0; stateVecImag[index+sizeHalfBlock]=0; } else if (outcome==1){ stateVecReal[index]=0; stateVecImag[index]=0; stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm; stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm; } } /* * outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or * else the state-vector will lose normalisation */ void statevec_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, measureQubit, outcome, outcomeProb); } /** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */ __global__ void densmatr_collapseToKnownProbOutcomeKernel( qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit, long long int part1, long long int part2, long long int part3, long long int rowBit, long long int colBit, long long int desired, long long int undesired) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numBasesToVisit) return; long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); // renormalise desired outcome vecReal[base + desired] /= outcomeProb; vecImag[base + desired] /= outcomeProb; // kill undesired outcome vecReal[base + undesired] = 0; vecImag[base + undesired] = 0; // kill |..0..><..1..| states vecReal[base + colBit] = 0; vecImag[base + colBit] = 0; vecReal[base + rowBit] = 0; vecImag[base + rowBit] = 0; } /** This involves finding |...i...><...j...| states and killing those where i!=j */ void densmatr_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb) { int rowQubit = measureQubit + qureg.numQubitsRepresented; int colBit = 1LL << measureQubit; int rowBit = 1LL << rowQubit; long long int numBasesToVisit = qureg.numAmpsPerChunk/4; long long int part1 = colBit -1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numBasesToVisit - (rowBit >> 1); long long int desired, undesired; if (outcome == 0) { desired = 0; undesired = colBit | rowBit; } else { desired = colBit | rowBit; undesired = 0; } int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit, part1, part2, part3, rowBit, colBit, desired, undesired); } __global__ void densmatr_addDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) { long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; if (ampInd >= numAmpsToVisit) return; combineQureg.deviceStateVec.real[ampInd] *= 1-otherProb; 
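/* Together with the imaginary-part and accumulation lines that follow, this
 * update realises the convex mixture
 *     rho_combine <- (1 - otherProb) * rho_combine + otherProb * rho_other.
 * For example, otherProb = 0.5 applied to two pure inputs yields their
 * equal-weight classical mixture. */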
combineQureg.deviceStateVec.imag[ampInd] *= 1-otherProb; combineQureg.deviceStateVec.real[ampInd] += otherProb*otherQureg.deviceStateVec.real[ampInd]; combineQureg.deviceStateVec.imag[ampInd] += otherProb*otherQureg.deviceStateVec.imag[ampInd]; } void densmatr_addDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) { long long int numAmpsToVisit = combineQureg.numAmpsPerChunk; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_addDensityMatrixKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, combineQureg, otherProb, otherQureg, numAmpsToVisit ); } /** Called once for every 4 amplitudes in density matrix * Works by establishing the |..0..><..0..| state (for its given index) then * visiting |..1..><..0..| and |..0..><..1..|. Labels |part1 X pa><rt2 NOT(X) part3| * From the brain of Simon Benjamin */ __global__ void densmatr_oneQubitDephaseKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int colBit, long long int rowBit) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); vecReal[ampInd + colBit] *= fac; vecImag[ampInd + colBit] *= fac; vecReal[ampInd + rowBit] *= fac; vecImag[ampInd + rowBit] *= fac; } void densmatr_oneQubitDephase(Qureg qureg, const int targetQubit, qreal dephase) { if (dephase == 0) return; long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); qreal dephFac = 1 - dephase; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_oneQubitDephaseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, colBit, rowBit); } /** Called 12 times for every 16 amplitudes in density matrix * Each sums from the |..0..0..><..0..0..| index to visit either * |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..| * etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|. 
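 * The 12 visited bit-patterns are DCBA in {1,...,14} excluding 5 (0101) and 10 (1010); together with
 * 0 (0000) and 15 (1111), the skipped patterns are exactly those with D==B and C==A, i.e. the
 * |..i..j..><..i..j..| entries that dephasing must leave unchanged.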
* From the brain of Simon Benjamin */ __global__ void densmatr_twoQubitDephaseKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2) { long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x; if (outerInd >= numAmpsToVisit) return; // sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A| int meta = 1 + (outerInd/numBackgroundStates); if (meta > 4) meta++; if (meta > 9) meta++; long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2); long long int scanInd = outerInd % numBackgroundStates; long long int stateInd = ( shift + (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4)); vecReal[stateInd] *= fac; vecImag[stateInd] *= fac; } // @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems? void densmatr_twoQubitDephase(Qureg qureg, int qubit1, int qubit2, qreal dephase) { if (dephase == 0) return; // assumes qubit2 > qubit1 int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3); qreal dephFac = 1 - dephase; // refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed) long long int numBackgroundStates = qureg.numAmpsPerChunk/16; // 12 of these states experience dephasing long long int numAmpsToVisit = 12 * numBackgroundStates; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_twoQubitDephaseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit, part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2); } /** Works like oneQubitDephase but modifies every other element, and elements are averaged in pairs */ __global__ void densmatr_oneQubitDepolariseKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int bothBits) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); long long int targetInd = baseInd + bothBits; qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]); qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]); vecReal[baseInd] *= 1 - depolLevel; vecImag[baseInd] *= 1 - depolLevel; vecReal[targetInd] *= 1 - depolLevel; vecImag[targetInd] *= 1 - depolLevel; vecReal[baseInd] += realAvDepol; vecImag[baseInd] += imagAvDepol; vecReal[targetInd] += realAvDepol; vecImag[targetInd] += imagAvDepol; } void densmatr_oneQubitDepolarise(Qureg qureg, const int targetQubit, qreal 
depolLevel) { if (depolLevel == 0) return; densmatr_oneQubitDephase(qureg, targetQubit, depolLevel); long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int bothBits = colBit | rowBit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_oneQubitDepolariseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, bothBits); } /** Called once for every 16 amplitudes */ __global__ void densmatr_twoQubitDepolariseKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int rowCol1, long long int rowCol2) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; // index of |..0..0..><..0..0| long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4); long long int ind01 = ind00 + rowCol1; long long int ind10 = ind00 + rowCol2; long long int ind11 = ind00 + rowCol1 + rowCol2; qreal realAvDepol = depolLevel * 0.25 * ( vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]); qreal imagAvDepol = depolLevel * 0.25 * ( vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]); qreal retain = 1 - depolLevel; vecReal[ind00] *= retain; vecImag[ind00] *= retain; vecReal[ind01] *= retain; vecImag[ind01] *= retain; vecReal[ind10] *= retain; vecImag[ind10] *= retain; vecReal[ind11] *= retain; vecImag[ind11] *= retain; vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol; vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol; vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol; vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol; } void densmatr_twoQubitDepolarise(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) { if (depolLevel == 0) return; // assumes qubit2 > qubit1 densmatr_twoQubitDephase(qureg, qubit1, qubit2, depolLevel); int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int rowCol1 = colBit1 | rowBit1; long long int rowCol2 = colBit2 | rowBit2; long long int numAmpsToVisit = qureg.numAmpsPerChunk/16; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = numAmpsToVisit - (rowBit2 >> 3); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_twoQubitDepolariseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, part4, part5, rowCol1, rowCol2); } void seedQuESTDefault(){ // init MT random number generator 
with three keys -- time, pid and a hash of hostname // for the MPI version, it is ok that all procs will get the same seed as random numbers will only be // used by the master process unsigned long int key[3]; getQuESTDefaultSeedKey(key); init_by_array(key, 3); } #ifdef __cplusplus } #endif
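/* A minimal illustrative sketch, not part of the original sources: the density-matrix kernels above
 * rebuild a full basis index from a compact counter by splitting it around the target column and row
 * bits (the part1/part2/part3 masks). The helper below, whose name is hypothetical and introduced only
 * for this sketch, reproduces that arithmetic on the host for a single target qubit so the mapping can
 * be checked in isolation. For example, with numQubitsRepresented = 3 and targetQubit = 1
 * (colBit = bit 1, rowBit = bit 4), scanInd = 0b1011 expands to 0b100101 = 37: the counter bits keep
 * their order while zeros are inserted at the colBit and rowBit positions. */
static inline long long int expandIndexAroundTargetBits(long long int scanInd, int targetQubit, int numQubitsRepresented) {
    long long int colBit = 1LL << targetQubit;                              // column bit of the target qubit
    long long int rowBit = 1LL << (targetQubit + numQubitsRepresented);     // matching row bit in the flattened density matrix
    long long int numBases = 1LL << (2*numQubitsRepresented - 2);           // number of |..0..><..0..| basis elements
    long long int part1 = colBit - 1;                                       // bits below colBit stay in place
    long long int part2 = (rowBit >> 1) - colBit;                           // bits between colBit and rowBit shift up by one
    long long int part3 = numBases - (rowBit >> 1);                         // remaining high bits shift up by two
    return (scanInd & part1) + ((scanInd & part2) << 1) + ((scanInd & part3) << 2);
}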
756b96cf0394c4152e64b1353b2575fee887ca40.cu
// Distributed under MIT licence. See https://github.com/aniabrown/QuEST_GPU/blob/master/LICENCE.txt for details /** @file * An implementation of the backend in ../QuEST_internal.h for a GPU environment. */ # include "QuEST.h" # include "QuEST_precision.h" # include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey # include "mt19937ar.h" # include <stdlib.h> # include <stdio.h> # include <math.h> # define REDUCE_SHARED_SIZE 512 # define DEBUG 0 static __device__ int extractBit (int locationOfBitFromRight, long long int theEncodedNumber) { return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight; } #ifdef __cplusplus extern "C" { #endif void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) { cudaDeviceSynchronize(); cudaMemcpy( qureg.deviceStateVec.real + startInd, reals, numAmps * sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice); cudaMemcpy( qureg.deviceStateVec.imag + startInd, imags, numAmps * sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice); } /** works for both statevectors and density matrices */ void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) { // copy copyQureg's GPU statevec to targetQureg's GPU statevec cudaDeviceSynchronize(); cudaMemcpy( targetQureg.deviceStateVec.real, copyQureg.deviceStateVec.real, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.real)), cudaMemcpyDeviceToDevice); cudaMemcpy( targetQureg.deviceStateVec.imag, copyQureg.deviceStateVec.imag, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.imag)), cudaMemcpyDeviceToDevice); } __global__ void densmatr_initPureStateKernel( long long int numPureAmps, qreal *targetVecReal, qreal *targetVecImag, qreal *copyVecReal, qreal *copyVecImag) { // this is a particular index of the pure copyQureg long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=numPureAmps) return; qreal realRow = copyVecReal[index]; qreal imagRow = copyVecImag[index]; for (long long int col=0; col < numPureAmps; col++) { qreal realCol = copyVecReal[col]; qreal imagCol = - copyVecImag[col]; // minus for conjugation targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol; targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol; } } void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock); densmatr_initPureStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( copyQureg.numAmpsPerChunk, targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag, copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag); } __global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = probFactor; stateVecImag[index] = 0.0; } void densmatr_initPlusState(Qureg qureg) { qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented)); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); densmatr_initPlusStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, probFactor, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void densmatr_initClassicalStateKernel( long long int densityNumElems, qreal 
*densityReal, qreal *densityImag, long long int densityInd) { // initialise the state to all zeros long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= densityNumElems) return; densityReal[index] = 0.0; densityImag[index] = 0.0; if (index==densityInd){ // classical state has probability 1 densityReal[densityInd] = 1.0; densityImag[densityInd] = 0.0; } } void densmatr_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); // index of the desired state in the flat density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int densityInd = (densityDim + 1)*stateInd; // identical to pure version densmatr_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, densityInd); } void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env) { // allocate CPU memory long long int numAmps = 1L << numQubits; long long int numAmpsPerRank = numAmps/env.numRanks; qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.real)); qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.imag)); if (env.numRanks>1){ qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.real)); qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.imag)); } // check cpu memory allocation was successful if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag)) && numAmpsPerRank ) { printf("Could not allocate memory!\n"); exit (EXIT_FAILURE); } if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag)) && numAmpsPerRank ) { printf("Could not allocate memory!\n"); exit (EXIT_FAILURE); } qureg->numQubitsInStateVec = numQubits; qureg->numAmpsPerChunk = numAmpsPerRank; qureg->numAmpsTotal = numAmps; qureg->chunkId = env.rank; qureg->numChunks = env.numRanks; qureg->isDensityMatrix = 0; // allocate GPU memory cudaMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real))); cudaMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag))); cudaMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal)); cudaMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))* sizeof(qreal)); // check gpu memory allocation was successful if (!(qureg->deviceStateVec.real) || !(qureg->deviceStateVec.imag)){ printf("Could not allocate memory on GPU!\n"); exit (EXIT_FAILURE); } } void statevec_destroyQureg(Qureg qureg, QuESTEnv env) { // Free CPU memory free(qureg.stateVec.real); free(qureg.stateVec.imag); if (env.numRanks>1){ free(qureg.pairStateVec.real); free(qureg.pairStateVec.imag); } // Free GPU memory cudaFree(qureg.deviceStateVec.real); cudaFree(qureg.deviceStateVec.imag); } int GPUExists(void){ int deviceCount, device; int gpuDeviceCount = 0; struct cudaDeviceProp properties; cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount); if (cudaResultCode != cudaSuccess) deviceCount = 0; /* machines with no GPUs can still report one emulation device */ for (device = 0; device < deviceCount; ++device) { cudaGetDeviceProperties(&properties, device); if (properties.major != 9999) { /* 9999 means emulation only */ ++gpuDeviceCount; } } if (gpuDeviceCount) return 
1; else return 0; } QuESTEnv createQuESTEnv(void) { // init MPI environment if (!GPUExists()){ printf("Trying to run GPU code with no GPU available\n"); exit(EXIT_FAILURE); } QuESTEnv env; env.rank=0; env.numRanks=1; seedQuESTDefault(); return env; } void syncQuESTEnv(QuESTEnv env){ cudaDeviceSynchronize(); } int syncQuESTSuccess(int successCode){ return successCode; } void destroyQuESTEnv(QuESTEnv env){ // MPI finalize goes here in MPI version. Call this function anyway for consistency } void reportQuESTEnv(QuESTEnv env){ printf("EXECUTION ENVIRONMENT:\n"); printf("Running locally on one node with GPU\n"); printf("Number of ranks is %d\n", env.numRanks); # ifdef _OPENMP printf("OpenMP enabled\n"); printf("Number of threads available is %d\n", omp_get_max_threads()); # else printf("OpenMP disabled\n"); # endif } void getEnvironmentString(QuESTEnv env, Qureg qureg, char str[200]){ sprintf(str, "%dqubits_GPU_noMpi_noOMP", qureg.numQubitsInStateVec); } void copyStateToGPU(Qureg qureg) { if (DEBUG) printf("Copying data to GPU\n"); cudaMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice); cudaMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice); cudaMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyHostToDevice); cudaMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyHostToDevice); if (DEBUG) printf("Finished copying data to GPU\n"); } void copyStateFromGPU(Qureg qureg) { cudaDeviceSynchronize(); if (DEBUG) printf("Copying data from GPU\n"); cudaMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost); cudaMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost); if (DEBUG) printf("Finished copying data from GPU\n"); } /** Print the current state vector of probability amplitudes for a set of qubits to standard out. For debugging purposes. Each rank should print output serially. 
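The amplitudes are copied back from GPU memory via copyStateFromGPU before being printed.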
Only print output for systems <= 5 qubits */ void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){ long long int index; int rank; copyStateFromGPU(qureg); if (qureg.numQubitsInStateVec<=5){ for (rank=0; rank<qureg.numChunks; rank++){ if (qureg.chunkId==rank){ if (reportRank) { printf("Reporting state from rank %d [\n", qureg.chunkId); //printf("\trank, index, real, imag\n"); printf("real, imag\n"); } else if (rank==0) { printf("Reporting state [\n"); printf("real, imag\n"); } for(index=0; index<qureg.numAmpsPerChunk; index++){ printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]); } if (reportRank || rank==qureg.numChunks-1) printf("]\n"); } syncQuESTEnv(env); } } } qreal statevec_getRealAmp(Qureg qureg, long long int index){ qreal el=0; cudaMemcpy(&el, &(qureg.deviceStateVec.real[index]), sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost); return el; } qreal statevec_getImagAmp(Qureg qureg, long long int index){ qreal el=0; cudaMemcpy(&el, &(qureg.deviceStateVec.imag[index]), sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost); return el; } __global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; // initialise the state to |0000..0000> index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; if (index==0){ // zero state |0000..0000> has probability 1 stateVecReal[0] = 1.0; stateVecImag[0] = 0.0; } } void statevec_initZeroState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initZeroStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize); stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } void statevec_initPlusState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initPlusStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){ long long int index; // initialise the state to |stateInd> index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; if (index==stateInd){ // classical state has probability 1 stateVecReal[stateInd] = 1.0; stateVecImag[stateInd] = 0.0; } } void statevec_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, stateInd); } __global__ void statevec_initStateDebugKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x 
+ threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = (index*2.0)/10.0; stateVecImag[index] = (index*2.0+1.0)/10.0; } void statevec_initStateDebug(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initStateDebugKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){ long long int index; int bit; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2); bit = extractBit(qubitId, index); if (bit==outcome) { stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } else { stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; } } void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock); statevec_initStateOfSingleQubitKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome); } // returns 1 if successful, else 0 int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){ long long int chunkSize, stateVecSize; long long int indexInChunk, totalIndex; chunkSize = qureg->numAmpsPerChunk; stateVecSize = chunkSize*qureg->numChunks; qreal *stateVecReal = qureg->stateVec.real; qreal *stateVecImag = qureg->stateVec.imag; FILE *fp; char line[200]; fp = fopen(filename, "r"); if (fp == NULL) return 0; indexInChunk = 0; totalIndex = 0; while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){ if (line[0]!='#'){ int chunkId = totalIndex/chunkSize; if (chunkId==qureg->chunkId){ # if QuEST_PREC==1 sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # elif QuEST_PREC==2 sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # elif QuEST_PREC==4 sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # endif indexInChunk += 1; } totalIndex += 1; } } fclose(fp); copyStateToGPU(*qureg); // indicate success return 1; } int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){ qreal diff; int chunkSize = mq1.numAmpsPerChunk; copyStateFromGPU(mq1); copyStateFromGPU(mq2); for (int i=0; i<chunkSize; i++){ diff = mq1.stateVec.real[i] - mq2.stateVec.real[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; } return 1; } __global__ void statevec_compactUnitaryKernel (Qureg qureg, const int rotQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << rotQubit; // size of blocks 
halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } void statevec_compactUnitary(Qureg qureg, const int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_compactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, alpha, beta); } __global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } } void statevec_controlledCompactUnitary(Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_controlledCompactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, alpha, beta); } __global__ void statevec_unitaryKernel(Qureg qureg, const int targetQubit, ComplexMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } void statevec_unitary(Qureg qureg, const int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_unitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, u); } __global__ void statevec_controlledUnitaryKernel(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_controlledUnitary(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_controlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, u); } __global__ void statevec_multiControlledUnitaryKernel(Qureg qureg, long long int mask, const int targetQubit, ComplexMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; if (mask == (mask & indexUp) ){ // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_multiControlledUnitary(Qureg qureg, int *controlQubits, int numControlQubits, const int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; long long int mask=0; for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]); threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_multiControlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, targetQubit, u); } __global__ void statevec_pauliXKernel(Qureg qureg, const int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateVecReal[indexUp] = stateVecReal[indexLo]; stateVecImag[indexUp] = stateVecImag[indexLo]; stateVecReal[indexLo] = stateRealUp; stateVecImag[indexLo] = stateImagUp; } void statevec_pauliX(Qureg qureg, const int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_pauliXKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit); } __global__ void statevec_pauliYKernel(Qureg qureg, const int targetQubit, const int conjFac){ long long int sizeHalfBlock = 1LL << targetQubit; long long int sizeBlock = 2LL * sizeHalfBlock; long long int numTasks = qureg.numAmpsPerChunk >> 1; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; long long int thisBlock = thisTask / sizeHalfBlock; long long int indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; long long int indexLo = indexUp + sizeHalfBlock; qreal stateRealUp, stateImagUp; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; // update under +-{{0, -i}, {i, 0}} stateVecReal[indexUp] = conjFac * stateVecImag[indexLo]; stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo]; stateVecReal[indexLo] = conjFac * -stateImagUp; stateVecImag[indexLo] = conjFac * stateRealUp; } void statevec_pauliY(Qureg qureg, const int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, 1); } void statevec_pauliYConj(Qureg qureg, const int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, -1); } __global__ void statevec_controlledPauliYKernel(Qureg qureg, const int controlQubit, const int targetQubit, const int conjFac) { long long int index; long long int sizeBlock, sizeHalfBlock; long long int stateVecSize; int controlBit; qreal stateRealUp, stateImagUp; long long int thisBlock, indexUp, indexLo; sizeHalfBlock = 1LL << targetQubit; sizeBlock = 2LL * sizeHalfBlock; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=(stateVecSize>>1)) return; thisBlock = index / sizeHalfBlock; indexUp = thisBlock*sizeBlock + index%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; // update under +-{{0, -i}, {i, 0}} stateVecReal[indexUp] = conjFac * stateVecImag[indexLo]; stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo]; stateVecReal[indexLo] = conjFac * -stateImagUp; stateVecImag[indexLo] = conjFac * 
stateRealUp; } } void statevec_controlledPauliY(Qureg qureg, const int controlQubit, const int targetQubit) { int conjFactor = 1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor); } void statevec_controlledPauliYConj(Qureg qureg, const int controlQubit, const int targetQubit) { int conjFactor = -1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor); } __global__ void statevec_phaseShiftByTermKernel(Qureg qureg, const int targetQubit, qreal cosAngle, qreal sinAngle) { long long int sizeBlock, sizeHalfBlock; long long int thisBlock, indexUp,indexLo; qreal stateRealLo, stateImagLo; long long int thisTask; const long long int numTasks = qureg.numAmpsPerChunk >> 1; sizeHalfBlock = 1LL << targetQubit; sizeBlock = 2LL * sizeHalfBlock; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo; } void statevec_phaseShiftByTerm(Qureg qureg, const int targetQubit, Complex term) { qreal cosAngle = term.real; qreal sinAngle = term.imag; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_phaseShiftByTermKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, cosAngle, sinAngle); } __global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, const int idQubit1, const int idQubit2, qreal cosAngle, qreal sinAngle) { long long int index; long long int stateVecSize; int bit1, bit2; qreal stateRealLo, stateImagLo; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_controlledPhaseShift(Qureg qureg, const int idQubit1, const int idQubit2, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_controlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2, cosAngle, sinAngle); } __global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) { qreal stateRealLo, stateImagLo; long long int index; long long int stateVecSize; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = 
qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; if (mask == (mask & index) ){ stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); long long int mask=0; for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_multiControlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, cosAngle, sinAngle); } qreal densmatr_calcTotalProb(Qureg qureg) { // computes the trace using Kahan summation qreal pTotal=0; qreal y, t, c; c = 0; long long int numCols = 1LL << qureg.numQubitsRepresented; long long diagIndex; copyStateFromGPU(qureg); for (int col=0; col< numCols; col++) { diagIndex = col*(numCols + 1); y = qureg.stateVec.real[diagIndex] - c; t = pTotal + y; c = ( t - pTotal ) - y; // brackets are important pTotal = t; } return pTotal; } qreal statevec_calcTotalProb(Qureg qureg){ /* IJB - implemented using Kahan summation for greater accuracy at a slight floating point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */ /* Don't change the bracketing in this routine! */ qreal pTotal=0; qreal y, t, c; long long int index; long long int numAmpsPerRank = qureg.numAmpsPerChunk; copyStateFromGPU(qureg); c = 0.0; for (index=0; index<numAmpsPerRank; index++){ /* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */ // pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; /* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */ //pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; } return pTotal; } __global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, const int idQubit1, const int idQubit2) { long long int index; long long int stateVecSize; int bit1, bit2; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_controlledPhaseFlip(Qureg qureg, const int idQubit1, const int idQubit2) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2); } __global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask) { long long int index; long long int stateVecSize; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if 
(index>=stateVecSize) return; if (mask == (mask & index) ){ stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits) { int threadsPerCUDABlock, CUDABlocks; long long int mask=0; for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]); threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_multiControlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask); } __global__ void statevec_hadamardKernel (Qureg qureg, const int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity const long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal recRoot2 = 1.0/sqrt(2.0); thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo); stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo); stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo); stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo); } void statevec_hadamard(Qureg qureg, const int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_hadamardKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit); } __global__ void statevec_controlledNotKernel(Qureg qureg, const int controlQubit, const int targetQubit) { long long int index; long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved long long int stateVecSize; int controlBit; // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=(stateVecSize>>1)) return; thisBlock = index / sizeHalfBlock; indexUp = thisBlock*sizeBlock + index%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = 
extractBit(controlQubit, indexUp); if (controlBit){ stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateVecReal[indexUp] = stateVecReal[indexLo]; stateVecImag[indexUp] = stateVecImag[indexLo]; stateVecReal[indexLo] = stateRealUp; stateVecImag[indexLo] = stateImagUp; } } void statevec_controlledNot(Qureg qureg, const int controlQubit, const int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledNotKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit); } __device__ __host__ unsigned int log2Int( unsigned int x ) { unsigned int ans = 0 ; while( x>>=1 ) ans++; return ans ; } __device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){ int i, l, r; int threadMax, maxDepth; threadMax = length/2; maxDepth = log2Int(length/2); for (i=0; i<maxDepth+1; i++){ if (threadIdx.x<threadMax){ l = threadIdx.x; r = l + threadMax; arrayIn[l] = arrayIn[r] + arrayIn[l]; } threadMax = threadMax >> 1; __syncthreads(); // optimise -- use warp shuffle instead } if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0]; } __global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){ extern __shared__ qreal tempReductionArray[]; int blockOffset = blockIdx.x*length; tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2]; tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1]; __syncthreads(); reduceBlock(tempReductionArray, reducedArray, length); } __global__ void densmatr_findProbabilityOfZeroKernel( Qureg qureg, const int measureQubit, qreal *reducedArray ) { // run by each thread // use of block here refers to contiguous amplitudes where measureQubit = 0, // (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numTasks = densityDim >> 1; long long int sizeHalfBlock = 1LL << (measureQubit); long long int sizeBlock = 2LL * sizeHalfBlock; long long int thisBlock; // which block this thread is processing long long int thisTask; // which part of the block this thread is processing long long int basisIndex; // index of this thread's computational basis state long long int densityIndex; // " " index of |basis><basis| in the flat density matrix // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; // figure out which density matrix prob that this thread is assigned thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock; densityIndex = (densityDim + 1) * basisIndex; // record the probability in the CUDA-BLOCK-wide array qreal prob = qureg.deviceStateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0 tempReductionArray[threadIdx.x] = prob; // sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } __global__ void statevec_findProbabilityOfZeroKernel( Qureg qureg, const int measureQubit, qreal *reducedArray ) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- temp variables long long int thisTask; // task based 
approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; // (good for shared memory parallelism) extern __shared__ qreal tempReductionArray[]; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // ---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; qreal realVal, imagVal; realVal = stateVecReal[index]; imagVal = stateVecImag[index]; tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal; __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){ int levels=0; while (numValuesToReduce){ numValuesToReduce = numValuesToReduce/numReducedPerLevel; levels++; } return levels; } void swapDouble(qreal **a, qreal **b){ qreal *temp; temp = *a; *a = *b; *b = temp; } qreal densmatr_findProbabilityOfZero(Qureg qureg, const int measureQubit) { long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0 int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block if (firstTime) { densmatr_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg, measureQubit, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal zeroProb; cudaMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return zeroProb; } qreal statevec_findProbabilityOfZero(Qureg qureg, const int measureQubit) { long long int numValuesToReduce = qureg.numAmpsPerChunk>>1; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; qreal stateProb=0; int firstTime=1; int maxReducedPerLevel = REDUCE_SHARED_SIZE; while(numValuesToReduce>1){ if (numValuesToReduce<maxReducedPerLevel){ // Need less than one CUDA block to reduce values valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { // 
Use full CUDA blocks, with block size constrained by shared mem usage valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime){ statevec_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg, measureQubit, qureg.firstLevelReduction); firstTime=0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return stateProb; } qreal statevec_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome) { qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - outcomeProb; return outcomeProb; } qreal densmatr_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome) { qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - outcomeProb; return outcomeProb; } /** computes either a real or imag term in the inner product */ __global__ void statevec_calcInnerProductKernel( int getRealComp, qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2, long long int numTermsToSum, qreal* reducedArray) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; // choose whether to calculate the real or imaginary term of the inner product qreal innerProdTerm; if (getRealComp) innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index]; else innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index]; // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = innerProdTerm; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the * inner product, so as to not have to worry about keeping the sums separated during reduction. * Truly disgusting, probably doubles runtime, please fix. * @TODO could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc? 
*/ Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) { qreal innerProdReal, innerProdImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, bra.deviceStateVec.real, bra.deviceStateVec.imag, ket.deviceStateVec.real, ket.deviceStateVec.imag, numValuesToReduce, bra.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, bra.deviceStateVec.real, bra.deviceStateVec.imag, ket.deviceStateVec.real, ket.deviceStateVec.imag, numValuesToReduce, bra.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // return complex Complex innerProd; innerProd.real = innerProdReal; innerProd.imag = innerProdImag; return innerProd; } /** computes one term of (vec^*T) dens * vec */ __global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) { // figure out which density matrix row to consider long long int col; long long int row = blockIdx.x*blockDim.x + threadIdx.x; if (row >= dim) return; qreal* densReal = dens.deviceStateVec.real; qreal* densImag = dens.deviceStateVec.imag; qreal* vecReal = vec.deviceStateVec.real; qreal* vecImag = vec.deviceStateVec.imag; // compute the row-th element of the product dens*vec qreal prodReal = 0; qreal prodImag = 0; for (col=0LL; col < dim; col++) { qreal densElemReal = densReal[dim*col + row]; qreal densElemImag = densImag[dim*col + row]; prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col]; prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col]; } // multiply 
with row-th elem of (vec^*) qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row]; // imag of every term should be zero, because each is a valid fidelity calc of an eigenstate //qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row]; extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = termReal; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } // @TODO implement qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) { // we're summing the square of every term in the density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block // store the reduction in the pureState array if (firstTime) { densmatr_calcFidelityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg, pureState, densityDim, pureState.firstLevelReduction); firstTime = 0; // sum the block probs } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( pureState.firstLevelReduction, pureState.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal fidelity; cudaMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return fidelity; } __global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) { // figure out which density matrix term this thread is assigned long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numAmpsToSum) return; qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index]; // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = term; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Computes the trace of the density matrix squared */ qreal densmatr_calcPurity(Qureg qureg) { // we're summing the square of every term in the density matrix long long int numValuesToReduce = qureg.numAmpsPerChunk; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum 
the probs in each block if (firstTime) { densmatr_calcPurityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal traceDensSquared; cudaMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return traceDensSquared; } __global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- measured probability qreal renorm; // probability (returned) value // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity // (good for shared memory parallelism) long long int numTasks=qureg.numAmpsPerChunk>>1; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // ---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // renorm=1/sqrt(totalProbability); qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; if (outcome==0){ stateVecReal[index]=stateVecReal[index]*renorm; stateVecImag[index]=stateVecImag[index]*renorm; stateVecReal[index+sizeHalfBlock]=0; stateVecImag[index+sizeHalfBlock]=0; } else if (outcome==1){ stateVecReal[index]=0; stateVecImag[index]=0; stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm; stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm; } } /* * outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or * else the state-vector will lose normalisation */ void statevec_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, measureQubit, outcome, outcomeProb); } /** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */ __global__ void densmatr_collapseToKnownProbOutcomeKernel( qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit, long long int part1, long long int part2, long long int part3, long long int rowBit, long long int colBit, long long int desired, 
long long int undesired) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numBasesToVisit) return; long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); // renormalise desired outcome vecReal[base + desired] /= outcomeProb; vecImag[base + desired] /= outcomeProb; // kill undesired outcome vecReal[base + undesired] = 0; vecImag[base + undesired] = 0; // kill |..0..><..1..| states vecReal[base + colBit] = 0; vecImag[base + colBit] = 0; vecReal[base + rowBit] = 0; vecImag[base + rowBit] = 0; } /** This involves finding |...i...><...j...| states and killing those where i!=j */ void densmatr_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb) { int rowQubit = measureQubit + qureg.numQubitsRepresented; int colBit = 1LL << measureQubit; int rowBit = 1LL << rowQubit; long long int numBasesToVisit = qureg.numAmpsPerChunk/4; long long int part1 = colBit -1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numBasesToVisit - (rowBit >> 1); long long int desired, undesired; if (outcome == 0) { desired = 0; undesired = colBit | rowBit; } else { desired = colBit | rowBit; undesired = 0; } int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock); densmatr_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>( outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit, part1, part2, part3, rowBit, colBit, desired, undesired); } __global__ void densmatr_addDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) { long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; if (ampInd >= numAmpsToVisit) return; combineQureg.deviceStateVec.real[ampInd] *= 1-otherProb; combineQureg.deviceStateVec.imag[ampInd] *= 1-otherProb; combineQureg.deviceStateVec.real[ampInd] += otherProb*otherQureg.deviceStateVec.real[ampInd]; combineQureg.deviceStateVec.imag[ampInd] += otherProb*otherQureg.deviceStateVec.imag[ampInd]; } void densmatr_addDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) { long long int numAmpsToVisit = combineQureg.numAmpsPerChunk; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_addDensityMatrixKernel<<<CUDABlocks, threadsPerCUDABlock>>>( combineQureg, otherProb, otherQureg, numAmpsToVisit ); } /** Called once for every 4 amplitudes in density matrix * Works by establishing the |..0..><..0..| state (for its given index) then * visiting |..1..><..0..| and |..0..><..1..|. 
Labels |part1 X pa><rt2 NOT(X) part3| * From the brain of Simon Benjamin */ __global__ void densmatr_oneQubitDephaseKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int colBit, long long int rowBit) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); vecReal[ampInd + colBit] *= fac; vecImag[ampInd + colBit] *= fac; vecReal[ampInd + rowBit] *= fac; vecImag[ampInd + rowBit] *= fac; } void densmatr_oneQubitDephase(Qureg qureg, const int targetQubit, qreal dephase) { if (dephase == 0) return; long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); qreal dephFac = 1 - dephase; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_oneQubitDephaseKernel<<<CUDABlocks, threadsPerCUDABlock>>>( dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, colBit, rowBit); } /** Called 12 times for every 16 amplitudes in density matrix * Each sums from the |..0..0..><..0..0..| index to visit either * |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..| * etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|. * From the brain of Simon Benjamin */ __global__ void densmatr_twoQubitDephaseKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2) { long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x; if (outerInd >= numAmpsToVisit) return; // sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A| int meta = 1 + (outerInd/numBackgroundStates); if (meta > 4) meta++; if (meta > 9) meta++; long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2); long long int scanInd = outerInd % numBackgroundStates; long long int stateInd = ( shift + (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4)); vecReal[stateInd] *= fac; vecImag[stateInd] *= fac; } // @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems? 
void densmatr_twoQubitDephase(Qureg qureg, int qubit1, int qubit2, qreal dephase) { if (dephase == 0) return; // assumes qubit2 > qubit1 int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3); qreal dephFac = 1 - dephase; // refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed) long long int numBackgroundStates = qureg.numAmpsPerChunk/16; // 12 of these states experience dephasing long long int numAmpsToVisit = 12 * numBackgroundStates; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_twoQubitDephaseKernel<<<CUDABlocks, threadsPerCUDABlock>>>( dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit, part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2); } /** Works like oneQubitDephase but modifies every other element, and elements are averaged in pairs */ __global__ void densmatr_oneQubitDepolariseKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int bothBits) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); long long int targetInd = baseInd + bothBits; qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]); qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]); vecReal[baseInd] *= 1 - depolLevel; vecImag[baseInd] *= 1 - depolLevel; vecReal[targetInd] *= 1 - depolLevel; vecImag[targetInd] *= 1 - depolLevel; vecReal[baseInd] += realAvDepol; vecImag[baseInd] += imagAvDepol; vecReal[targetInd] += realAvDepol; vecImag[targetInd] += imagAvDepol; } void densmatr_oneQubitDepolarise(Qureg qureg, const int targetQubit, qreal depolLevel) { if (depolLevel == 0) return; densmatr_oneQubitDephase(qureg, targetQubit, depolLevel); long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int bothBits = colBit | rowBit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_oneQubitDepolariseKernel<<<CUDABlocks, threadsPerCUDABlock>>>( depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, bothBits); } /** Called once for every 16 amplitudes */ __global__ void densmatr_twoQubitDepolariseKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int rowCol1, long long int rowCol2) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if 
(scanInd >= numAmpsToVisit) return; // index of |..0..0..><..0..0| long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4); long long int ind01 = ind00 + rowCol1; long long int ind10 = ind00 + rowCol2; long long int ind11 = ind00 + rowCol1 + rowCol2; qreal realAvDepol = depolLevel * 0.25 * ( vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]); qreal imagAvDepol = depolLevel * 0.25 * ( vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]); qreal retain = 1 - depolLevel; vecReal[ind00] *= retain; vecImag[ind00] *= retain; vecReal[ind01] *= retain; vecImag[ind01] *= retain; vecReal[ind10] *= retain; vecImag[ind10] *= retain; vecReal[ind11] *= retain; vecImag[ind11] *= retain; vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol; vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol; vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol; vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol; } void densmatr_twoQubitDepolarise(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) { if (depolLevel == 0) return; // assumes qubit2 > qubit1 densmatr_twoQubitDephase(qureg, qubit1, qubit2, depolLevel); int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int rowCol1 = colBit1 | rowBit1; long long int rowCol2 = colBit2 | rowBit2; long long int numAmpsToVisit = qureg.numAmpsPerChunk/16; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = numAmpsToVisit - (rowBit2 >> 3); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_twoQubitDepolariseKernel<<<CUDABlocks, threadsPerCUDABlock>>>( depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, part4, part5, rowCol1, rowCol2); } void seedQuESTDefault(){ // init MT random number generator with three keys -- time, pid and a hash of hostname // for the MPI version, it is ok that all procs will get the same seed as random numbers will only be // used by the master process unsigned long int key[3]; getQuESTDefaultSeedKey(key); init_by_array(key, 3); } #ifdef __cplusplus } #endif
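Every reduction in the file above (outcome probabilities, inner products, fidelity, purity) funnels per-amplitude terms through the same two-stage pattern: each CUDA block tree-reduces its values in shared memory via reduceBlock, and the per-block partial sums are then reduced again (copySharedReduceBlock, ping-ponging firstLevelReduction/secondLevelReduction) until a single value remains. Below is a minimal, self-contained sketch of that shared-memory tree reduction; the kernel and variable names are illustrative, and the second stage is done on the host for brevity rather than on the device as above.

#include <cstdio>
#include <cuda_runtime.h>

// Each block tree-reduces blockDim.x values held in shared memory, halving the
// number of active threads at every step, analogous to reduceBlock() above.
__global__ void blockSumKernel(const float *in, float *blockSums, int n) {
    extern __shared__ float scratch[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    scratch[tid] = (i < n) ? in[i] : 0.0f;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) scratch[tid] += scratch[tid + stride];
        __syncthreads();
    }
    if (tid == 0) blockSums[blockIdx.x] = scratch[0];   // one partial sum per block
}

int main() {
    const int n = 1 << 20, threads = 128, blocks = (n + threads - 1) / threads;
    float *dIn, *dPartial;
    cudaMalloc(&dIn, n * sizeof(float));
    cudaMalloc(&dPartial, blocks * sizeof(float));
    float *h = new float[n];
    for (int i = 0; i < n; ++i) h[i] = 1.0f;              // expected total is n
    cudaMemcpy(dIn, h, n * sizeof(float), cudaMemcpyHostToDevice);
    blockSumKernel<<<blocks, threads, threads * sizeof(float)>>>(dIn, dPartial, n);
    float *hPartial = new float[blocks];
    float total = 0.0f;
    cudaMemcpy(hPartial, dPartial, blocks * sizeof(float), cudaMemcpyDeviceToHost);
    for (int b = 0; b < blocks; ++b) total += hPartial[b]; // second stage, on the host here
    printf("sum = %f (expected %d)\n", total, n);
    cudaFree(dIn); cudaFree(dPartial); delete[] h; delete[] hPartial;
    return 0;
}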
c0a23009701cc9357d8df26ac2b97ef0c7a0bcb5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hiprand/hiprand_kernel.h"
#include "hiprand/hiprand.h"
#include <stdio.h>
#include <iostream>

const int numThreads = 512;

typedef struct SimPlan {
    int device;
    int dataSize;
    int numBlocks;
    hipStream_t streamID;
    int timeLimit;
    int t = 0;
    float dt;
    unsigned int seed;
    float **h_V;
    float mu;
    float sigma;
    float *d_v0;
    float *d_v1;
    hiprandState_t *PRNG;
    bool needDestruct = false;
    ~SimPlan() {
        if (needDestruct) {
            hipFree(this->d_v0);
            hipFree(this->d_v1);
            for (int i = 0; i < this->timeLimit; i++) {
                delete[] this->h_V[i];
            }
            delete[] this->h_V;
        }
    }
};

void setData(SimPlan *plan) {
    plan->h_V = new float*[plan->timeLimit];
    for (int t = 0; t < plan->timeLimit; t++) {
        plan->h_V[t] = new float[plan->dataSize];
    }
    for (int i = 0; i < plan->dataSize; i++) {
        plan->h_V[0][i] = 100.0f;
    }
    hipMemcpyAsync(plan->d_v0, plan->h_V[0], plan->dataSize*sizeof(float), hipMemcpyHostToDevice, plan->streamID);
}

__global__ void initializePRNG(SimPlan *plan) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i >= plan->dataSize){
        return;
    }
    hiprand_init(plan->seed, i, 0, &plan->PRNG[i]);
}

void initializePlan(
    SimPlan *plan,
    int device_,
    int n_,
    float mu_,
    float sigma_,
    int timeLimit_,
    hipStream_t streamID_ = 0) {
    plan->needDestruct = true;
    plan->device = device_;
    plan->dataSize = n_;
    plan->numBlocks = (plan->dataSize + numThreads - 1) / numThreads;
    plan->mu = mu_;
    plan->sigma = sigma_;
    plan->timeLimit = timeLimit_;
    plan->dt = 1.0f / float(timeLimit_);
    hipLaunchKernelGGL(( initializePRNG) , dim3(plan->numBlocks), dim3(numThreads) , 0, 0, plan);
    if (streamID_ != 0) {
        plan->streamID = streamID_;
    }
    hipMalloc((void**)&plan->d_v0, plan->dataSize*sizeof(float));
    hipMalloc((void**)&plan->d_v1, plan->dataSize*sizeof(float));
    setData(plan);
}

__global__ void Kernel( SimPlan *plan) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i >= plan->dataSize) {
        return;
    }
    if (plan->t % 2 == 0) {
        plan->d_v1[i] = plan->d_v0[i] * (plan->mu * plan->dt + sqrtf(plan->dt) * plan->sigma * hiprand_normal(&plan->PRNG[i]));
    }
    else {
        plan->d_v0[i] = plan->d_v1[i] * (plan->mu * plan->dt + sqrtf(plan->dt) * plan->sigma * hiprand_normal(&plan->PRNG[i]));
    }
}

void runSim(SimPlan *plan) {
    while (plan->t < plan->timeLimit - 1) {
        std::cout << plan->t << std::endl;
        hipLaunchKernelGGL(( Kernel) , dim3(plan->numBlocks), dim3(numThreads) , 0, 0, plan);
        if (plan->t % 2 == 0) {
            hipMemcpy(plan->h_V[plan->t + 1], plan->d_v1, plan->dataSize*sizeof(float), hipMemcpyDeviceToHost);
        }
        else {
            hipMemcpy(plan->h_V[plan->t + 1], plan->d_v0, plan->dataSize*sizeof(float), hipMemcpyDeviceToHost);
        }
        plan->t += 1;
    }
}

int main() {
    SimPlan p1;
    hipStream_t s1;
    hipStreamCreate(&s1);
    initializePlan( &p1, 0, 1000, 0.05, 0.2, 252, s1);
    runSim(&p1);
    return 0;
}
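One thing this pair of files illustrates is how hipify rewrites kernel launches: CUDA's <<<grid, block, sharedBytes, stream>>> becomes the hipLaunchKernelGGL macro with the same five pieces of information passed as ordinary arguments before the kernel arguments. A minimal sketch of the correspondence, using a made-up kernel that is not part of the files above:

#include "hip/hip_runtime.h"
#include <cstdio>

// Hypothetical kernel, present only to show the launch syntax.
__global__ void scaleKernel(float *v, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= a;
}

int main() {
    const int n = 1024, threads = 256, blocks = (n + threads - 1) / threads;
    float *d;
    hipMalloc((void**)&d, n * sizeof(float));
    hipMemset(d, 0, n * sizeof(float));
    // CUDA form:  scaleKernel<<<blocks, threads, 0, 0>>>(d, 2.0f, n);
    // HIP form (0 bytes of dynamic shared memory, default stream):
    hipLaunchKernelGGL(scaleKernel, dim3(blocks), dim3(threads), 0, 0, d, 2.0f, n);
    hipDeviceSynchronize();
    hipFree(d);
    printf("done\n");
    return 0;
}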
c0a23009701cc9357d8df26ac2b97ef0c7a0bcb5.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand_kernel.h"
#include "curand.h"
#include <stdio.h>
#include <iostream>

const int numThreads = 512;

typedef struct SimPlan {
    int device;
    int dataSize;
    int numBlocks;
    cudaStream_t streamID;
    int timeLimit;
    int t = 0;
    float dt;
    unsigned int seed;
    float **h_V;
    float mu;
    float sigma;
    float *d_v0;
    float *d_v1;
    curandState_t *PRNG;
    bool needDestruct = false;
    ~SimPlan() {
        if (needDestruct) {
            cudaFree(this->d_v0);
            cudaFree(this->d_v1);
            for (int i = 0; i < this->timeLimit; i++) {
                delete[] this->h_V[i];
            }
            delete[] this->h_V;
        }
    }
};

void setData(SimPlan *plan) {
    plan->h_V = new float*[plan->timeLimit];
    for (int t = 0; t < plan->timeLimit; t++) {
        plan->h_V[t] = new float[plan->dataSize];
    }
    for (int i = 0; i < plan->dataSize; i++) {
        plan->h_V[0][i] = 100.0f;
    }
    cudaMemcpyAsync(plan->d_v0, plan->h_V[0], plan->dataSize*sizeof(float), cudaMemcpyHostToDevice, plan->streamID);
}

__global__ void initializePRNG(SimPlan *plan) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i >= plan->dataSize){
        return;
    }
    curand_init(plan->seed, i, 0, &plan->PRNG[i]);
}

void initializePlan(
    SimPlan *plan,
    int device_,
    int n_,
    float mu_,
    float sigma_,
    int timeLimit_,
    cudaStream_t streamID_ = 0) {
    plan->needDestruct = true;
    plan->device = device_;
    plan->dataSize = n_;
    plan->numBlocks = (plan->dataSize + numThreads - 1) / numThreads;
    plan->mu = mu_;
    plan->sigma = sigma_;
    plan->timeLimit = timeLimit_;
    plan->dt = 1.0f / float(timeLimit_);
    initializePRNG <<<plan->numBlocks, numThreads >>>(plan);
    if (streamID_ != 0) {
        plan->streamID = streamID_;
    }
    cudaMalloc((void**)&plan->d_v0, plan->dataSize*sizeof(float));
    cudaMalloc((void**)&plan->d_v1, plan->dataSize*sizeof(float));
    setData(plan);
}

__global__ void Kernel( SimPlan *plan) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i >= plan->dataSize) {
        return;
    }
    if (plan->t % 2 == 0) {
        plan->d_v1[i] = plan->d_v0[i] * (plan->mu * plan->dt + sqrtf(plan->dt) * plan->sigma * curand_normal(&plan->PRNG[i]));
    }
    else {
        plan->d_v0[i] = plan->d_v1[i] * (plan->mu * plan->dt + sqrtf(plan->dt) * plan->sigma * curand_normal(&plan->PRNG[i]));
    }
}

void runSim(SimPlan *plan) {
    while (plan->t < plan->timeLimit - 1) {
        std::cout << plan->t << std::endl;
        Kernel <<< plan->numBlocks, numThreads >>>(plan);
        if (plan->t % 2 == 0) {
            cudaMemcpy(plan->h_V[plan->t + 1], plan->d_v1, plan->dataSize*sizeof(float), cudaMemcpyDeviceToHost);
        }
        else {
            cudaMemcpy(plan->h_V[plan->t + 1], plan->d_v0, plan->dataSize*sizeof(float), cudaMemcpyDeviceToHost);
        }
        plan->t += 1;
    }
}

int main() {
    SimPlan p1;
    cudaStream_t s1;
    cudaStreamCreate(&s1);
    initializePlan( &p1, 0, 1000, 0.05, 0.2, 252, s1);
    runSim(&p1);
    return 0;
}
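For reference, the update in Kernel above multiplies the previous value by (mu*dt + sigma*sqrt(dt)*Z); a textbook Euler–Maruyama step for geometric Brownian motion multiplies by (1 + mu*dt + sigma*sqrt(dt)*Z), and the curandState_t array must be allocated in device memory before curand_init writes to it. The sketch below shows that standard step in isolation with the same parameters as main() above; every name in it is illustrative and it is not presented as a drop-in patch for the file.

#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>

// One RNG state per simulated path, initialised once.
__global__ void rngInit(curandState_t *rng, unsigned long long seed, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) curand_init(seed, i, 0, &rng[i]);
}

// Euler–Maruyama step for GBM: S <- S * (1 + mu*dt + sigma*sqrt(dt)*Z).
__global__ void gbmStep(float *s, curandState_t *rng, float mu, float sigma, float dt, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float z = curand_normal(&rng[i]);
    s[i] *= 1.0f + mu * dt + sigma * sqrtf(dt) * z;
}

int main() {
    const int n = 1000, steps = 252, threads = 256, blocks = (n + threads - 1) / threads;
    const float mu = 0.05f, sigma = 0.2f, dt = 1.0f / steps;
    float *dS;
    curandState_t *dRng;
    cudaMalloc(&dS, n * sizeof(float));
    cudaMalloc(&dRng, n * sizeof(curandState_t));   // states live on the device
    float *h = new float[n];
    for (int i = 0; i < n; ++i) h[i] = 100.0f;      // same initial value as setData()
    cudaMemcpy(dS, h, n * sizeof(float), cudaMemcpyHostToDevice);
    rngInit<<<blocks, threads>>>(dRng, 1234ULL, n);
    for (int t = 0; t < steps; ++t)
        gbmStep<<<blocks, threads>>>(dS, dRng, mu, sigma, dt, n);
    cudaMemcpy(h, dS, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("terminal value of first path: %f\n", h[0]);
    cudaFree(dS); cudaFree(dRng); delete[] h;
    return 0;
}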
09ee52e710deb60ae254f19278c316e8d10d07a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=14 --blockDim=[32,32] --only-intra-group #include "common_ds.h" // The Mandelbrot secondary AA pass CUDA GPU thread function (double single version) __global__ void MandelbrotDS1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __requires(imageW == 800); __requires(imageH == 600); __requires(gridWidth == 25); __requires(numBlocks == 475); __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while (1) { #ifndef KERNEL_BUG __syncthreads(); #endif if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); if (blockIndex >= numBlocks) { break; // finish } // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) { count += CheckColors(pixelColor, dst[pixel - 1]); } if (ix + 1 < imageW) { count += CheckColors(pixelColor, dst[pixel + 1]); } if (iy > 0) { count += CheckColors(pixelColor, dst[pixel - imageW]); } if (iy + 1 < imageH) { count += CheckColors(pixelColor, dst[pixel + imageW]); } if (count) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ); m = m > 0 ? crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } } } // MandelbrotDS1
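Rather than statically mapping one CUDA block to one image tile, the kernel above has every resident block repeatedly claim the next tile index with atomicAdd on a global counter until all tiles are consumed, which keeps blocks busy when per-tile cost varies (deep versus shallow Mandelbrot regions). A stripped-down sketch of that persistent-blocks work queue follows; workCounter and the trivial payload are illustrative stand-ins for blockCounter and the pixel work above.

#include <cstdio>
#include <cuda_runtime.h>

__device__ unsigned int workCounter = 0;           // stands in for blockCounter

// Each block keeps pulling task indices until the queue is empty, mirroring
// the while(1) / atomicAdd / __syncthreads structure of MandelbrotDS1 above.
__global__ void persistentBlocks(float *out, int numTasks) {
    __shared__ unsigned int task;
    while (true) {
        __syncthreads();                           // nobody may still be using the old task index
        if (threadIdx.x == 0)
            task = atomicAdd(&workCounter, 1);     // claim the next task for the whole block
        __syncthreads();
        if (task >= numTasks)
            break;                                 // queue exhausted: all threads leave together
        int i = task * blockDim.x + threadIdx.x;   // this thread's element inside the task
        out[i] = (float)i;                         // trivial payload in place of the pixel work
    }
}

int main() {
    const int threads = 128, numTasks = 64;
    float *d;
    cudaMalloc(&d, numTasks * threads * sizeof(float));
    persistentBlocks<<<8, threads>>>(d, numTasks); // fewer blocks than tasks: blocks loop
    float last;
    cudaMemcpy(&last, d + numTasks * threads - 1, sizeof(float), cudaMemcpyDeviceToHost);
    printf("last element: %f (expected %d)\n", last, numTasks * threads - 1);
    cudaFree(d);
    return 0;
}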
09ee52e710deb60ae254f19278c316e8d10d07a8.cu
//pass //--gridDim=14 --blockDim=[32,32] --only-intra-group #include "common_ds.h" // The Mandelbrot secondary AA pass CUDA GPU thread function (double single version) __global__ void MandelbrotDS1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __requires(imageW == 800); __requires(imageH == 600); __requires(gridWidth == 25); __requires(numBlocks == 475); __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while (1) { #ifndef KERNEL_BUG __syncthreads(); #endif if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); if (blockIndex >= numBlocks) { break; // finish } // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) { count += CheckColors(pixelColor, dst[pixel - 1]); } if (ix + 1 < imageW) { count += CheckColors(pixelColor, dst[pixel + 1]); } if (iy > 0) { count += CheckColors(pixelColor, dst[pixel - imageW]); } if (iy + 1 < imageH) { count += CheckColors(pixelColor, dst[pixel + imageW]); } if (count) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ); m = m > 0 ? crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } } } // MandelbrotDS1
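The final write in both versions folds the freshly computed colour into a running per-pixel average across frames: with frame samples already accumulated, the stored channel becomes (old*frame + new + frame2) / frame1, where frame1 = frame + 1 and frame2 = frame1/2 rounds the integer division to nearest instead of truncating. A tiny host-side illustration of that incremental average, with made-up sample values:

#include <cstdio>

// Incrementally average integer samples the way dst[pixel] is updated above;
// the running mean of 8-bit samples stays in 0..255, like the uchar4 channels.
int main() {
    unsigned char samples[] = { 10, 200, 60, 60 };
    unsigned int avg = samples[0];                 // after the first frame the average is the sample
    for (int frame = 1; frame < 4; ++frame) {
        unsigned int frame1 = frame + 1;
        unsigned int frame2 = frame1 / 2;          // rounding term
        avg = (avg * frame + samples[frame] + frame2) / frame1;
        printf("after %u samples: avg = %u\n", frame1, avg);
    }
    return 0;
}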
b4719b9019b44deac1d9cab9985b7904675a8fb2.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/cudafeatures2d.hpp> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_X 8 #define BLOCK_Y 8 #define BLOCK_Z 4 using namespace cv; // Algorithm itself has good performances, but memory allocation is a problem. // I will try to reduce it. namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ void SetBit(T &bitmap, unsigned char pos) { bitmap |= (1 << pos); } // Risale alla radice dell'albero a partire da un suo nodo n __device__ unsigned Find(const int *s_buf, unsigned n) { // Attenzione: non invocare la find su un pixel di background unsigned label = s_buf[n]; assert(label > 0); while (label - 1 != n) { n = label - 1; label = s_buf[n]; assert(label > 0); } return n; } // Init phase. // Labels start at value 1. __global__ void Init(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i block_conn, cuda::PtrStepSz3i block_labels) { unsigned x = blockIdx.x * BLOCK_X + threadIdx.x; unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y; unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z; unsigned img_index = 2*z * (img.stepz / img.elem_size) + 2*y * (img.stepy / img.elem_size) + 2*x; unsigned conn_index = z * (block_conn.stepz / block_conn.elem_size) + y * (block_conn.stepy / block_conn.elem_size) + x; unsigned labels_index = z * (block_labels.stepz / block_labels.elem_size) + y * (block_labels.stepy / block_labels.elem_size) + x; if (x < block_conn.x && y < block_conn.y && z < block_conn.z) { #define P0 0x77707770777UL unsigned long long P = 0UL; if (img[img_index]) { P |= P0; } if (2 * x + 1 < img.x) { if (img[img_index + 1]) { P |= (P0 << 1); } if (2 * y + 1 < img.y && img[img_index + img.stepy / img.elem_size + 1]) { P |= (P0 << 5); } } if (2 * y + 1 < img.y) { if (img[img_index + img.stepy / img.elem_size]) { P |= (P0 << 4); } } if (2 * z + 1 < img.z) { if (img[img_index + img.stepz / img.elem_size]) { P |= P0 << 16; } if (2 * x + 1 < img.x) { if (img[img_index + img.stepz / img.elem_size + 1]) { P |= (P0 << 17); } if (2 * y + 1 < img.y && img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size + 1]) { P |= (P0 << 21); } } if (2 * y + 1 < img.y) { if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) { P |= (P0 << 20); } } } #undef P0 // checks on borders if (x == 0) { P &= 0xEEEEEEEEEEEEEEEE; } if (2 * x + 1 >= img.x) { P &= 0x3333333333333333; } else if (2 * x + 2 >= img.x) { P &= 0x7777777777777777; } if (y == 0) { P &= 0xFFF0FFF0FFF0FFF0; } if (2 * y + 1 >= img.y) { P &= 0x00FF00FF00FF00FF; } else if (2 * y + 2 >= img.y) { P &= 0x0FFF0FFF0FFF0FFF; } if (z == 0) { P &= 0xFFFFFFFFFFFF0000; } if (2 * z + 1 >= img.z) { P &= 0x00000000FFFFFFFF; } else if (2 * z + 2 >= img.z) { P &= 0x0000FFFFFFFFFFFF; } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors unsigned int conn_bitmask = 0; if (P > 0) { block_labels[labels_index] = labels_index + 1; // Lower plane unsigned char * plane_data = img.data + img_index - (img.stepz / img.elem_size); if (HasBit(P, 0) && plane_data[0 - img.stepy - 1]) { SetBit(conn_bitmask, 0); } if ((HasBit(P, 1) && plane_data[0 - img.stepy]) || (HasBit(P, 2) && plane_data[0 - img.stepy + 1])) { SetBit(conn_bitmask, 1); } 
if (HasBit(P, 3) && plane_data[0 - img.stepy + 2]) { SetBit(conn_bitmask, 2); } if ((HasBit(P, 4) && plane_data[- 1]) || (HasBit(P, 8) && plane_data[img.stepy - 1])) { SetBit(conn_bitmask, 3); } if ((HasBit(P, 5) && plane_data[0]) || (HasBit(P, 6) && plane_data[1]) || (HasBit(P, 9) && plane_data[img.stepy]) || (HasBit(P, 10) && plane_data[img.stepy + 1])) { SetBit(conn_bitmask, 4); } if ((HasBit(P, 7) && plane_data[2]) || (HasBit(P, 11) && plane_data[img.stepy + 2])) { SetBit(conn_bitmask, 5); } if (HasBit(P, 12) && plane_data[2 * img.stepy - 1]) { SetBit(conn_bitmask, 6); } if ((HasBit(P, 13) && plane_data[2 * img.stepy]) || (HasBit(P, 14) && plane_data[2 * img.stepy + 1])) { SetBit(conn_bitmask, 7); } if (HasBit(P, 15) && plane_data[2 * img.stepy + 2]) { SetBit(conn_bitmask, 8); } // Current planes plane_data += img.stepz / img.elem_size; if ((HasBit(P, 16) && plane_data[0 - img.stepy - 1]) || (HasBit(P, 32) && plane_data[img.stepz - img.stepy - 1])) { SetBit(conn_bitmask, 9); } if ((HasBit(P, 17) && plane_data[0 - img.stepy]) || (HasBit(P, 18) && plane_data[0 - img.stepy + 1]) || (HasBit(P, 33) && plane_data[img.stepz - img.stepy]) || (HasBit(P, 34) && plane_data[img.stepz - img.stepy + 1])) { SetBit(conn_bitmask, 10); } if ((HasBit(P, 19) && plane_data[0 - img.stepy + 2]) || (HasBit(P, 35) && plane_data[img.stepz - img.stepy + 2])) { SetBit(conn_bitmask, 11); } if ((HasBit(P, 20) && plane_data[-1]) || (HasBit(P, 24) && plane_data[img.stepy - 1]) || (HasBit(P, 36) && plane_data[img.stepz - 1]) || (HasBit(P, 40) && plane_data[img.stepz + img.stepy - 1])) { SetBit(conn_bitmask, 12); } if ((HasBit(P, 23) && plane_data[2]) || (HasBit(P, 27) && plane_data[img.stepy + 2]) || (HasBit(P, 39) && plane_data[img.stepz + 2]) || (HasBit(P, 43) && plane_data[img.stepz + img.stepy + 2])) { SetBit(conn_bitmask, 14); } if ((HasBit(P, 28) && plane_data[2 * img.stepy - 1]) || (HasBit(P, 44) && plane_data[img.stepz + 2 * img.stepy - 1])) { SetBit(conn_bitmask, 15); } if ((HasBit(P, 29) && plane_data[2 * img.stepy]) || (HasBit(P, 30) && plane_data[2 * img.stepy + 1]) || (HasBit(P, 45) && plane_data[img.stepz + 2 * img.stepy]) || (HasBit(P, 46) && plane_data[img.stepz + 2 * img.stepy + 1])) { SetBit(conn_bitmask, 16); } if ((HasBit(P, 31) && plane_data[2 * img.stepy + 2]) || (HasBit(P, 47) && plane_data[img.stepz + 2 * img.stepy + 2])) { SetBit(conn_bitmask, 17); } // Upper plane plane_data += 2 * (img.stepz / img.elem_size); if (HasBit(P, 48) && plane_data[0 - img.stepy - 1]) { SetBit(conn_bitmask, 18); } if ((HasBit(P, 49) && plane_data[0 - img.stepy]) || (HasBit(P, 50) && plane_data[0 - img.stepy + 1])) { SetBit(conn_bitmask, 19); } if (HasBit(P, 51) && plane_data[0 - img.stepy + 2]) { SetBit(conn_bitmask, 20); } if ((HasBit(P, 52) && plane_data[-1]) || (HasBit(P, 56) && plane_data[img.stepy - 1])) { SetBit(conn_bitmask, 21); } if ((HasBit(P, 53) && plane_data[0]) || (HasBit(P, 54) && plane_data[1]) || (HasBit(P, 57) && plane_data[img.stepy]) || (HasBit(P, 58) && plane_data[img.stepy + 1])) { SetBit(conn_bitmask, 22); } if ((HasBit(P, 55) && plane_data[2]) || (HasBit(P, 59) && plane_data[img.stepy + 2])) { SetBit(conn_bitmask, 23); } if (HasBit(P, 60) && plane_data[2 * img.stepy - 1]) { SetBit(conn_bitmask, 24); } if ((HasBit(P, 61) && plane_data[2 * img.stepy]) || (HasBit(P, 62) && plane_data[2 * img.stepy + 1])) { SetBit(conn_bitmask, 25); } if (HasBit(P, 63) && plane_data[2 * img.stepy + 2]) { SetBit(conn_bitmask, 26); } } else { block_labels[labels_index] = 0; } block_conn[conn_index] = conn_bitmask; 
} } //__global__ void ExpandConnections(const cuda::PtrStepSzb connections, cuda::PtrStepSzb expansion) { // unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; // unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; // unsigned conn_index = row * (connections.step / connections.elem_size) + col; // unsigned exp_index = 3 * row * (expansion.step / expansion.elem_size) + 3 * col; // if (row < connections.rows && col < connections.cols) { // expansion[exp_index + (expansion.step / expansion.elem_size) + 1] = 2; // unsigned char neighbours = connections[conn_index]; // if (HasBit(neighbours, 0)) { // expansion[exp_index] = 1; // } // else { // expansion[exp_index] = 0; // } // if (HasBit(neighbours, 1)) { // expansion[exp_index + 1] = 1; // } // else { // expansion[exp_index + 1] = 0; // } // if (HasBit(neighbours, 2)) { // expansion[exp_index + 2] = 1; // } // else { // expansion[exp_index + 2] = 0; // } // if (HasBit(neighbours, 3)) { // expansion[exp_index + (expansion.step / expansion.elem_size)] = 1; // } // else { // expansion[exp_index + (expansion.step / expansion.elem_size)] = 0; // } // if (HasBit(neighbours, 4)) { // expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 1; // } // else { // expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 0; // } // if (HasBit(neighbours, 5)) { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 1; // } // else { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 0; // } // if (HasBit(neighbours, 6)) { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 1; // } // else { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 0; // } // if (HasBit(neighbours, 7)) { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 1; // } // else { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 0; // } // } //} __device__ unsigned int MinLabel(unsigned l1, unsigned l2) { if (l1 && l2) return min(l1, l2); else return l1; } __device__ unsigned int FindMinLabel(cuda::PtrStepSz3i labels, unsigned int neighbours, unsigned label, unsigned labels_index) { unsigned int min = label; for (char plane = -1; plane <= 1; plane++) { int * plane_data = labels.data + labels_index + plane * (labels.stepz / labels.elem_size); if (HasBit(neighbours, 0)) { min = MinLabel(min, plane_data[0 - (labels.stepy / labels.elem_size) - 1]); } if (HasBit(neighbours, 1)) { min = MinLabel(min, plane_data[0 - (labels.stepy / labels.elem_size)]); } if (HasBit(neighbours, 2)) { min = MinLabel(min, plane_data[0 - (labels.stepy / labels.elem_size) + 1]); } if (HasBit(neighbours, 3)) { min = MinLabel(min, plane_data[-1]); } if (plane && HasBit(neighbours, 4)) { min = MinLabel(min, plane_data[0]); } if (HasBit(neighbours, 5)) { min = MinLabel(min, plane_data[1]); } if (HasBit(neighbours, 6)) { min = MinLabel(min, plane_data[(labels.stepy / labels.elem_size) - 1]); } if (HasBit(neighbours, 7)) { min = MinLabel(min, plane_data[(labels.stepy / labels.elem_size)]); } if (HasBit(neighbours, 8)) { min = MinLabel(min, plane_data[(labels.stepy / labels.elem_size) + 1]); } neighbours >>= 9; } return min; } // Scan phase. // The pixel associated with current thread is given the minimum label of the neighbours. 
__global__ void Scan(cuda::PtrStepSz3i labels, cuda::PtrStepSz3i connections, char *changes) { unsigned x = blockIdx.x * BLOCK_X + threadIdx.x; unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y; unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z; unsigned conn_index = z * (connections.stepz / connections.elem_size) + y * (connections.stepy / connections.elem_size) + x; unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { unsigned int neighbours = connections[conn_index]; unsigned label = labels[labels_index]; if (label) { unsigned min_label = FindMinLabel(labels, neighbours, label, labels_index); if (min_label < label) { labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label); *changes = 1; } } } } __global__ void Analyze(cuda::PtrStepSz3i labels) { unsigned x = blockIdx.x * BLOCK_X + threadIdx.x; unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y; unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z; unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { unsigned int val = labels[labels_index]; if (val) { labels[labels_index] = Find(labels.data, labels_index) + 1; } } } // Final Labeling phase // Assigns every pixel of 2x2x2 blocks the block label __global__ void FinalLabeling(cuda::PtrStepSz3i block_labels, cuda::PtrStepSz3i labels, const cuda::PtrStepSz3b img) { unsigned x = blockIdx.x * BLOCK_X + threadIdx.x; unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y; unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z; unsigned blocks_index = z * (block_labels.stepz / block_labels.elem_size) + y * (block_labels.stepy / block_labels.elem_size) + x; unsigned labels_index = 2 * z * (labels.stepz / labels.elem_size) + 2 * y * (labels.stepy / labels.elem_size) + 2 * x; unsigned img_index = 2 * z * (img.stepz / img.elem_size) + 2 * y * (img.stepy / img.elem_size) + 2 * x; if (x < block_labels.x && y < block_labels.y && z < block_labels.z) { unsigned int label = block_labels[blocks_index]; // Current plane if (img[img_index]) { labels[labels_index] = label; } else { labels[labels_index] = 0; } if (2 * x + 1 < labels.x) { if (img[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (2 * y + 1 < labels.y) { if (img[img_index + img.stepy + 1]) labels[labels_index + (labels.stepy / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.stepy / labels.elem_size) + 1] = 0; } } } if (2 * y + 1 < labels.y) { if (img[img_index + img.stepy]) labels[labels_index + (labels.stepy / labels.elem_size)] = label; else { labels[labels_index + (labels.stepy / labels.elem_size)] = 0; } } // Upper plane if (2 * z + 1 < labels.z) { if (img[img_index + img.stepz / img.elem_size]) labels[labels_index + labels.stepz / labels.elem_size] = label; else { labels[labels_index + labels.stepz / labels.elem_size] = 0; } if (2 * x + 1 < labels.x) { if (img[img_index + img.stepz / img.elem_size + 1]) labels[labels_index + labels.stepz / labels.elem_size + 1] = label; else { labels[labels_index + labels.stepz / labels.elem_size + 1] = 0; } if (2 * y + 1 < labels.y) { if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size + 1]) labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size) + 1] = label; else { labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size) + 1] = 0; } } } if (2 * y + 1 
< labels.y) { if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)] = label; else { labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)] = 0; } } } } } } class BE_3D : public GpuLabeling3D<CONN_26> { private: dim3 grid_size_; dim3 block_size_; char changes; char *d_changes; cuda::GpuMat3 d_connections_; cuda::GpuMat3 d_block_labels_; public: BE_3D() {} void PerformLabeling() { d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1); // Extra structures that I would gladly do without d_connections_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1); d_block_labels_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1); grid_size_ = dim3((d_block_labels_.x + BLOCK_X - 1) / BLOCK_X, (d_block_labels_.y + BLOCK_Y - 1) / BLOCK_Y, (d_block_labels_.z + BLOCK_Z - 1) / BLOCK_Z); block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z); hipMalloc(&d_changes, sizeof(char)); Init << <grid_size_, block_size_ >> > (d_img_, d_connections_, d_block_labels_); //Mat init_labels; //d_block_labels_.download(init_labels); //::NormalizeLabels(init_labels); //Mat img_out; //ColorLabels(init_labels, img_out); //volwrite("C:\\Users\\Stefano\\Desktop\\debug\\init_labels", img_out); while (true) { changes = 0; hipMemcpy(d_changes, &changes, sizeof(char), hipMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes); hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_block_labels_); } //Mat block_labels; //d_block_labels_.download(block_labels); //::NormalizeLabels(block_labels); //ColorLabels(block_labels, img_out); //volwrite("C:\\Users\\Stefano\\Desktop\\debug\\block_labels", img_out); FinalLabeling << <grid_size_, block_size_ >> > (d_block_labels_, d_img_labels_, d_img_); //d_img_labels_.download(img_labels_); hipFree(d_changes); d_connections_.release(); d_block_labels_.release(); hipDeviceSynchronize(); //d_img_labels_.download(img_labels_); //Mat errors; //bool correct = CheckLabeledVolume(img_, img_labels_, errors); //volwrite("C:\\Users\\Stefano\\Desktop\\debug\\BE_errors", errors); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1); d_connections_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1); d_block_labels_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1); hipMalloc(&d_changes, sizeof(char)); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); hipFree(d_changes); d_connections_.release(); d_block_labels_.release(); perf_.stop(); return perf_.last(); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((d_block_labels_.x + BLOCK_X - 1) / BLOCK_X, (d_block_labels_.y + BLOCK_Y - 1) / BLOCK_Y, (d_block_labels_.z + BLOCK_Z - 1) / BLOCK_Z); block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z); Init << <grid_size_, block_size_ >> > (d_img_, d_connections_, d_block_labels_); // La Init esplode // Controlla che cosa contiene connections //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, 
block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //assert(hipDeviceSynchronize() == hipSuccess); // Debug image of the initialization //Mat1i init_labels; //d_block_labels_.download(init_labels); while (true) { changes = 0; hipMemcpy(d_changes, &changes, sizeof(char), hipMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes); hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_block_labels_); } // Debug image of the block labels //Mat1i block_labels; //d_block_labels_.download(block_labels); FinalLabeling << <grid_size_, block_size_ >> > (d_block_labels_, d_img_labels_, d_img_); } public: void PerformLabelingWithSteps() { double alloc_timing = Alloc(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BE_3D);
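// The PerformLabeling/AllScans loops above drive the label-equivalence solver by relaunching Scan and
// Analyze until a one-byte device flag stays zero. Below is a minimal, self-contained sketch of that
// iterate-until-no-changes pattern; the PropagateMin kernel, the array contents and the launch sizes are
// illustrative assumptions and are not part of the BE_3D algorithm itself.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void PropagateMin(int *labels, int n, char *changes) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i > 0 && i < n) {
        // Take the smaller of my label and my left neighbour's label. Concurrent reads/writes are
        // benign here: values only ever decrease, and the host loop repeats until nothing changes.
        int left = labels[i - 1];
        if (left < labels[i]) {
            labels[i] = left;
            *changes = 1;   // tell the host another pass is needed
        }
    }
}

int main() {
    const int n = 1 << 10;
    int *d_labels;
    char *d_changes;
    cudaMalloc(&d_labels, n * sizeof(int));
    cudaMalloc(&d_changes, sizeof(char));

    // Initialise every element with its own index, analogous to the per-block labels of the Init phase.
    int *h_labels = new int[n];
    for (int i = 0; i < n; ++i) h_labels[i] = i;
    cudaMemcpy(d_labels, h_labels, n * sizeof(int), cudaMemcpyHostToDevice);

    char changes;
    do {
        changes = 0;
        cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice);
        PropagateMin<<<(n + 255) / 256, 256>>>(d_labels, n, d_changes);
        cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost);
    } while (changes);

    cudaMemcpy(h_labels, d_labels, n * sizeof(int), cudaMemcpyDeviceToHost);
    printf("labels[n-1] = %d (expected 0)\n", h_labels[n - 1]);

    delete[] h_labels;
    cudaFree(d_labels);
    cudaFree(d_changes);
    return 0;
}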
b4719b9019b44deac1d9cab9985b7904675a8fb2.cu
#include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_X 8 #define BLOCK_Y 8 #define BLOCK_Z 4 using namespace cv; // Algorithm itself has good performances, but memory allocation is a problem. // I will try to reduce it. namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ void SetBit(T &bitmap, unsigned char pos) { bitmap |= (1 << pos); } // Risale alla radice dell'albero a partire da un suo nodo n __device__ unsigned Find(const int *s_buf, unsigned n) { // Attenzione: non invocare la find su un pixel di background unsigned label = s_buf[n]; assert(label > 0); while (label - 1 != n) { n = label - 1; label = s_buf[n]; assert(label > 0); } return n; } // Init phase. // Labels start at value 1. __global__ void Init(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i block_conn, cuda::PtrStepSz3i block_labels) { unsigned x = blockIdx.x * BLOCK_X + threadIdx.x; unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y; unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z; unsigned img_index = 2*z * (img.stepz / img.elem_size) + 2*y * (img.stepy / img.elem_size) + 2*x; unsigned conn_index = z * (block_conn.stepz / block_conn.elem_size) + y * (block_conn.stepy / block_conn.elem_size) + x; unsigned labels_index = z * (block_labels.stepz / block_labels.elem_size) + y * (block_labels.stepy / block_labels.elem_size) + x; if (x < block_conn.x && y < block_conn.y && z < block_conn.z) { #define P0 0x77707770777UL unsigned long long P = 0UL; if (img[img_index]) { P |= P0; } if (2 * x + 1 < img.x) { if (img[img_index + 1]) { P |= (P0 << 1); } if (2 * y + 1 < img.y && img[img_index + img.stepy / img.elem_size + 1]) { P |= (P0 << 5); } } if (2 * y + 1 < img.y) { if (img[img_index + img.stepy / img.elem_size]) { P |= (P0 << 4); } } if (2 * z + 1 < img.z) { if (img[img_index + img.stepz / img.elem_size]) { P |= P0 << 16; } if (2 * x + 1 < img.x) { if (img[img_index + img.stepz / img.elem_size + 1]) { P |= (P0 << 17); } if (2 * y + 1 < img.y && img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size + 1]) { P |= (P0 << 21); } } if (2 * y + 1 < img.y) { if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) { P |= (P0 << 20); } } } #undef P0 // checks on borders if (x == 0) { P &= 0xEEEEEEEEEEEEEEEE; } if (2 * x + 1 >= img.x) { P &= 0x3333333333333333; } else if (2 * x + 2 >= img.x) { P &= 0x7777777777777777; } if (y == 0) { P &= 0xFFF0FFF0FFF0FFF0; } if (2 * y + 1 >= img.y) { P &= 0x00FF00FF00FF00FF; } else if (2 * y + 2 >= img.y) { P &= 0x0FFF0FFF0FFF0FFF; } if (z == 0) { P &= 0xFFFFFFFFFFFF0000; } if (2 * z + 1 >= img.z) { P &= 0x00000000FFFFFFFF; } else if (2 * z + 2 >= img.z) { P &= 0x0000FFFFFFFFFFFF; } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors unsigned int conn_bitmask = 0; if (P > 0) { block_labels[labels_index] = labels_index + 1; // Lower plane unsigned char * plane_data = img.data + img_index - (img.stepz / img.elem_size); if (HasBit(P, 0) && plane_data[0 - img.stepy - 1]) { SetBit(conn_bitmask, 0); } if ((HasBit(P, 1) && plane_data[0 - img.stepy]) || (HasBit(P, 2) && plane_data[0 - img.stepy + 1])) { SetBit(conn_bitmask, 1); } if (HasBit(P, 3) && plane_data[0 - img.stepy + 2]) { 
SetBit(conn_bitmask, 2); } if ((HasBit(P, 4) && plane_data[- 1]) || (HasBit(P, 8) && plane_data[img.stepy - 1])) { SetBit(conn_bitmask, 3); } if ((HasBit(P, 5) && plane_data[0]) || (HasBit(P, 6) && plane_data[1]) || (HasBit(P, 9) && plane_data[img.stepy]) || (HasBit(P, 10) && plane_data[img.stepy + 1])) { SetBit(conn_bitmask, 4); } if ((HasBit(P, 7) && plane_data[2]) || (HasBit(P, 11) && plane_data[img.stepy + 2])) { SetBit(conn_bitmask, 5); } if (HasBit(P, 12) && plane_data[2 * img.stepy - 1]) { SetBit(conn_bitmask, 6); } if ((HasBit(P, 13) && plane_data[2 * img.stepy]) || (HasBit(P, 14) && plane_data[2 * img.stepy + 1])) { SetBit(conn_bitmask, 7); } if (HasBit(P, 15) && plane_data[2 * img.stepy + 2]) { SetBit(conn_bitmask, 8); } // Current planes plane_data += img.stepz / img.elem_size; if ((HasBit(P, 16) && plane_data[0 - img.stepy - 1]) || (HasBit(P, 32) && plane_data[img.stepz - img.stepy - 1])) { SetBit(conn_bitmask, 9); } if ((HasBit(P, 17) && plane_data[0 - img.stepy]) || (HasBit(P, 18) && plane_data[0 - img.stepy + 1]) || (HasBit(P, 33) && plane_data[img.stepz - img.stepy]) || (HasBit(P, 34) && plane_data[img.stepz - img.stepy + 1])) { SetBit(conn_bitmask, 10); } if ((HasBit(P, 19) && plane_data[0 - img.stepy + 2]) || (HasBit(P, 35) && plane_data[img.stepz - img.stepy + 2])) { SetBit(conn_bitmask, 11); } if ((HasBit(P, 20) && plane_data[-1]) || (HasBit(P, 24) && plane_data[img.stepy - 1]) || (HasBit(P, 36) && plane_data[img.stepz - 1]) || (HasBit(P, 40) && plane_data[img.stepz + img.stepy - 1])) { SetBit(conn_bitmask, 12); } if ((HasBit(P, 23) && plane_data[2]) || (HasBit(P, 27) && plane_data[img.stepy + 2]) || (HasBit(P, 39) && plane_data[img.stepz + 2]) || (HasBit(P, 43) && plane_data[img.stepz + img.stepy + 2])) { SetBit(conn_bitmask, 14); } if ((HasBit(P, 28) && plane_data[2 * img.stepy - 1]) || (HasBit(P, 44) && plane_data[img.stepz + 2 * img.stepy - 1])) { SetBit(conn_bitmask, 15); } if ((HasBit(P, 29) && plane_data[2 * img.stepy]) || (HasBit(P, 30) && plane_data[2 * img.stepy + 1]) || (HasBit(P, 45) && plane_data[img.stepz + 2 * img.stepy]) || (HasBit(P, 46) && plane_data[img.stepz + 2 * img.stepy + 1])) { SetBit(conn_bitmask, 16); } if ((HasBit(P, 31) && plane_data[2 * img.stepy + 2]) || (HasBit(P, 47) && plane_data[img.stepz + 2 * img.stepy + 2])) { SetBit(conn_bitmask, 17); } // Upper plane plane_data += 2 * (img.stepz / img.elem_size); if (HasBit(P, 48) && plane_data[0 - img.stepy - 1]) { SetBit(conn_bitmask, 18); } if ((HasBit(P, 49) && plane_data[0 - img.stepy]) || (HasBit(P, 50) && plane_data[0 - img.stepy + 1])) { SetBit(conn_bitmask, 19); } if (HasBit(P, 51) && plane_data[0 - img.stepy + 2]) { SetBit(conn_bitmask, 20); } if ((HasBit(P, 52) && plane_data[-1]) || (HasBit(P, 56) && plane_data[img.stepy - 1])) { SetBit(conn_bitmask, 21); } if ((HasBit(P, 53) && plane_data[0]) || (HasBit(P, 54) && plane_data[1]) || (HasBit(P, 57) && plane_data[img.stepy]) || (HasBit(P, 58) && plane_data[img.stepy + 1])) { SetBit(conn_bitmask, 22); } if ((HasBit(P, 55) && plane_data[2]) || (HasBit(P, 59) && plane_data[img.stepy + 2])) { SetBit(conn_bitmask, 23); } if (HasBit(P, 60) && plane_data[2 * img.stepy - 1]) { SetBit(conn_bitmask, 24); } if ((HasBit(P, 61) && plane_data[2 * img.stepy]) || (HasBit(P, 62) && plane_data[2 * img.stepy + 1])) { SetBit(conn_bitmask, 25); } if (HasBit(P, 63) && plane_data[2 * img.stepy + 2]) { SetBit(conn_bitmask, 26); } } else { block_labels[labels_index] = 0; } block_conn[conn_index] = conn_bitmask; } } //__global__ void ExpandConnections(const 
cuda::PtrStepSzb connections, cuda::PtrStepSzb expansion) { // unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; // unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; // unsigned conn_index = row * (connections.step / connections.elem_size) + col; // unsigned exp_index = 3 * row * (expansion.step / expansion.elem_size) + 3 * col; // if (row < connections.rows && col < connections.cols) { // expansion[exp_index + (expansion.step / expansion.elem_size) + 1] = 2; // unsigned char neighbours = connections[conn_index]; // if (HasBit(neighbours, 0)) { // expansion[exp_index] = 1; // } // else { // expansion[exp_index] = 0; // } // if (HasBit(neighbours, 1)) { // expansion[exp_index + 1] = 1; // } // else { // expansion[exp_index + 1] = 0; // } // if (HasBit(neighbours, 2)) { // expansion[exp_index + 2] = 1; // } // else { // expansion[exp_index + 2] = 0; // } // if (HasBit(neighbours, 3)) { // expansion[exp_index + (expansion.step / expansion.elem_size)] = 1; // } // else { // expansion[exp_index + (expansion.step / expansion.elem_size)] = 0; // } // if (HasBit(neighbours, 4)) { // expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 1; // } // else { // expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 0; // } // if (HasBit(neighbours, 5)) { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 1; // } // else { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 0; // } // if (HasBit(neighbours, 6)) { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 1; // } // else { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 0; // } // if (HasBit(neighbours, 7)) { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 1; // } // else { // expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 0; // } // } //} __device__ unsigned int MinLabel(unsigned l1, unsigned l2) { if (l1 && l2) return min(l1, l2); else return l1; } __device__ unsigned int FindMinLabel(cuda::PtrStepSz3i labels, unsigned int neighbours, unsigned label, unsigned labels_index) { unsigned int min = label; for (char plane = -1; plane <= 1; plane++) { int * plane_data = labels.data + labels_index + plane * (labels.stepz / labels.elem_size); if (HasBit(neighbours, 0)) { min = MinLabel(min, plane_data[0 - (labels.stepy / labels.elem_size) - 1]); } if (HasBit(neighbours, 1)) { min = MinLabel(min, plane_data[0 - (labels.stepy / labels.elem_size)]); } if (HasBit(neighbours, 2)) { min = MinLabel(min, plane_data[0 - (labels.stepy / labels.elem_size) + 1]); } if (HasBit(neighbours, 3)) { min = MinLabel(min, plane_data[-1]); } if (plane && HasBit(neighbours, 4)) { min = MinLabel(min, plane_data[0]); } if (HasBit(neighbours, 5)) { min = MinLabel(min, plane_data[1]); } if (HasBit(neighbours, 6)) { min = MinLabel(min, plane_data[(labels.stepy / labels.elem_size) - 1]); } if (HasBit(neighbours, 7)) { min = MinLabel(min, plane_data[(labels.stepy / labels.elem_size)]); } if (HasBit(neighbours, 8)) { min = MinLabel(min, plane_data[(labels.stepy / labels.elem_size) + 1]); } neighbours >>= 9; } return min; } // Scan phase. // The pixel associated with current thread is given the minimum label of the neighbours. 
__global__ void Scan(cuda::PtrStepSz3i labels, cuda::PtrStepSz3i connections, char *changes) { unsigned x = blockIdx.x * BLOCK_X + threadIdx.x; unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y; unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z; unsigned conn_index = z * (connections.stepz / connections.elem_size) + y * (connections.stepy / connections.elem_size) + x; unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { unsigned int neighbours = connections[conn_index]; unsigned label = labels[labels_index]; if (label) { unsigned min_label = FindMinLabel(labels, neighbours, label, labels_index); if (min_label < label) { labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label); *changes = 1; } } } } __global__ void Analyze(cuda::PtrStepSz3i labels) { unsigned x = blockIdx.x * BLOCK_X + threadIdx.x; unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y; unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z; unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { unsigned int val = labels[labels_index]; if (val) { labels[labels_index] = Find(labels.data, labels_index) + 1; } } } // Final Labeling phase // Assigns every pixel of 2x2x2 blocks the block label __global__ void FinalLabeling(cuda::PtrStepSz3i block_labels, cuda::PtrStepSz3i labels, const cuda::PtrStepSz3b img) { unsigned x = blockIdx.x * BLOCK_X + threadIdx.x; unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y; unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z; unsigned blocks_index = z * (block_labels.stepz / block_labels.elem_size) + y * (block_labels.stepy / block_labels.elem_size) + x; unsigned labels_index = 2 * z * (labels.stepz / labels.elem_size) + 2 * y * (labels.stepy / labels.elem_size) + 2 * x; unsigned img_index = 2 * z * (img.stepz / img.elem_size) + 2 * y * (img.stepy / img.elem_size) + 2 * x; if (x < block_labels.x && y < block_labels.y && z < block_labels.z) { unsigned int label = block_labels[blocks_index]; // Current plane if (img[img_index]) { labels[labels_index] = label; } else { labels[labels_index] = 0; } if (2 * x + 1 < labels.x) { if (img[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (2 * y + 1 < labels.y) { if (img[img_index + img.stepy + 1]) labels[labels_index + (labels.stepy / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.stepy / labels.elem_size) + 1] = 0; } } } if (2 * y + 1 < labels.y) { if (img[img_index + img.stepy]) labels[labels_index + (labels.stepy / labels.elem_size)] = label; else { labels[labels_index + (labels.stepy / labels.elem_size)] = 0; } } // Upper plane if (2 * z + 1 < labels.z) { if (img[img_index + img.stepz / img.elem_size]) labels[labels_index + labels.stepz / labels.elem_size] = label; else { labels[labels_index + labels.stepz / labels.elem_size] = 0; } if (2 * x + 1 < labels.x) { if (img[img_index + img.stepz / img.elem_size + 1]) labels[labels_index + labels.stepz / labels.elem_size + 1] = label; else { labels[labels_index + labels.stepz / labels.elem_size + 1] = 0; } if (2 * y + 1 < labels.y) { if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size + 1]) labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size) + 1] = label; else { labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size) + 1] = 0; } } } if (2 * y + 1 
< labels.y) { if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)] = label; else { labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)] = 0; } } } } } } class BE_3D : public GpuLabeling3D<CONN_26> { private: dim3 grid_size_; dim3 block_size_; char changes; char *d_changes; cuda::GpuMat3 d_connections_; cuda::GpuMat3 d_block_labels_; public: BE_3D() {} void PerformLabeling() { d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1); // Extra structures that I would gladly do without d_connections_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1); d_block_labels_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1); grid_size_ = dim3((d_block_labels_.x + BLOCK_X - 1) / BLOCK_X, (d_block_labels_.y + BLOCK_Y - 1) / BLOCK_Y, (d_block_labels_.z + BLOCK_Z - 1) / BLOCK_Z); block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z); cudaMalloc(&d_changes, sizeof(char)); Init << <grid_size_, block_size_ >> > (d_img_, d_connections_, d_block_labels_); //Mat init_labels; //d_block_labels_.download(init_labels); //::NormalizeLabels(init_labels); //Mat img_out; //ColorLabels(init_labels, img_out); //volwrite("C:\\Users\\Stefano\\Desktop\\debug\\init_labels", img_out); while (true) { changes = 0; cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes); cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_block_labels_); } //Mat block_labels; //d_block_labels_.download(block_labels); //::NormalizeLabels(block_labels); //ColorLabels(block_labels, img_out); //volwrite("C:\\Users\\Stefano\\Desktop\\debug\\block_labels", img_out); FinalLabeling << <grid_size_, block_size_ >> > (d_block_labels_, d_img_labels_, d_img_); //d_img_labels_.download(img_labels_); cudaFree(d_changes); d_connections_.release(); d_block_labels_.release(); cudaDeviceSynchronize(); //d_img_labels_.download(img_labels_); //Mat errors; //bool correct = CheckLabeledVolume(img_, img_labels_, errors); //volwrite("C:\\Users\\Stefano\\Desktop\\debug\\BE_errors", errors); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1); d_connections_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1); d_block_labels_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1); cudaMalloc(&d_changes, sizeof(char)); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); cudaFree(d_changes); d_connections_.release(); d_block_labels_.release(); perf_.stop(); return perf_.last(); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((d_block_labels_.x + BLOCK_X - 1) / BLOCK_X, (d_block_labels_.y + BLOCK_Y - 1) / BLOCK_Y, (d_block_labels_.z + BLOCK_Z - 1) / BLOCK_Z); block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z); Init << <grid_size_, block_size_ >> > (d_img_, d_connections_, d_block_labels_); // La Init esplode // Controlla che cosa contiene connections //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, 
block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //assert(cudaDeviceSynchronize() == cudaSuccess); // Debug image of the initialization //Mat1i init_labels; //d_block_labels_.download(init_labels); while (true) { changes = 0; cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes); cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_block_labels_); } // Debug image of the block labels //Mat1i block_labels; //d_block_labels_.download(block_labels); FinalLabeling << <grid_size_, block_size_ >> > (d_block_labels_, d_img_labels_, d_img_); } public: void PerformLabelingWithSteps() { double alloc_timing = Alloc(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BE_3D);
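// The Init kernel above condenses each 2x2x2 block into a voxel bitmap P and then packs the presence of
// the 26 possible neighbouring blocks into a 32-bit connection mask with SetBit; Scan/FindMinLabel later
// read that mask back with HasBit, consuming 9 bits per plane. The host-only sketch below only illustrates
// that bit-packing convention; the helper names end in "Host" and the neighbour indices are made up, so it
// is an assumption-level illustration rather than a reproduction of the kernels above.
#include <cstdio>

template <typename T>
inline unsigned char HasBitHost(T bitmap, unsigned char pos) {
    return (bitmap >> pos) & 1;   // same convention as the device HasBit above
}

template <typename T>
inline void SetBitHost(T &bitmap, unsigned char pos) {
    bitmap |= (T(1) << pos);      // same convention as the device SetBit above
}

int main() {
    unsigned int conn = 0;
    // Pretend the block is connected to three of its 26 neighbours (indices chosen arbitrarily).
    SetBitHost(conn, 1u);
    SetBitHost(conn, 12u);
    SetBitHost(conn, 22u);
    for (unsigned char p = 0; p < 27; ++p) {
        if (HasBitHost(conn, p)) printf("neighbour %u is connected\n", (unsigned)p);
    }
    return 0;
}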
cc5b06d34f980cf665b1f5026f98238e706f4ffa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gtest/gtest.h" #include "nnsearch2.h" #include "defaults.h" #include "spdlog/spdlog.h" #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <iostream> #include <vector> #include <string> #include <limits> #include <random> namespace { class NearestNeighborSearchCudaKernelTest : public ::testing::Test { protected: std::shared_ptr<spdlog::logger> logger_; NearestNeighborSearchCudaKernelTest() { logger_ = spdlog::get("console"); if (! logger_) { logger_ = spdlog::stdout_logger_mt("console"); } } virtual ~NearestNeighborSearchCudaKernelTest() { } template <typename T> void print_matrix_debug( const size_t rows, const size_t row_start, const size_t col_start, const size_t drows, const size_t dcols, T& val) { std::cout << " "; for (size_t j = 0; j < dcols; j++) { std::cout << std::setw(6) << col_start + j << ","; } std::cout << std::endl; for (size_t i = 0; i < drows; i++) { std::cout << std::setw(6) << i << "| "; for (size_t j = 0; j < dcols; j++) { size_t idx = row_start + i + (col_start + j) * rows; std::cout << std::setw(6) << val[idx] << ","; } std::cout << std::endl; } } }; TEST_F(NearestNeighborSearchCudaKernelTest, SumSquared1Test) { unsigned int m = 6; unsigned int k = 3; thrust::device_vector<double> x(m * k); thrust::device_vector<double> x2(m); thrust::sequence(x.begin(), x.end()); hipLaunchKernelGGL(( cudautils::sum_squared), dim3(1), dim3(16), 0, 0, m, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(x2.data())); thrust::host_vector<double> h_x(x); thrust::host_vector<double> h_x2(x2); for (unsigned int i = 0; i < m; i++) { double sum = 0.0; for (unsigned int j = 0; j < k; j++) { sum += h_x[i + j * m] * h_x[i + j * m]; } ASSERT_EQ(sum, h_x2[i]); } } TEST_F(NearestNeighborSearchCudaKernelTest, SumSquared2Test) { unsigned int m = 6; unsigned int k = 30; thrust::device_vector<double> x(m * k); thrust::device_vector<double> x2(m); thrust::sequence(x.begin(), x.end()); hipLaunchKernelGGL(( cudautils::sum_squared), dim3(2), dim3(16), 0, 0, m, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(x2.data())); thrust::host_vector<double> h_x(x); thrust::host_vector<double> h_x2(x2); for (unsigned int i = 0; i < m; i++) { double sum = 0.0; for (unsigned int j = 0; j < k; j++) { sum += h_x[i + j * m] * h_x[i + j * m]; } ASSERT_EQ(sum, h_x2[i]); } } TEST_F(NearestNeighborSearchCudaKernelTest, CalcSquaredNorm1Test) { unsigned int m = 4; unsigned int n = 6; unsigned int k = 3; thrust::device_vector<double> x(m * k); thrust::device_vector<double> x2(m); thrust::device_vector<double> y(n * k); thrust::device_vector<double> y2(n); thrust::device_vector<double> r(m * n); thrust::sequence(x.begin(), x.end()); thrust::sequence(y.begin(), y.end(), 1); unsigned int m_blocks = cudautils::get_num_blocks(m, 16); unsigned int n_blocks = cudautils::get_num_blocks(n, 16); hipLaunchKernelGGL(( cudautils::sum_squared), dim3(m_blocks), dim3(16), 0, 0, m, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(x2.data())); hipLaunchKernelGGL(( cudautils::sum_squared), dim3(n_blocks), dim3(16), 0, 0, n, k, thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(y2.data())); m_blocks = cudautils::get_num_blocks(m, 32); n_blocks = cudautils::get_num_blocks(n, 32); dim3 dim_blocks(n_blocks, m_blocks, 1); dim3 dim_threads(32, 32, 1); hipLaunchKernelGGL(( cudautils::calc_squared_norm), dim3(dim_blocks), dim3(dim_threads), 0, 0, m, n, k, 
thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(x2.data()), thrust::raw_pointer_cast(y2.data()), thrust::raw_pointer_cast(r.data())); thrust::host_vector<double> h_x(x); thrust::host_vector<double> h_y(y); thrust::host_vector<double> h_r(r); for (unsigned int j = 0; j < n; j++) { for (unsigned int i = 0; i < m; i++) { double sum = 0.0; for (unsigned int w = 0; w < k; w++) { double diff = h_x[i + w * m] - h_y[j + w * n]; sum += diff * diff; } ASSERT_EQ(sum, h_r[j + i * n]); } } } TEST_F(NearestNeighborSearchCudaKernelTest, CalcSquaredNorm2Test) { unsigned int m = 40; unsigned int n = 6; unsigned int k = 3; thrust::device_vector<double> x(m * k); thrust::device_vector<double> x2(m); thrust::device_vector<double> y(n * k); thrust::device_vector<double> y2(n); thrust::device_vector<double> r(m * n); thrust::sequence(x.begin(), x.end()); thrust::sequence(y.begin(), y.end(), 1); unsigned int m_blocks = cudautils::get_num_blocks(m, 16); unsigned int n_blocks = cudautils::get_num_blocks(n, 16); hipLaunchKernelGGL(( cudautils::sum_squared), dim3(m_blocks), dim3(16), 0, 0, m, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(x2.data())); hipLaunchKernelGGL(( cudautils::sum_squared), dim3(n_blocks), dim3(16), 0, 0, n, k, thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(y2.data())); m_blocks = cudautils::get_num_blocks(m, 32); n_blocks = cudautils::get_num_blocks(n, 32); dim3 dim_blocks(n_blocks, m_blocks, 1); dim3 dim_threads(32, 32, 1); hipLaunchKernelGGL(( cudautils::calc_squared_norm), dim3(dim_blocks), dim3(dim_threads), 0, 0, m, n, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(x2.data()), thrust::raw_pointer_cast(y2.data()), thrust::raw_pointer_cast(r.data())); thrust::host_vector<double> h_x(x); thrust::host_vector<double> h_y(y); thrust::host_vector<double> h_r(r); for (unsigned int j = 0; j < n; j++) { for (unsigned int i = 0; i < m; i++) { double sum = 0.0; for (unsigned int w = 0; w < k; w++) { double diff = h_x[i + w * m] - h_y[j + w * n]; sum += diff * diff; } ASSERT_EQ(sum, h_r[j + i * n]); } } } TEST_F(NearestNeighborSearchCudaKernelTest, CalcSquaredNorm3Test) { unsigned int m = 40; unsigned int n = 60; unsigned int k = 3; thrust::device_vector<double> x(m * k); thrust::device_vector<double> x2(m); thrust::device_vector<double> y(n * k); thrust::device_vector<double> y2(n); thrust::device_vector<double> r(m * n); thrust::sequence(x.begin(), x.end()); thrust::sequence(y.begin(), y.end(), 1); unsigned int m_blocks = cudautils::get_num_blocks(m, 16); unsigned int n_blocks = cudautils::get_num_blocks(n, 16); hipLaunchKernelGGL(( cudautils::sum_squared), dim3(m_blocks), dim3(16), 0, 0, m, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(x2.data())); hipLaunchKernelGGL(( cudautils::sum_squared), dim3(n_blocks), dim3(16), 0, 0, n, k, thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(y2.data())); m_blocks = cudautils::get_num_blocks(m, 32); n_blocks = cudautils::get_num_blocks(n, 32); dim3 dim_blocks(n_blocks, m_blocks, 1); dim3 dim_threads(32, 32, 1); hipLaunchKernelGGL(( cudautils::calc_squared_norm), dim3(dim_blocks), dim3(dim_threads), 0, 0, m, n, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(x2.data()), thrust::raw_pointer_cast(y2.data()), thrust::raw_pointer_cast(r.data())); thrust::host_vector<double> h_x(x); thrust::host_vector<double> h_y(y); thrust::host_vector<double> h_r(r); 
for (unsigned int j = 0; j < n; j++) { for (unsigned int i = 0; i < m; i++) { double sum = 0.0; for (unsigned int w = 0; w < k; w++) { double diff = h_x[i + w * m] - h_y[j + w * n]; sum += diff * diff; } ASSERT_EQ(sum, h_r[j + i * n]); } } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMins1Test) { // check if it is correct of choosing two minimum valus from a set of four values unsigned int m = 12; unsigned int n = 4; thrust::host_vector<double> h_r(m * n); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < m * n; i++) { h_r[i] = distr(generator); } // [a, b, c, d] --> [a, b] < [c, d] // min: a, b; a < b -> no movement h_r[0+0*n] = 0.10;//a h_r[1+0*n] = 0.15;//b // min: a, b; a > b -> no movement h_r[0+1*n] = 0.15;//a h_r[1+1*n] = 0.10;//b // min: a, c; a < c h_r[0+2*n] = 0.10;//a h_r[2+2*n] = 0.15;//c // min: a, c; a > c h_r[0+3*n] = 0.15;//a h_r[2+3*n] = 0.10;//c // min: a, d; a < d h_r[0+4*n] = 0.10;//a h_r[3+4*n] = 0.15;//d // min: a, d; a > d h_r[0+5*n] = 0.15;//a h_r[3+5*n] = 0.10;//d // min: b, c; b < c h_r[1+6*n] = 0.10;//b h_r[2+6*n] = 0.15;//c // min: b, c; b > c h_r[1+7*n] = 0.15;//b h_r[2+7*n] = 0.10;//c // min: b, d; b < d h_r[1+8*n] = 0.10;//b h_r[3+8*n] = 0.15;//d // min: b, d; b > d h_r[1+9*n] = 0.15;//b h_r[3+9*n] = 0.10;//d // min: c, d; c < d h_r[2+10*n] = 0.10;//c h_r[3+10*n] = 0.15;//d // min: c, d; c > d h_r[2+11*n] = 0.15;//c h_r[3+11*n] = 0.10;//d unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> r(h_r); thrust::device_vector<double> val(2 * m); thrust::device_vector<unsigned int> idx(2 * m); hipLaunchKernelGGL(( cudautils::get_two_mins), dim3(dim_blocks), dim3(defaults::num_threads_in_twotops_func), 0, 0, n, 0, thrust::raw_pointer_cast(r.data()), thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_val(val); thrust::host_vector<double> h_idx(idx); ASSERT_EQ(0.10, h_val[0+0*2]); ASSERT_EQ(0.15, h_val[1+0*2]); ASSERT_EQ(0.15, h_val[0+1*2]); ASSERT_EQ(0.10, h_val[1+1*2]); ASSERT_EQ(0.10, h_val[0+2*2]); ASSERT_EQ(0.15, h_val[1+2*2]); ASSERT_EQ(0.10, h_val[0+3*2]); ASSERT_EQ(0.15, h_val[1+3*2]); ASSERT_EQ(0.10, h_val[0+4*2]); ASSERT_EQ(0.15, h_val[1+4*2]); ASSERT_EQ(0.15, h_val[0+5*2]); ASSERT_EQ(0.10, h_val[1+5*2]); ASSERT_EQ(0.15, h_val[0+6*2]); ASSERT_EQ(0.10, h_val[1+6*2]); ASSERT_EQ(0.10, h_val[0+7*2]); ASSERT_EQ(0.15, h_val[1+7*2]); ASSERT_EQ(0.15, h_val[0+8*2]); ASSERT_EQ(0.10, h_val[1+8*2]); ASSERT_EQ(0.15, h_val[0+9*2]); ASSERT_EQ(0.10, h_val[1+9*2]); ASSERT_EQ(0.10, h_val[0+10*2]); ASSERT_EQ(0.15, h_val[1+10*2]); ASSERT_EQ(0.15, h_val[0+11*2]); ASSERT_EQ(0.10, h_val[1+11*2]); ASSERT_EQ(0, h_idx[0+0*2]); ASSERT_EQ(1, h_idx[1+0*2]); ASSERT_EQ(0, h_idx[0+1*2]); ASSERT_EQ(1, h_idx[1+1*2]); ASSERT_EQ(0, h_idx[0+2*2]); ASSERT_EQ(2, h_idx[1+2*2]); ASSERT_EQ(2, h_idx[0+3*2]); ASSERT_EQ(0, h_idx[1+3*2]); ASSERT_EQ(0, h_idx[0+4*2]); ASSERT_EQ(3, h_idx[1+4*2]); ASSERT_EQ(0, h_idx[0+5*2]); ASSERT_EQ(3, h_idx[1+5*2]); ASSERT_EQ(2, h_idx[0+6*2]); ASSERT_EQ(1, h_idx[1+6*2]); ASSERT_EQ(2, h_idx[0+7*2]); ASSERT_EQ(1, h_idx[1+7*2]); ASSERT_EQ(3, h_idx[0+8*2]); ASSERT_EQ(1, 
h_idx[1+8*2]); ASSERT_EQ(1, h_idx[0+9*2]); ASSERT_EQ(3, h_idx[1+9*2]); ASSERT_EQ(2, h_idx[0+10*2]); ASSERT_EQ(3, h_idx[1+10*2]); ASSERT_EQ(2, h_idx[0+11*2]); ASSERT_EQ(3, h_idx[1+11*2]); } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMins2Test) { // check if it is correct using n; 1*32 < n < 2*32 in a case of within one set of threads unsigned int n = 40; unsigned int m = n * (n - 1) / 2; thrust::host_vector<double> h_r(m * n); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < m * n; i++) { h_r[i] = distr(generator); } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { h_r[i+count*n] = 0.10; h_r[j+count*n] = 0.15; count++; } } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> r(h_r); thrust::device_vector<double> val(2 * m); thrust::device_vector<unsigned int> idx(2 * m); hipLaunchKernelGGL(( cudautils::get_two_mins), dim3(dim_blocks), dim3(defaults::num_threads_in_twotops_func), 0, 0, n, 0, thrust::raw_pointer_cast(r.data()), thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_val(val); thrust::host_vector<double> h_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { double val1 = h_val[0+count*2]; double val2 = h_val[1+count*2]; unsigned int idx1 = h_idx[0+count*2]; unsigned int idx2 = h_idx[1+count*2]; ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMins3Test) { // check if it is correct using n; 3*32 < n < 4*32 in a case of multi sets of threads but within one block unsigned int n = 120; unsigned int m = n * (n - 1) / 2; thrust::host_vector<double> h_r(m * n); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < m * n; i++) { h_r[i] = distr(generator); } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { h_r[i+count*n] = 0.10; h_r[j+count*n] = 0.15; count++; } } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> r(h_r); thrust::device_vector<double> val(2 * m); thrust::device_vector<unsigned int> idx(2 * m); hipLaunchKernelGGL(( cudautils::get_two_mins), dim3(dim_blocks), dim3(defaults::num_threads_in_twotops_func), 0, 0, n, 0, thrust::raw_pointer_cast(r.data()), thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_val(val); thrust::host_vector<double> h_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { double val1 = h_val[0+count*2]; double val2 = h_val[1+count*2]; unsigned int idx1 = h_idx[0+count*2]; unsigned int idx2 = h_idx[1+count*2]; ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMins4Test) { // check if it is correct using n > 2*32 in a case of multi blocks unsigned int n = 2000; unsigned int m = 2000; thrust::host_vector<double> h_r(m * n); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < m * n; i++) { h_r[i] = distr(generator); } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i+=10) { for (unsigned int j = i + 1; j < n; j+=100) { h_r[i+count*n] = 0.10; h_r[j+count*n] = 0.15; count++; if (count == m) break; } if (count == m) break; } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> r(h_r); thrust::device_vector<double> val(2 * n_blocks * m); thrust::device_vector<unsigned int> idx(2 * n_blocks * m); hipLaunchKernelGGL(( cudautils::get_two_mins), dim3(dim_blocks), dim3(defaults::num_threads_in_twotops_func), 0, 0, n, 0, thrust::raw_pointer_cast(r.data()), thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_val(val); thrust::host_vector<double> h_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i+=10) { for (unsigned int j = i + 1; j < n; j+=100) { double val1 = std::numeric_limits<double>::max(); double val2 = std::numeric_limits<double>::max(); unsigned int idx1, idx2; for (unsigned int k = 0; k < n_blocks; k++) { double val_tmp1 = h_val[0+k*2+count*2*n_blocks]; double val_tmp2 = h_val[1+k*2+count*2*n_blocks]; unsigned int idx_tmp1 = h_idx[0+k*2+count*2*n_blocks]; unsigned int idx_tmp2 = h_idx[1+k*2+count*2*n_blocks]; if (val_tmp1 < val1) { val2 = val1; idx2 = idx1; val1 = val_tmp1; idx1 = idx_tmp1; } else if (val_tmp1 < val2) { val2 = val_tmp1; idx2 = idx_tmp1; } if (val_tmp2 < val1) { val2 = val1; idx2 = idx1; val1 = val_tmp2; idx1 = idx_tmp2; } else if (val_tmp2 < val2) { val2 = val_tmp2; idx2 = idx_tmp2; } } ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; if (count == m) break; } if (count == m) break; } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMinsWithIndex1Test) { unsigned int m = 12; thrust::host_vector<double> h_val(4 * m); thrust::host_vector<unsigned int> h_idx(4 * m); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < 4 * m; i++) { h_val[i] = distr(generator); h_idx[i] = 100; } // [a, b, c, d] --> [a, b] < [c, d] // min: a, b; a < b -> no movement h_val[0+0*4] = 0.10;//a h_val[1+0*4] = 0.15;//b h_idx[0+0*4] = 0; h_idx[1+0*4] = 1; // min: a, b; a > b -> no movement h_val[0+1*4] = 0.15;//a h_val[1+1*4] = 0.10;//b h_idx[0+1*4] = 0; h_idx[1+1*4] = 1; // min: a, c; a < c h_val[0+2*4] = 0.10;//a h_val[2+2*4] = 0.15;//c h_idx[0+2*4] = 0; h_idx[2+2*4] = 2; // min: a, c; a > c h_val[0+3*4] = 0.15;//a h_val[2+3*4] = 0.10;//c h_idx[0+3*4] = 0; h_idx[2+3*4] = 2; // min: a, d; a < d h_val[0+4*4] = 0.10;//a h_val[3+4*4] = 0.15;//d h_idx[0+4*4] = 0; h_idx[3+4*4] = 3; // min: a, d; a > d h_val[0+5*4] = 0.15;//a h_val[3+5*4] = 0.10;//d h_idx[0+5*4] = 0; h_idx[3+5*4] = 3; // min: b, c; b < c h_val[1+6*4] = 0.10;//b h_val[2+6*4] = 0.15;//c h_idx[1+6*4] = 1; h_idx[2+6*4] = 2; // min: b, c; b > c h_val[1+7*4] = 0.15;//b h_val[2+7*4] = 0.10;//c h_idx[1+7*4] = 1; h_idx[2+7*4] = 2; // min: b, d; b < d h_val[1+8*4] = 0.10;//b h_val[3+8*4] = 0.15;//d h_idx[1+8*4] = 1; h_idx[3+8*4] = 3; // min: b, d; b > d h_val[1+9*4] = 0.15;//b h_val[3+9*4] = 0.10;//d h_idx[1+9*4] = 1; h_idx[3+9*4] = 3; // min: c, d; c < d h_val[2+10*4] = 0.10;//c h_val[3+10*4] = 0.15;//d h_idx[2+10*4] = 2; h_idx[3+10*4] = 3; // min: c, d; c > d h_val[2+11*4] = 0.15;//c h_val[3+11*4] = 0.10;//d h_idx[2+11*4] = 2; h_idx[3+11*4] = 3; unsigned int n_blocks = 4; unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); hipLaunchKernelGGL(( cudautils::get_two_mins_with_index), dim3(dim_blocks), dim3(defaults::num_threads_in_twotops_func), 0, 0, 4, n_blocks, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); ASSERT_EQ(0.10, h_ret_val[0+0*4]); ASSERT_EQ(0.15, h_ret_val[1+0*4]); ASSERT_EQ(0.15, h_ret_val[0+1*4]); ASSERT_EQ(0.10, h_ret_val[1+1*4]); ASSERT_EQ(0.10, h_ret_val[0+2*4]); ASSERT_EQ(0.15, h_ret_val[1+2*4]); ASSERT_EQ(0.10, h_ret_val[0+3*4]); ASSERT_EQ(0.15, h_ret_val[1+3*4]); ASSERT_EQ(0.10, h_ret_val[0+4*4]); ASSERT_EQ(0.15, h_ret_val[1+4*4]); ASSERT_EQ(0.15, h_ret_val[0+5*4]); ASSERT_EQ(0.10, h_ret_val[1+5*4]); ASSERT_EQ(0.15, h_ret_val[0+6*4]); ASSERT_EQ(0.10, h_ret_val[1+6*4]); ASSERT_EQ(0.10, h_ret_val[0+7*4]); ASSERT_EQ(0.15, h_ret_val[1+7*4]); ASSERT_EQ(0.15, h_ret_val[0+8*4]); ASSERT_EQ(0.10, h_ret_val[1+8*4]); ASSERT_EQ(0.15, h_ret_val[0+9*4]); ASSERT_EQ(0.10, h_ret_val[1+9*4]); ASSERT_EQ(0.10, h_ret_val[0+10*4]); ASSERT_EQ(0.15, h_ret_val[1+10*4]); ASSERT_EQ(0.15, h_ret_val[0+11*4]); ASSERT_EQ(0.10, h_ret_val[1+11*4]); ASSERT_EQ(0, h_ret_idx[0+0*4]); ASSERT_EQ(1, h_ret_idx[1+0*4]); ASSERT_EQ(0, h_ret_idx[0+1*4]); ASSERT_EQ(1, h_ret_idx[1+1*4]); ASSERT_EQ(0, h_ret_idx[0+2*4]); ASSERT_EQ(2, h_ret_idx[1+2*4]); ASSERT_EQ(2, h_ret_idx[0+3*4]); ASSERT_EQ(0, h_ret_idx[1+3*4]); ASSERT_EQ(0, h_ret_idx[0+4*4]); ASSERT_EQ(3, h_ret_idx[1+4*4]); ASSERT_EQ(0, h_ret_idx[0+5*4]); ASSERT_EQ(3, h_ret_idx[1+5*4]); ASSERT_EQ(2, h_ret_idx[0+6*4]); ASSERT_EQ(1, h_ret_idx[1+6*4]); ASSERT_EQ(2, h_ret_idx[0+7*4]); ASSERT_EQ(1, h_ret_idx[1+7*4]); ASSERT_EQ(3, h_ret_idx[0+8*4]); ASSERT_EQ(1, h_ret_idx[1+8*4]); ASSERT_EQ(1, h_ret_idx[0+9*4]); ASSERT_EQ(3, h_ret_idx[1+9*4]); ASSERT_EQ(2, h_ret_idx[0+10*4]); ASSERT_EQ(3, h_ret_idx[1+10*4]); ASSERT_EQ(2, h_ret_idx[0+11*4]); ASSERT_EQ(3, h_ret_idx[1+11*4]); } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMinsWithIndex2Test) { unsigned int n = 40; unsigned int m = n * (n - 1) / 2; thrust::host_vector<double> h_val(n * m); thrust::host_vector<unsigned int> h_idx(n * m); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < n * m; i++) { h_val[i] = distr(generator); h_idx[i] = 100; } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { h_val[i+count*n] = 0.10; h_val[j+count*n] = 0.15; h_idx[i+count*n] = i; h_idx[j+count*n] = j; count++; } } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); hipLaunchKernelGGL(( cudautils::get_two_mins_with_index), dim3(dim_blocks), dim3(defaults::num_threads_in_twotops_func), 0, 0, n, n, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { double val1 = h_ret_val[0+count*n]; double val2 = h_ret_val[1+count*n]; unsigned int idx1 = h_ret_idx[0+count*n]; unsigned int idx2 = h_ret_idx[1+count*n]; ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMinsWithIndex3Test) { unsigned int n = 120; unsigned int m = n * (n - 1) / 2; thrust::host_vector<double> h_val(n * m); thrust::host_vector<unsigned int> h_idx(n * m); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < n * m; i++) { h_val[i] = distr(generator); h_idx[i] = std::numeric_limits<unsigned int>::max(); } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { h_val[i+count*n] = 0.10; h_val[j+count*n] = 0.15; h_idx[i+count*n] = i; h_idx[j+count*n] = j; count++; } } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); hipLaunchKernelGGL(( cudautils::get_two_mins_with_index), dim3(dim_blocks), dim3(defaults::num_threads_in_twotops_func), 0, 0, n, n, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { double val1 = h_ret_val[0+count*n]; double val2 = h_ret_val[1+count*n]; unsigned int idx1 = h_ret_idx[0+count*n]; unsigned int idx2 = h_ret_idx[1+count*n]; ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMinsWithIndex4Test) { unsigned int n = 2000; unsigned int m = 2000; thrust::host_vector<double> h_val(n * m); thrust::host_vector<unsigned int> h_idx(n * m); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < n * m; i++) { h_val[i] = distr(generator); h_idx[i] = std::numeric_limits<unsigned int>::max(); } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i+=10) { for (unsigned int j = i + 1; j < n; j+=100) { h_val[i+count*n] = 0.10; h_val[j+count*n] = 0.15; h_idx[i+count*n] = i; h_idx[j+count*n] = j; count++; if (count == m) break; } if (count == m) break; } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); hipLaunchKernelGGL(( cudautils::get_two_mins_with_index), dim3(dim_blocks), dim3(defaults::num_threads_in_twotops_func), 0, 0, n, n, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i+=10) { for (unsigned int j = i + 1; j < n; j+=100) { double val1 = std::numeric_limits<double>::max(); double val2 = std::numeric_limits<double>::max(); unsigned int idx1, idx2; for (unsigned int k = 0; k < n_blocks; k++) { double val_tmp1 = h_ret_val[0+k*n_block_size+count*n]; double val_tmp2 = h_ret_val[1+k*n_block_size+count*n]; unsigned int idx_tmp1 = h_ret_idx[0+k*n_block_size+count*n]; unsigned int idx_tmp2 = h_ret_idx[1+k*n_block_size+count*n]; if (val_tmp1 < val1) { val2 = val1; idx2 = idx1; val1 = val_tmp1; idx1 = idx_tmp1; } else if (val_tmp1 < val2) { val2 = val_tmp1; idx2 = idx_tmp1; } if (val_tmp2 < val1) { val2 = val1; idx2 = idx1; val1 = val_tmp2; idx1 = idx_tmp2; } else if (val_tmp2 < val2) { val2 = val_tmp2; idx2 = idx_tmp2; } } ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; if (count == m) break; } if (count == m) break; } } TEST_F(NearestNeighborSearchCudaKernelTest, GatherValuesOnBlocks1Test) { unsigned int m = 5; unsigned int stride = 12; unsigned int n_size = 12; unsigned int block_size = 2; thrust::host_vector<double> h_val(stride * m); thrust::host_vector<unsigned int> h_idx(stride * m); for (unsigned int i = 0; i < stride * m; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { for (unsigned int j = 0; j < n_size; j += block_size) { h_val[j + i * stride] = 0.10; h_val[j + 1 + i * stride] = 0.15; h_idx[j + i * stride] = j; h_idx[j + 1 + i * stride] = j + 1; } } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); hipLaunchKernelGGL(( cudautils::gather_values_on_blocks), dim3(m), dim3(4), 0, 0, stride, n_size, block_size, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { unsigned int count = 0; for (unsigned int j = 0; j < n_size; j += block_size) { ASSERT_EQ(0.10, h_ret_val[ 2 * count + i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + 2 * count + i * stride]); ASSERT_EQ(j, h_ret_idx[ 2 * count + i * stride]); ASSERT_EQ(j + 1, h_ret_idx[1 + 2 * count + i * stride]); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GatherValuesOnBlocks2Test) { unsigned int m = 5; unsigned int stride = 12; unsigned int n_size = 12; unsigned int block_size = 4; thrust::host_vector<double> h_val(stride * m); thrust::host_vector<unsigned int> h_idx(stride * m); for (unsigned int i = 0; i < stride * m; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { for (unsigned int j = 0; j < n_size; j += block_size) { h_val[j + i * stride] = 0.10; h_val[j + 1 + i * stride] = 0.15; h_idx[j + i * stride] = j; h_idx[j + 1 + i * stride] = j + 1; } } thrust::device_vector<double> 
val(h_val); thrust::device_vector<unsigned int> idx(h_idx); hipLaunchKernelGGL(( cudautils::gather_values_on_blocks), dim3(m), dim3(4), 0, 0, stride, n_size, block_size, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { unsigned int count = 0; for (unsigned int j = 0; j < n_size; j += block_size) { ASSERT_EQ(0.10, h_ret_val[ 2 * count + i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + 2 * count + i * stride]); ASSERT_EQ(j, h_ret_idx[ 2 * count + i * stride]); ASSERT_EQ(j + 1, h_ret_idx[1 + 2 * count + i * stride]); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GatherValuesOnBlocks3Test) { unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int m = 5; unsigned int stride = 200; unsigned int n_size = 120; unsigned int block_size = 40; unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? m : defaults::num_blocks_y_in_twotops_func; dim3 blocks_gather(num_m_blocks_y, num_m_blocks_z, 1); thrust::host_vector<double> h_val(stride * m); thrust::host_vector<unsigned int> h_idx(stride * m); for (unsigned int i = 0; i < stride * m; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { for (unsigned int j = 0; j < n_size; j += block_size) { h_val[j + i * stride] = 0.10; h_val[j + 1 + i * stride] = 0.15; h_idx[j + i * stride] = j; h_idx[j + 1 + i * stride] = j + 1; } } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); hipLaunchKernelGGL(( cudautils::gather_values_on_blocks), dim3(blocks_gather), dim3(n_block_size), 0, 0, stride, n_size, block_size, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { unsigned int count = 0; for (unsigned int j = 0; j < n_size; j += block_size) { ASSERT_EQ(0.10, h_ret_val[ 2 * count + i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + 2 * count + i * stride]); ASSERT_EQ(j, h_ret_idx[ 2 * count + i * stride]); ASSERT_EQ(j + 1, h_ret_idx[1 + 2 * count + i * stride]); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GatherValuesOnBlocks4Test) { unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int m = 5; unsigned int stride = 300; unsigned int n_size = 240; unsigned int block_size = 40; unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 blocks_gather(num_m_blocks_y, num_m_blocks_z, 1); thrust::host_vector<double> h_val(stride * m); thrust::host_vector<unsigned int> h_idx(stride * m); for (unsigned int i = 0; i < stride * m; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { for (unsigned int j = 0; j < n_size; j += block_size) { h_val[j + i * stride] = 0.10; h_val[j + 1 + i * stride] = 0.15; h_idx[j + i * stride] = j; h_idx[j + 1 + i * stride] = j + 1; } } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); hipLaunchKernelGGL(( cudautils::gather_values_on_blocks), dim3(blocks_gather), dim3(n_block_size), 0, 0, stride, n_size, block_size, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { unsigned int count = 0; for (unsigned int j = 0; j < n_size; j += block_size) { ASSERT_EQ(0.10, h_ret_val[ 2 * count + i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + 2 * count + i * stride]); ASSERT_EQ(j, h_ret_idx[ 2 * count + i * stride]); ASSERT_EQ(j + 1, h_ret_idx[1 + 2 * count + i * stride]); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GatherValuesOnBlocks5Test) { unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int m = 500; unsigned int stride = 300; unsigned int n_size = 240; unsigned int block_size = 40; unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? m : defaults::num_blocks_y_in_twotops_func; dim3 blocks_gather(num_m_blocks_y, num_m_blocks_z, 1); thrust::host_vector<double> h_val(stride * m); thrust::host_vector<unsigned int> h_idx(stride * m); for (unsigned int i = 0; i < stride * m; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { for (unsigned int j = 0; j < n_size; j += block_size) { h_val[j + i * stride] = 0.10; h_val[j + 1 + i * stride] = 0.15; h_idx[j + i * stride] = j; h_idx[j + 1 + i * stride] = j + 1; } } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); hipLaunchKernelGGL(( cudautils::gather_values_on_blocks), dim3(blocks_gather), dim3(n_block_size), 0, 0, stride, n_size, block_size, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { unsigned int count = 0; for (unsigned int j = 0; j < n_size; j += block_size) { ASSERT_EQ(0.10, h_ret_val[ 2 * count + i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + 2 * count + i * stride]); ASSERT_EQ(j, h_ret_idx[ 2 * count + i * stride]); ASSERT_EQ(j + 1, h_ret_idx[1 + 2 * count + i * stride]); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, SwapSort1Test) { unsigned int stride = 10; unsigned int m = 5; unsigned int total_size = stride * m; thrust::host_vector<double> h_val(total_size); thrust::host_vector<unsigned int> h_idx(total_size); for (unsigned int i = 0; i < total_size; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { h_val[ i * stride] = 0.15; h_val[1 + i * stride] = 0.10; h_idx[ i * 
stride] = i; h_idx[1 + i * stride] = i + 1; } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); unsigned int num_blocks = cudautils::get_num_blocks(m, defaults::num_threads_in_swap_sort_func); hipLaunchKernelGGL(( cudautils::swap_sort), dim3(num_blocks), dim3(defaults::num_threads_in_swap_sort_func), 0, 0, stride, total_size, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { ASSERT_EQ(0.10, h_ret_val[ i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + i * stride]); ASSERT_EQ(i + 1, h_ret_idx[ i * stride]); ASSERT_EQ(i, h_ret_idx[1 + i * stride]); } } TEST_F(NearestNeighborSearchCudaKernelTest, SwapSort2Test) { unsigned int stride = 10; unsigned int m = 201; unsigned int total_size = stride * m; thrust::host_vector<double> h_val(total_size); thrust::host_vector<unsigned int> h_idx(total_size); for (unsigned int i = 0; i < total_size; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { h_val[ i * stride] = 0.15; h_val[1 + i * stride] = 0.10; h_idx[ i * stride] = i; h_idx[1 + i * stride] = i + 1; } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); unsigned int num_blocks = cudautils::get_num_blocks(m, defaults::num_threads_in_swap_sort_func); hipLaunchKernelGGL(( cudautils::swap_sort), dim3(num_blocks), dim3(defaults::num_threads_in_swap_sort_func), 0, 0, stride, total_size, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { ASSERT_EQ(0.10, h_ret_val[ i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + i * stride]); ASSERT_EQ(i + 1, h_ret_idx[ i * stride]); ASSERT_EQ(i, h_ret_idx[1 + i * stride]); } } TEST_F(NearestNeighborSearchCudaKernelTest, SwapSort3Test) { unsigned int stride = 1000; unsigned int m = 2000; unsigned int total_size = stride * m; thrust::host_vector<double> h_val(total_size); thrust::host_vector<unsigned int> h_idx(total_size); for (unsigned int i = 0; i < total_size; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { h_val[ i * stride] = 0.15; h_val[1 + i * stride] = 0.10; h_idx[ i * stride] = i; h_idx[1 + i * stride] = i + 1; } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); unsigned int num_blocks = cudautils::get_num_blocks(m, defaults::num_threads_in_swap_sort_func); hipLaunchKernelGGL(( cudautils::swap_sort), dim3(num_blocks), dim3(defaults::num_threads_in_swap_sort_func), 0, 0, stride, total_size, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { ASSERT_EQ(0.10, h_ret_val[ i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + i * stride]); ASSERT_EQ(i + 1, h_ret_idx[ i * stride]); ASSERT_EQ(i, h_ret_idx[1 + i * stride]); } } }
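// The SumSquared and CalcSquaredNorm tests above store every m-by-k matrix column-major, i.e. element
// (i, j) lives at index i + j * m, and validate each row's squared L2 norm (and the pairwise squared
// distances) against a plain host loop. A small host-only reference of that row-norm check is sketched
// below with made-up sizes; cudautils::sum_squared itself is the kernel under test and is not reproduced
// here.
#include <vector>
#include <cstdio>

int main() {
    const unsigned int m = 6, k = 3;
    std::vector<double> x(m * k);
    for (unsigned int i = 0; i < m * k; ++i) x[i] = static_cast<double>(i);

    for (unsigned int i = 0; i < m; ++i) {
        double sum = 0.0;
        for (unsigned int j = 0; j < k; ++j) {
            double v = x[i + j * m];   // column-major access, matching the tests' reference loop
            sum += v * v;
        }
        printf("row %u: squared norm = %f\n", i, sum);
    }
    return 0;
}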
cc5b06d34f980cf665b1f5026f98238e706f4ffa.cu
#include "gtest/gtest.h" #include "nnsearch2.h" #include "defaults.h" #include "spdlog/spdlog.h" #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <iostream> #include <vector> #include <string> #include <limits> #include <random> namespace { class NearestNeighborSearchCudaKernelTest : public ::testing::Test { protected: std::shared_ptr<spdlog::logger> logger_; NearestNeighborSearchCudaKernelTest() { logger_ = spdlog::get("console"); if (! logger_) { logger_ = spdlog::stdout_logger_mt("console"); } } virtual ~NearestNeighborSearchCudaKernelTest() { } template <typename T> void print_matrix_debug( const size_t rows, const size_t row_start, const size_t col_start, const size_t drows, const size_t dcols, T& val) { std::cout << " "; for (size_t j = 0; j < dcols; j++) { std::cout << std::setw(6) << col_start + j << ","; } std::cout << std::endl; for (size_t i = 0; i < drows; i++) { std::cout << std::setw(6) << i << "| "; for (size_t j = 0; j < dcols; j++) { size_t idx = row_start + i + (col_start + j) * rows; std::cout << std::setw(6) << val[idx] << ","; } std::cout << std::endl; } } }; TEST_F(NearestNeighborSearchCudaKernelTest, SumSquared1Test) { unsigned int m = 6; unsigned int k = 3; thrust::device_vector<double> x(m * k); thrust::device_vector<double> x2(m); thrust::sequence(x.begin(), x.end()); cudautils::sum_squared<<<1, 16>>>(m, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(x2.data())); thrust::host_vector<double> h_x(x); thrust::host_vector<double> h_x2(x2); for (unsigned int i = 0; i < m; i++) { double sum = 0.0; for (unsigned int j = 0; j < k; j++) { sum += h_x[i + j * m] * h_x[i + j * m]; } ASSERT_EQ(sum, h_x2[i]); } } TEST_F(NearestNeighborSearchCudaKernelTest, SumSquared2Test) { unsigned int m = 6; unsigned int k = 30; thrust::device_vector<double> x(m * k); thrust::device_vector<double> x2(m); thrust::sequence(x.begin(), x.end()); cudautils::sum_squared<<<2, 16>>>(m, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(x2.data())); thrust::host_vector<double> h_x(x); thrust::host_vector<double> h_x2(x2); for (unsigned int i = 0; i < m; i++) { double sum = 0.0; for (unsigned int j = 0; j < k; j++) { sum += h_x[i + j * m] * h_x[i + j * m]; } ASSERT_EQ(sum, h_x2[i]); } } TEST_F(NearestNeighborSearchCudaKernelTest, CalcSquaredNorm1Test) { unsigned int m = 4; unsigned int n = 6; unsigned int k = 3; thrust::device_vector<double> x(m * k); thrust::device_vector<double> x2(m); thrust::device_vector<double> y(n * k); thrust::device_vector<double> y2(n); thrust::device_vector<double> r(m * n); thrust::sequence(x.begin(), x.end()); thrust::sequence(y.begin(), y.end(), 1); unsigned int m_blocks = cudautils::get_num_blocks(m, 16); unsigned int n_blocks = cudautils::get_num_blocks(n, 16); cudautils::sum_squared<<<m_blocks, 16>>>(m, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(x2.data())); cudautils::sum_squared<<<n_blocks, 16>>>(n, k, thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(y2.data())); m_blocks = cudautils::get_num_blocks(m, 32); n_blocks = cudautils::get_num_blocks(n, 32); dim3 dim_blocks(n_blocks, m_blocks, 1); dim3 dim_threads(32, 32, 1); cudautils::calc_squared_norm<<<dim_blocks, dim_threads>>>( m, n, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(x2.data()), thrust::raw_pointer_cast(y2.data()), thrust::raw_pointer_cast(r.data())); thrust::host_vector<double> h_x(x); thrust::host_vector<double> h_y(y); thrust::host_vector<double> h_r(r); for 
(unsigned int j = 0; j < n; j++) { for (unsigned int i = 0; i < m; i++) { double sum = 0.0; for (unsigned int w = 0; w < k; w++) { double diff = h_x[i + w * m] - h_y[j + w * n]; sum += diff * diff; } ASSERT_EQ(sum, h_r[j + i * n]); } } } TEST_F(NearestNeighborSearchCudaKernelTest, CalcSquaredNorm2Test) { unsigned int m = 40; unsigned int n = 6; unsigned int k = 3; thrust::device_vector<double> x(m * k); thrust::device_vector<double> x2(m); thrust::device_vector<double> y(n * k); thrust::device_vector<double> y2(n); thrust::device_vector<double> r(m * n); thrust::sequence(x.begin(), x.end()); thrust::sequence(y.begin(), y.end(), 1); unsigned int m_blocks = cudautils::get_num_blocks(m, 16); unsigned int n_blocks = cudautils::get_num_blocks(n, 16); cudautils::sum_squared<<<m_blocks, 16>>>(m, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(x2.data())); cudautils::sum_squared<<<n_blocks, 16>>>(n, k, thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(y2.data())); m_blocks = cudautils::get_num_blocks(m, 32); n_blocks = cudautils::get_num_blocks(n, 32); dim3 dim_blocks(n_blocks, m_blocks, 1); dim3 dim_threads(32, 32, 1); cudautils::calc_squared_norm<<<dim_blocks, dim_threads>>>( m, n, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(x2.data()), thrust::raw_pointer_cast(y2.data()), thrust::raw_pointer_cast(r.data())); thrust::host_vector<double> h_x(x); thrust::host_vector<double> h_y(y); thrust::host_vector<double> h_r(r); for (unsigned int j = 0; j < n; j++) { for (unsigned int i = 0; i < m; i++) { double sum = 0.0; for (unsigned int w = 0; w < k; w++) { double diff = h_x[i + w * m] - h_y[j + w * n]; sum += diff * diff; } ASSERT_EQ(sum, h_r[j + i * n]); } } } TEST_F(NearestNeighborSearchCudaKernelTest, CalcSquaredNorm3Test) { unsigned int m = 40; unsigned int n = 60; unsigned int k = 3; thrust::device_vector<double> x(m * k); thrust::device_vector<double> x2(m); thrust::device_vector<double> y(n * k); thrust::device_vector<double> y2(n); thrust::device_vector<double> r(m * n); thrust::sequence(x.begin(), x.end()); thrust::sequence(y.begin(), y.end(), 1); unsigned int m_blocks = cudautils::get_num_blocks(m, 16); unsigned int n_blocks = cudautils::get_num_blocks(n, 16); cudautils::sum_squared<<<m_blocks, 16>>>(m, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(x2.data())); cudautils::sum_squared<<<n_blocks, 16>>>(n, k, thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(y2.data())); m_blocks = cudautils::get_num_blocks(m, 32); n_blocks = cudautils::get_num_blocks(n, 32); dim3 dim_blocks(n_blocks, m_blocks, 1); dim3 dim_threads(32, 32, 1); cudautils::calc_squared_norm<<<dim_blocks, dim_threads>>>( m, n, k, thrust::raw_pointer_cast(x.data()), thrust::raw_pointer_cast(y.data()), thrust::raw_pointer_cast(x2.data()), thrust::raw_pointer_cast(y2.data()), thrust::raw_pointer_cast(r.data())); thrust::host_vector<double> h_x(x); thrust::host_vector<double> h_y(y); thrust::host_vector<double> h_r(r); for (unsigned int j = 0; j < n; j++) { for (unsigned int i = 0; i < m; i++) { double sum = 0.0; for (unsigned int w = 0; w < k; w++) { double diff = h_x[i + w * m] - h_y[j + w * n]; sum += diff * diff; } ASSERT_EQ(sum, h_r[j + i * n]); } } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMins1Test) { // check if it is correct of choosing two minimum valus from a set of four values unsigned int m = 12; unsigned int n = 4; thrust::host_vector<double> h_r(m * n); std::default_random_engine generator; 
std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < m * n; i++) { h_r[i] = distr(generator); } // [a, b, c, d] --> [a, b] < [c, d] // min: a, b; a < b -> no movement h_r[0+0*n] = 0.10;//a h_r[1+0*n] = 0.15;//b // min: a, b; a > b -> no movement h_r[0+1*n] = 0.15;//a h_r[1+1*n] = 0.10;//b // min: a, c; a < c h_r[0+2*n] = 0.10;//a h_r[2+2*n] = 0.15;//c // min: a, c; a > c h_r[0+3*n] = 0.15;//a h_r[2+3*n] = 0.10;//c // min: a, d; a < d h_r[0+4*n] = 0.10;//a h_r[3+4*n] = 0.15;//d // min: a, d; a > d h_r[0+5*n] = 0.15;//a h_r[3+5*n] = 0.10;//d // min: b, c; b < c h_r[1+6*n] = 0.10;//b h_r[2+6*n] = 0.15;//c // min: b, c; b > c h_r[1+7*n] = 0.15;//b h_r[2+7*n] = 0.10;//c // min: b, d; b < d h_r[1+8*n] = 0.10;//b h_r[3+8*n] = 0.15;//d // min: b, d; b > d h_r[1+9*n] = 0.15;//b h_r[3+9*n] = 0.10;//d // min: c, d; c < d h_r[2+10*n] = 0.10;//c h_r[3+10*n] = 0.15;//d // min: c, d; c > d h_r[2+11*n] = 0.15;//c h_r[3+11*n] = 0.10;//d unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> r(h_r); thrust::device_vector<double> val(2 * m); thrust::device_vector<unsigned int> idx(2 * m); cudautils::get_two_mins<<<dim_blocks, defaults::num_threads_in_twotops_func>>>( n, 0, thrust::raw_pointer_cast(r.data()), thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_val(val); thrust::host_vector<double> h_idx(idx); ASSERT_EQ(0.10, h_val[0+0*2]); ASSERT_EQ(0.15, h_val[1+0*2]); ASSERT_EQ(0.15, h_val[0+1*2]); ASSERT_EQ(0.10, h_val[1+1*2]); ASSERT_EQ(0.10, h_val[0+2*2]); ASSERT_EQ(0.15, h_val[1+2*2]); ASSERT_EQ(0.10, h_val[0+3*2]); ASSERT_EQ(0.15, h_val[1+3*2]); ASSERT_EQ(0.10, h_val[0+4*2]); ASSERT_EQ(0.15, h_val[1+4*2]); ASSERT_EQ(0.15, h_val[0+5*2]); ASSERT_EQ(0.10, h_val[1+5*2]); ASSERT_EQ(0.15, h_val[0+6*2]); ASSERT_EQ(0.10, h_val[1+6*2]); ASSERT_EQ(0.10, h_val[0+7*2]); ASSERT_EQ(0.15, h_val[1+7*2]); ASSERT_EQ(0.15, h_val[0+8*2]); ASSERT_EQ(0.10, h_val[1+8*2]); ASSERT_EQ(0.15, h_val[0+9*2]); ASSERT_EQ(0.10, h_val[1+9*2]); ASSERT_EQ(0.10, h_val[0+10*2]); ASSERT_EQ(0.15, h_val[1+10*2]); ASSERT_EQ(0.15, h_val[0+11*2]); ASSERT_EQ(0.10, h_val[1+11*2]); ASSERT_EQ(0, h_idx[0+0*2]); ASSERT_EQ(1, h_idx[1+0*2]); ASSERT_EQ(0, h_idx[0+1*2]); ASSERT_EQ(1, h_idx[1+1*2]); ASSERT_EQ(0, h_idx[0+2*2]); ASSERT_EQ(2, h_idx[1+2*2]); ASSERT_EQ(2, h_idx[0+3*2]); ASSERT_EQ(0, h_idx[1+3*2]); ASSERT_EQ(0, h_idx[0+4*2]); ASSERT_EQ(3, h_idx[1+4*2]); ASSERT_EQ(0, h_idx[0+5*2]); ASSERT_EQ(3, h_idx[1+5*2]); ASSERT_EQ(2, h_idx[0+6*2]); ASSERT_EQ(1, h_idx[1+6*2]); ASSERT_EQ(2, h_idx[0+7*2]); ASSERT_EQ(1, h_idx[1+7*2]); ASSERT_EQ(3, h_idx[0+8*2]); ASSERT_EQ(1, h_idx[1+8*2]); ASSERT_EQ(1, h_idx[0+9*2]); ASSERT_EQ(3, h_idx[1+9*2]); ASSERT_EQ(2, h_idx[0+10*2]); ASSERT_EQ(3, h_idx[1+10*2]); ASSERT_EQ(2, h_idx[0+11*2]); ASSERT_EQ(3, h_idx[1+11*2]); } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMins2Test) { // check if it is correct using n; 1*32 < n < 2*32 in a case of within one set of threads unsigned int n = 40; unsigned int m = n * (n - 1) / 2; thrust::host_vector<double> h_r(m * n); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int 
i = 0; i < m * n; i++) { h_r[i] = distr(generator); } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { h_r[i+count*n] = 0.10; h_r[j+count*n] = 0.15; count++; } } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> r(h_r); thrust::device_vector<double> val(2 * m); thrust::device_vector<unsigned int> idx(2 * m); cudautils::get_two_mins<<<dim_blocks, defaults::num_threads_in_twotops_func>>>( n, 0, thrust::raw_pointer_cast(r.data()), thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_val(val); thrust::host_vector<double> h_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { double val1 = h_val[0+count*2]; double val2 = h_val[1+count*2]; unsigned int idx1 = h_idx[0+count*2]; unsigned int idx2 = h_idx[1+count*2]; ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMins3Test) { // check if it is correct using n; 3*32 < n < 4*32 in a case of multi sets of threads but within one block unsigned int n = 120; unsigned int m = n * (n - 1) / 2; thrust::host_vector<double> h_r(m * n); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < m * n; i++) { h_r[i] = distr(generator); } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { h_r[i+count*n] = 0.10; h_r[j+count*n] = 0.15; count++; } } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> r(h_r); thrust::device_vector<double> val(2 * m); thrust::device_vector<unsigned int> idx(2 * m); cudautils::get_two_mins<<<dim_blocks, defaults::num_threads_in_twotops_func>>>( n, 0, thrust::raw_pointer_cast(r.data()), thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_val(val); thrust::host_vector<double> h_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { double val1 = h_val[0+count*2]; double val2 = h_val[1+count*2]; unsigned int idx1 = h_idx[0+count*2]; unsigned int idx2 = h_idx[1+count*2]; ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMins4Test) { // check if it is correct using n > 2*32 in a case of multi blocks unsigned int n = 2000; unsigned int m = 2000; thrust::host_vector<double> h_r(m * n); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < m * n; i++) { h_r[i] = distr(generator); } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i+=10) { for (unsigned int j = i + 1; j < n; j+=100) { h_r[i+count*n] = 0.10; h_r[j+count*n] = 0.15; count++; if (count == m) break; } if (count == m) break; } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> r(h_r); thrust::device_vector<double> val(2 * n_blocks * m); thrust::device_vector<unsigned int> idx(2 * n_blocks * m); cudautils::get_two_mins<<<dim_blocks, defaults::num_threads_in_twotops_func>>>( n, 0, thrust::raw_pointer_cast(r.data()), thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_val(val); thrust::host_vector<double> h_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i+=10) { for (unsigned int j = i + 1; j < n; j+=100) { double val1 = std::numeric_limits<double>::max(); double val2 = std::numeric_limits<double>::max(); unsigned int idx1, idx2; for (unsigned int k = 0; k < n_blocks; k++) { double val_tmp1 = h_val[0+k*2+count*2*n_blocks]; double val_tmp2 = h_val[1+k*2+count*2*n_blocks]; unsigned int idx_tmp1 = h_idx[0+k*2+count*2*n_blocks]; unsigned int idx_tmp2 = h_idx[1+k*2+count*2*n_blocks]; if (val_tmp1 < val1) { val2 = val1; idx2 = idx1; val1 = val_tmp1; idx1 = idx_tmp1; } else if (val_tmp1 < val2) { val2 = val_tmp1; idx2 = idx_tmp1; } if (val_tmp2 < val1) { val2 = val1; idx2 = idx1; val1 = val_tmp2; idx1 = idx_tmp2; } else if (val_tmp2 < val2) { val2 = val_tmp2; idx2 = idx_tmp2; } } ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; if (count == m) break; } if (count == m) break; } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMinsWithIndex1Test) { unsigned int m = 12; thrust::host_vector<double> h_val(4 * m); thrust::host_vector<unsigned int> h_idx(4 * m); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < 4 * m; i++) { h_val[i] = distr(generator); h_idx[i] = 100; } // [a, b, c, d] --> [a, b] < [c, d] // min: a, b; a < b -> no movement h_val[0+0*4] = 0.10;//a h_val[1+0*4] = 0.15;//b h_idx[0+0*4] = 0; h_idx[1+0*4] = 1; // min: a, b; a > b -> no movement h_val[0+1*4] = 0.15;//a h_val[1+1*4] = 0.10;//b h_idx[0+1*4] = 0; h_idx[1+1*4] = 1; // min: a, c; a < c h_val[0+2*4] = 0.10;//a h_val[2+2*4] = 0.15;//c h_idx[0+2*4] = 0; h_idx[2+2*4] = 2; // min: a, c; a > c h_val[0+3*4] = 0.15;//a h_val[2+3*4] = 0.10;//c h_idx[0+3*4] = 0; h_idx[2+3*4] = 2; // min: a, d; a < d h_val[0+4*4] = 0.10;//a h_val[3+4*4] = 0.15;//d h_idx[0+4*4] = 0; h_idx[3+4*4] = 3; // min: a, d; a > d h_val[0+5*4] = 0.15;//a h_val[3+5*4] = 0.10;//d h_idx[0+5*4] = 0; h_idx[3+5*4] = 3; // min: b, c; b < c h_val[1+6*4] = 0.10;//b h_val[2+6*4] = 0.15;//c h_idx[1+6*4] = 1; h_idx[2+6*4] = 2; // min: b, c; b > c h_val[1+7*4] = 0.15;//b h_val[2+7*4] = 0.10;//c h_idx[1+7*4] = 1; h_idx[2+7*4] = 2; // min: b, d; b < d h_val[1+8*4] = 0.10;//b h_val[3+8*4] = 0.15;//d h_idx[1+8*4] = 1; h_idx[3+8*4] = 3; // min: b, d; b > d h_val[1+9*4] = 0.15;//b h_val[3+9*4] = 0.10;//d h_idx[1+9*4] = 1; h_idx[3+9*4] = 3; // min: c, d; c < d h_val[2+10*4] = 0.10;//c h_val[3+10*4] = 0.15;//d h_idx[2+10*4] = 2; h_idx[3+10*4] = 3; // min: c, d; c > d h_val[2+11*4] = 0.15;//c h_val[3+11*4] = 0.10;//d h_idx[2+11*4] = 2; h_idx[3+11*4] = 3; unsigned int n_blocks = 4; unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); cudautils::get_two_mins_with_index<<<dim_blocks, defaults::num_threads_in_twotops_func>>>( 4, n_blocks, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); ASSERT_EQ(0.10, h_ret_val[0+0*4]); ASSERT_EQ(0.15, h_ret_val[1+0*4]); ASSERT_EQ(0.15, h_ret_val[0+1*4]); ASSERT_EQ(0.10, h_ret_val[1+1*4]); ASSERT_EQ(0.10, h_ret_val[0+2*4]); ASSERT_EQ(0.15, h_ret_val[1+2*4]); ASSERT_EQ(0.10, h_ret_val[0+3*4]); ASSERT_EQ(0.15, h_ret_val[1+3*4]); ASSERT_EQ(0.10, h_ret_val[0+4*4]); ASSERT_EQ(0.15, h_ret_val[1+4*4]); ASSERT_EQ(0.15, h_ret_val[0+5*4]); ASSERT_EQ(0.10, h_ret_val[1+5*4]); ASSERT_EQ(0.15, h_ret_val[0+6*4]); ASSERT_EQ(0.10, h_ret_val[1+6*4]); ASSERT_EQ(0.10, h_ret_val[0+7*4]); ASSERT_EQ(0.15, h_ret_val[1+7*4]); ASSERT_EQ(0.15, h_ret_val[0+8*4]); ASSERT_EQ(0.10, h_ret_val[1+8*4]); ASSERT_EQ(0.15, h_ret_val[0+9*4]); ASSERT_EQ(0.10, h_ret_val[1+9*4]); ASSERT_EQ(0.10, h_ret_val[0+10*4]); ASSERT_EQ(0.15, h_ret_val[1+10*4]); ASSERT_EQ(0.15, h_ret_val[0+11*4]); ASSERT_EQ(0.10, h_ret_val[1+11*4]); ASSERT_EQ(0, h_ret_idx[0+0*4]); ASSERT_EQ(1, h_ret_idx[1+0*4]); ASSERT_EQ(0, h_ret_idx[0+1*4]); ASSERT_EQ(1, h_ret_idx[1+1*4]); ASSERT_EQ(0, h_ret_idx[0+2*4]); ASSERT_EQ(2, h_ret_idx[1+2*4]); ASSERT_EQ(2, h_ret_idx[0+3*4]); ASSERT_EQ(0, h_ret_idx[1+3*4]); ASSERT_EQ(0, h_ret_idx[0+4*4]); ASSERT_EQ(3, h_ret_idx[1+4*4]); ASSERT_EQ(0, h_ret_idx[0+5*4]); ASSERT_EQ(3, h_ret_idx[1+5*4]); ASSERT_EQ(2, h_ret_idx[0+6*4]); ASSERT_EQ(1, h_ret_idx[1+6*4]); ASSERT_EQ(2, h_ret_idx[0+7*4]); ASSERT_EQ(1, h_ret_idx[1+7*4]); ASSERT_EQ(3, h_ret_idx[0+8*4]); ASSERT_EQ(1, h_ret_idx[1+8*4]); ASSERT_EQ(1, h_ret_idx[0+9*4]); ASSERT_EQ(3, h_ret_idx[1+9*4]); ASSERT_EQ(2, h_ret_idx[0+10*4]); ASSERT_EQ(3, h_ret_idx[1+10*4]); ASSERT_EQ(2, h_ret_idx[0+11*4]); ASSERT_EQ(3, h_ret_idx[1+11*4]); } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMinsWithIndex2Test) { unsigned int n = 40; unsigned int m = n * (n - 1) / 2; thrust::host_vector<double> h_val(n * m); thrust::host_vector<unsigned int> h_idx(n * m); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < n * m; i++) { h_val[i] = distr(generator); h_idx[i] = 100; } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { h_val[i+count*n] = 0.10; h_val[j+count*n] = 0.15; h_idx[i+count*n] = i; h_idx[j+count*n] = j; count++; } } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); cudautils::get_two_mins_with_index<<<dim_blocks, defaults::num_threads_in_twotops_func>>>( n, n, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { double val1 = h_ret_val[0+count*n]; double val2 = h_ret_val[1+count*n]; unsigned int idx1 = h_ret_idx[0+count*n]; unsigned int idx2 = h_ret_idx[1+count*n]; ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMinsWithIndex3Test) { unsigned int n = 120; unsigned int m = n * (n - 1) / 2; thrust::host_vector<double> h_val(n * m); thrust::host_vector<unsigned int> h_idx(n * m); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < n * m; i++) { h_val[i] = distr(generator); h_idx[i] = std::numeric_limits<unsigned int>::max(); } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { h_val[i+count*n] = 0.10; h_val[j+count*n] = 0.15; h_idx[i+count*n] = i; h_idx[j+count*n] = j; count++; } } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); cudautils::get_two_mins_with_index<<<dim_blocks, defaults::num_threads_in_twotops_func>>>( n, n, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i++) { for (unsigned int j = i + 1; j < n; j++) { double val1 = h_ret_val[0+count*n]; double val2 = h_ret_val[1+count*n]; unsigned int idx1 = h_ret_idx[0+count*n]; unsigned int idx2 = h_ret_idx[1+count*n]; ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GetTwoMinsWithIndex4Test) { unsigned int n = 2000; unsigned int m = 2000; thrust::host_vector<double> h_val(n * m); thrust::host_vector<unsigned int> h_idx(n * m); std::default_random_engine generator; std::uniform_real_distribution<double> distr(0.2, 1.0); for (unsigned int i = 0; i < n * m; i++) { h_val[i] = distr(generator); h_idx[i] = std::numeric_limits<unsigned int>::max(); } unsigned int count = 0; for (unsigned int i = 0; i < n - 1; i+=10) { for (unsigned int j = i + 1; j < n; j+=100) { h_val[i+count*n] = 0.10; h_val[j+count*n] = 0.15; h_idx[i+count*n] = i; h_idx[j+count*n] = j; count++; if (count == m) break; } if (count == m) break; } unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int n_blocks = cudautils::get_num_blocks(n, n_block_size); unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 dim_blocks(n_blocks, num_m_blocks_y, num_m_blocks_z); thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); cudautils::get_two_mins_with_index<<<dim_blocks, defaults::num_threads_in_twotops_func>>>( n, n, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); count = 0; for (unsigned int i = 0; i < n - 1; i+=10) { for (unsigned int j = i + 1; j < n; j+=100) { double val1 = std::numeric_limits<double>::max(); double val2 = std::numeric_limits<double>::max(); unsigned int idx1, idx2; for (unsigned int k = 0; k < n_blocks; k++) { double val_tmp1 = h_ret_val[0+k*n_block_size+count*n]; double val_tmp2 = h_ret_val[1+k*n_block_size+count*n]; unsigned int idx_tmp1 = h_ret_idx[0+k*n_block_size+count*n]; unsigned int idx_tmp2 = h_ret_idx[1+k*n_block_size+count*n]; if (val_tmp1 < val1) { val2 = val1; idx2 = idx1; val1 = val_tmp1; idx1 = idx_tmp1; } else if (val_tmp1 < val2) { val2 = val_tmp1; idx2 = idx_tmp1; } if (val_tmp2 < val1) { val2 = val1; idx2 = idx1; val1 = val_tmp2; idx1 = idx_tmp2; } else if (val_tmp2 < val2) { val2 = val_tmp2; idx2 = idx_tmp2; } } ASSERT_TRUE( (val1 == 0.10 && val2 == 0.15 && idx1 == i && idx2 == j) || (val2 == 0.10 && val1 == 0.15 && idx2 == i && idx1 == j)); count++; if (count == m) break; } if (count == m) break; } } TEST_F(NearestNeighborSearchCudaKernelTest, GatherValuesOnBlocks1Test) { unsigned int m = 5; unsigned int stride = 12; unsigned int n_size = 12; unsigned int block_size = 2; thrust::host_vector<double> h_val(stride * m); thrust::host_vector<unsigned int> h_idx(stride * m); for (unsigned int i = 0; i < stride * m; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { for (unsigned int j = 0; j < n_size; j += block_size) { h_val[j + i * stride] = 0.10; h_val[j + 1 + i * stride] = 0.15; h_idx[j + i * stride] = j; h_idx[j + 1 + i * stride] = j + 1; } } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); cudautils::gather_values_on_blocks<<<m, 4>>>( stride, n_size, block_size, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { unsigned int count = 0; for (unsigned int j = 0; j < n_size; j += block_size) { ASSERT_EQ(0.10, h_ret_val[ 2 * count + i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + 2 * count + i * stride]); ASSERT_EQ(j, h_ret_idx[ 2 * count + i * stride]); ASSERT_EQ(j + 1, h_ret_idx[1 + 2 * count + i * stride]); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GatherValuesOnBlocks2Test) { unsigned int m = 5; unsigned int stride = 12; unsigned int n_size = 12; unsigned int block_size = 4; thrust::host_vector<double> h_val(stride * m); thrust::host_vector<unsigned int> h_idx(stride * m); for (unsigned int i = 0; i < stride * m; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { for (unsigned int j = 0; j < n_size; j += block_size) { h_val[j + i * stride] = 0.10; h_val[j + 1 + i * stride] = 0.15; h_idx[j + i * stride] = j; h_idx[j + 1 + i * stride] = j + 1; } } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); 
cudautils::gather_values_on_blocks<<<m, 4>>>( stride, n_size, block_size, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { unsigned int count = 0; for (unsigned int j = 0; j < n_size; j += block_size) { ASSERT_EQ(0.10, h_ret_val[ 2 * count + i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + 2 * count + i * stride]); ASSERT_EQ(j, h_ret_idx[ 2 * count + i * stride]); ASSERT_EQ(j + 1, h_ret_idx[1 + 2 * count + i * stride]); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GatherValuesOnBlocks3Test) { unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int m = 5; unsigned int stride = 200; unsigned int n_size = 120; unsigned int block_size = 40; unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? m : defaults::num_blocks_y_in_twotops_func; dim3 blocks_gather(num_m_blocks_y, num_m_blocks_z, 1); thrust::host_vector<double> h_val(stride * m); thrust::host_vector<unsigned int> h_idx(stride * m); for (unsigned int i = 0; i < stride * m; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { for (unsigned int j = 0; j < n_size; j += block_size) { h_val[j + i * stride] = 0.10; h_val[j + 1 + i * stride] = 0.15; h_idx[j + i * stride] = j; h_idx[j + 1 + i * stride] = j + 1; } } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); cudautils::gather_values_on_blocks<<<blocks_gather, n_block_size>>>( stride, n_size, block_size, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { unsigned int count = 0; for (unsigned int j = 0; j < n_size; j += block_size) { ASSERT_EQ(0.10, h_ret_val[ 2 * count + i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + 2 * count + i * stride]); ASSERT_EQ(j, h_ret_idx[ 2 * count + i * stride]); ASSERT_EQ(j + 1, h_ret_idx[1 + 2 * count + i * stride]); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GatherValuesOnBlocks4Test) { unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int m = 5; unsigned int stride = 300; unsigned int n_size = 240; unsigned int block_size = 40; unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? 
m : defaults::num_blocks_y_in_twotops_func; dim3 blocks_gather(num_m_blocks_y, num_m_blocks_z, 1); thrust::host_vector<double> h_val(stride * m); thrust::host_vector<unsigned int> h_idx(stride * m); for (unsigned int i = 0; i < stride * m; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { for (unsigned int j = 0; j < n_size; j += block_size) { h_val[j + i * stride] = 0.10; h_val[j + 1 + i * stride] = 0.15; h_idx[j + i * stride] = j; h_idx[j + 1 + i * stride] = j + 1; } } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); cudautils::gather_values_on_blocks<<<blocks_gather, n_block_size>>>( stride, n_size, block_size, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { unsigned int count = 0; for (unsigned int j = 0; j < n_size; j += block_size) { ASSERT_EQ(0.10, h_ret_val[ 2 * count + i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + 2 * count + i * stride]); ASSERT_EQ(j, h_ret_idx[ 2 * count + i * stride]); ASSERT_EQ(j + 1, h_ret_idx[1 + 2 * count + i * stride]); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, GatherValuesOnBlocks5Test) { unsigned int n_block_size = 2 * defaults::num_threads_in_twotops_func; unsigned int m = 500; unsigned int stride = 300; unsigned int n_size = 240; unsigned int block_size = 40; unsigned int num_m_blocks_z = cudautils::get_num_blocks(m, defaults::num_blocks_y_in_twotops_func); unsigned int num_m_blocks_y = (num_m_blocks_z == 1) ? m : defaults::num_blocks_y_in_twotops_func; dim3 blocks_gather(num_m_blocks_y, num_m_blocks_z, 1); thrust::host_vector<double> h_val(stride * m); thrust::host_vector<unsigned int> h_idx(stride * m); for (unsigned int i = 0; i < stride * m; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { for (unsigned int j = 0; j < n_size; j += block_size) { h_val[j + i * stride] = 0.10; h_val[j + 1 + i * stride] = 0.15; h_idx[j + i * stride] = j; h_idx[j + 1 + i * stride] = j + 1; } } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); cudautils::gather_values_on_blocks<<<blocks_gather, n_block_size>>>( stride, n_size, block_size, m, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { unsigned int count = 0; for (unsigned int j = 0; j < n_size; j += block_size) { ASSERT_EQ(0.10, h_ret_val[ 2 * count + i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + 2 * count + i * stride]); ASSERT_EQ(j, h_ret_idx[ 2 * count + i * stride]); ASSERT_EQ(j + 1, h_ret_idx[1 + 2 * count + i * stride]); count++; } } } TEST_F(NearestNeighborSearchCudaKernelTest, SwapSort1Test) { unsigned int stride = 10; unsigned int m = 5; unsigned int total_size = stride * m; thrust::host_vector<double> h_val(total_size); thrust::host_vector<unsigned int> h_idx(total_size); for (unsigned int i = 0; i < total_size; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { h_val[ i * stride] = 0.15; h_val[1 + i * stride] = 0.10; h_idx[ i * stride] = i; h_idx[1 + i * stride] = i + 1; } thrust::device_vector<double> 
val(h_val); thrust::device_vector<unsigned int> idx(h_idx); unsigned int num_blocks = cudautils::get_num_blocks(m, defaults::num_threads_in_swap_sort_func); cudautils::swap_sort<<<num_blocks, defaults::num_threads_in_swap_sort_func>>>( stride, total_size, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { ASSERT_EQ(0.10, h_ret_val[ i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + i * stride]); ASSERT_EQ(i + 1, h_ret_idx[ i * stride]); ASSERT_EQ(i, h_ret_idx[1 + i * stride]); } } TEST_F(NearestNeighborSearchCudaKernelTest, SwapSort2Test) { unsigned int stride = 10; unsigned int m = 201; unsigned int total_size = stride * m; thrust::host_vector<double> h_val(total_size); thrust::host_vector<unsigned int> h_idx(total_size); for (unsigned int i = 0; i < total_size; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { h_val[ i * stride] = 0.15; h_val[1 + i * stride] = 0.10; h_idx[ i * stride] = i; h_idx[1 + i * stride] = i + 1; } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); unsigned int num_blocks = cudautils::get_num_blocks(m, defaults::num_threads_in_swap_sort_func); cudautils::swap_sort<<<num_blocks, defaults::num_threads_in_swap_sort_func>>>( stride, total_size, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { ASSERT_EQ(0.10, h_ret_val[ i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + i * stride]); ASSERT_EQ(i + 1, h_ret_idx[ i * stride]); ASSERT_EQ(i, h_ret_idx[1 + i * stride]); } } TEST_F(NearestNeighborSearchCudaKernelTest, SwapSort3Test) { unsigned int stride = 1000; unsigned int m = 2000; unsigned int total_size = stride * m; thrust::host_vector<double> h_val(total_size); thrust::host_vector<unsigned int> h_idx(total_size); for (unsigned int i = 0; i < total_size; i++) { h_val[i] = std::numeric_limits<double>::max(); h_idx[i] = std::numeric_limits<unsigned int>::max(); } for (unsigned int i = 0; i < m; i++) { h_val[ i * stride] = 0.15; h_val[1 + i * stride] = 0.10; h_idx[ i * stride] = i; h_idx[1 + i * stride] = i + 1; } thrust::device_vector<double> val(h_val); thrust::device_vector<unsigned int> idx(h_idx); unsigned int num_blocks = cudautils::get_num_blocks(m, defaults::num_threads_in_swap_sort_func); cudautils::swap_sort<<<num_blocks, defaults::num_threads_in_swap_sort_func>>>( stride, total_size, thrust::raw_pointer_cast(val.data()), thrust::raw_pointer_cast(idx.data())); thrust::host_vector<double> h_ret_val(val); thrust::host_vector<unsigned int> h_ret_idx(idx); for (unsigned int i = 0; i < m; i++) { ASSERT_EQ(0.10, h_ret_val[ i * stride]); ASSERT_EQ(0.15, h_ret_val[1 + i * stride]); ASSERT_EQ(i + 1, h_ret_idx[ i * stride]); ASSERT_EQ(i, h_ret_idx[1 + i * stride]); } } }
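A compact host-side restatement of what the SwapSort tests above expect from cudautils::swap_sort, assuming the kernel only orders the first two (value, index) pairs of each stride-length row; swap_sort_reference is a hypothetical helper added for illustration, not the kernel itself.

// Hypothetical CPU reference for the behaviour checked by the SwapSort tests:
// within each row of length `stride`, swap the first two (value, index) pairs
// so that the smaller value comes first.
#include <utility>
#include <vector>

static void swap_sort_reference(std::vector<double> &val,
                                std::vector<unsigned int> &idx,
                                unsigned int stride) {
    for (unsigned int row = 0; (row + 1) * stride <= val.size(); ++row) {
        unsigned int base = row * stride;
        if (val[base] > val[base + 1]) {
            std::swap(val[base], val[base + 1]);
            std::swap(idx[base], idx[base + 1]);
        }
    }
}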
66bd3734164698cd0101c74abc952ebfa12aa8aa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <algorithm>

#define N 1000
#define N_MAX 1000000

// Despite its name, this kernel computes Collatz sequence lengths:
// each of the N threads walks N consecutive starting values and stores
// the length of the sequence for every value it visits.
__global__ void add_vectors(int *inputs, long *outputs)
{
    int i = inputs[threadIdx.x];
    int max = i + N;
    while (i < max){
        int length = 1;
        long start = i;
        if (start == 0){
            start = 1;
        }
        while (start != 1){
            if (start % 2 == 0){
                start = start / 2;
            }
            else{
                start = 3 * start + 1;
            }
            length += 1;
        }
        outputs[i] = length;
        i += 1;
    }
}

int main(void)
{
    int *inputs;
    long *outputs;
    int *d_inputs;
    long *d_outputs;
    int size = N * sizeof(int);
    int size_max = N_MAX * sizeof(long);

    inputs = (int *)malloc(size);
    outputs = (long *)malloc(size_max);

    // Thread t starts at N*t, so the single block of N threads covers [0, N_MAX).
    for (int i = 0; i < N; ++i){
        inputs[i] = N*i;
    }

    hipMalloc(&d_inputs, size);
    hipMalloc(&d_outputs, size_max);

    hipMemcpy(d_inputs, inputs, size, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( add_vectors), dim3(1), dim3(N), 0, 0, d_inputs, d_outputs);

    hipMemcpy(outputs, d_outputs, size_max, hipMemcpyDeviceToHost);

    hipFree(d_inputs);
    hipFree(d_outputs);

    // for (int i = 0; i < 10000; ++i){
    //     std::cout << outputs[i] << std::endl;
    // }

    std::cout << "------------" << std::endl;

    // Find the longest sequence among the N_MAX computed lengths (outputs[0..N_MAX-1]).
    long max = 0;
    int ind_max = 0;
    for (int i = 0; i < N_MAX; i++){
        if (outputs[i] > max) {
            max = outputs[i];
            ind_max = i;
        }
    }

    std::cout << max << std::endl;
    std::cout << ind_max << std::endl;

    free(inputs);
    free(outputs);

    return 0;
}
66bd3734164698cd0101c74abc952ebfa12aa8aa.cu
#include <iostream>
#include <math.h>
#include <algorithm>

#define N 1000
#define N_MAX 1000000

// Despite its name, this kernel computes Collatz sequence lengths:
// each of the N threads walks N consecutive starting values and stores
// the length of the sequence for every value it visits.
__global__ void add_vectors(int *inputs, long *outputs)
{
    int i = inputs[threadIdx.x];
    int max = i + N;
    while (i < max){
        int length = 1;
        long start = i;
        if (start == 0){
            start = 1;
        }
        while (start != 1){
            if (start % 2 == 0){
                start = start / 2;
            }
            else{
                start = 3 * start + 1;
            }
            length += 1;
        }
        outputs[i] = length;
        i += 1;
    }
}

int main(void)
{
    int *inputs;
    long *outputs;
    int *d_inputs;
    long *d_outputs;
    int size = N * sizeof(int);
    int size_max = N_MAX * sizeof(long);

    inputs = (int *)malloc(size);
    outputs = (long *)malloc(size_max);

    // Thread t starts at N*t, so the single block of N threads covers [0, N_MAX).
    for (int i = 0; i < N; ++i){
        inputs[i] = N*i;
    }

    cudaMalloc(&d_inputs, size);
    cudaMalloc(&d_outputs, size_max);

    cudaMemcpy(d_inputs, inputs, size, cudaMemcpyHostToDevice);

    add_vectors<<<1, N>>>(d_inputs, d_outputs);

    cudaMemcpy(outputs, d_outputs, size_max, cudaMemcpyDeviceToHost);

    cudaFree(d_inputs);
    cudaFree(d_outputs);

    // for (int i = 0; i < 10000; ++i){
    //     std::cout << outputs[i] << std::endl;
    // }

    std::cout << "------------" << std::endl;

    // Find the longest sequence among the N_MAX computed lengths (outputs[0..N_MAX-1]).
    long max = 0;
    int ind_max = 0;
    for (int i = 0; i < N_MAX; i++){
        if (outputs[i] > max) {
            max = outputs[i];
            ind_max = i;
        }
    }

    std::cout << max << std::endl;
    std::cout << ind_max << std::endl;

    free(inputs);
    free(outputs);

    return 0;
}
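A minimal host-side cross-check for the Collatz kernel above; collatz_length and verify are hypothetical helpers added here for illustration, assuming outputs[] holds the lengths copied back from the device.

// Sketch of a CPU reference check (not part of the original file).
#include <cstdio>

// CPU reference: length of the Collatz sequence starting at n (n == 0 is treated as 1, as in the kernel).
static long collatz_length(long n) {
    if (n == 0) n = 1;
    long length = 1;
    while (n != 1) {
        n = (n % 2 == 0) ? n / 2 : 3 * n + 1;
        length += 1;
    }
    return length;
}

// Compare the first `count` device results against the CPU reference.
static int verify(const long *outputs, int count) {
    int mismatches = 0;
    for (int i = 0; i < count; ++i) {
        if (outputs[i] != collatz_length(i)) {
            std::printf("mismatch at %d: gpu=%ld cpu=%ld\n", i, outputs[i], collatz_length(i));
            ++mismatches;
        }
    }
    return mismatches;
}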
9fa7a0d4e9fc6d48ae9fe901d22e16f9b8600a9a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.

#include "codelets.h"

__global__ void FFT512_device( float2 *work );

#define rank 8

__global__ void FFT8_device_( float2 *work )
{
    int tid = threadIdx.x;
    int bid = blockIdx.y * gridDim.x + blockIdx.x;

    int lo = bid & (4096/rank/64-1);
    int hi = bid &~(4096/rank/64-1);

    int i = lo*64 + tid;
    work += hi * (rank*64) + i;

    float2 a[rank];
    load<rank>( a, work, 512 );
    FFT8( a );
    twiddle<rank>( a, i, 4096 );
    store<rank>( a, work, 512 );
}

extern "C" void FFT4096( float2 *work, int batch )
{
    hipLaunchKernelGGL(( FFT8_device_), dim3(grid2D(batch*(4096/rank)/64)), dim3(64) , 0, 0, work );
    hipLaunchKernelGGL(( FFT512_device), dim3(grid2D(batch*rank)), dim3(64) , 0, 0, work );
}
9fa7a0d4e9fc6d48ae9fe901d22e16f9b8600a9a.cu
// Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.

#include "codelets.h"

__global__ void FFT512_device( float2 *work );

#define rank 8

__global__ void FFT8_device_( float2 *work )
{
    int tid = threadIdx.x;
    int bid = blockIdx.y * gridDim.x + blockIdx.x;

    int lo = bid & (4096/rank/64-1);
    int hi = bid &~(4096/rank/64-1);

    int i = lo*64 + tid;
    work += hi * (rank*64) + i;

    float2 a[rank];
    load<rank>( a, work, 512 );
    FFT8( a );
    twiddle<rank>( a, i, 4096 );
    store<rank>( a, work, 512 );
}

extern "C" void FFT4096( float2 *work, int batch )
{
    FFT8_device_<<< grid2D(batch*(4096/rank)/64), 64 >>>( work );
    FFT512_device<<< grid2D(batch*rank), 64 >>>( work );
}
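The two launches in FFT4096 split a 4096-point transform into a radix-8 stage followed by 512-point transforms (4096 = 8 * 512). The snippet below is only an illustrative host-side check of the block-index bookkeeping in FFT8_device_, under the same assumed constants (rank = 8, 64 threads per block); grid2D and the device codelets are assumed to come from codelets.h as in the original.

// Illustrative check of the lo/hi split used by FFT8_device_ (not part of the original file).
#include <cstdio>

int main() {
    const int rank = 8;
    const int blocks_per_fft = 4096 / rank / 64;   // 8 blocks of 64 threads cover one 4096-point FFT
    for (int bid = 0; bid < 2 * blocks_per_fft; ++bid) {
        int lo = bid & (blocks_per_fft - 1);       // block position within its FFT
        int hi = bid & ~(blocks_per_fft - 1);      // bid rounded down to the first block of that FFT
        std::printf("bid=%2d -> lo=%d hi=%d\n", bid, lo, hi);
    }
    return 0;
}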
1d87780376fb25ca6c356325e1ae53b25d406ed8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <array> #include <cstddef> #include <bit> #include "cuda_superbee_kernels.h" #include "cuda_utils.h" #include <iostream> template <typename DTYPE> __device__ inline DTYPE limiter(DTYPE cr) { return max(0.0, max(min(1.0, 2.0 * cr), min(2.0, cr))); } template <typename DTYPE> __device__ inline DTYPE calcFlux ( const DTYPE velfac, const DTYPE velS, const DTYPE dt_tracer, const DTYPE dx, const DTYPE varS, const DTYPE varSM1, const DTYPE varSP1, const DTYPE varSP2, const DTYPE maskS, const DTYPE maskSM1, const DTYPE maskSP1 ) { const DTYPE scaledVel = velfac * velS; const DTYPE uCFL = abs(scaledVel * dt_tracer / dx); const DTYPE rjp = (varSP2 - varSP1) * maskSP1; const DTYPE rj = (varSP1 - varS) * maskS; const DTYPE rjm = (varS - varSM1) * maskSM1; DTYPE cr; DTYPE divisor = rj; const DTYPE epsilon = 1e-20; if (abs(divisor) < epsilon) { divisor = epsilon; } if (velS > 0) { cr = rjm / divisor; } else { cr = rjp / divisor; } cr = limiter(cr); return scaledVel * (varSP1 + varS) * 0.5 - abs(scaledVel) * ((1.0 - cr) + uCFL * cr) * rj * 0.5; } #define TILE 8 template <typename DTYPE> __global__ void SuperbeeKernelTiled( const DTYPE *var, const DTYPE *u_wgrid, const DTYPE *v_wgrid, const DTYPE *w_wgrid, const DTYPE *maskW, const DTYPE *dxt, const DTYPE *dyt, const DTYPE *dzw, const DTYPE *cost, const DTYPE *cosu, const DTYPE *dt_tracer, DTYPE *flux_east, DTYPE *flux_north, DTYPE *flux_top, int dim1, int dim2, int dim3 ){ const int EXT_TILE = TILE+3; __shared__ DTYPE shared_var[EXT_TILE][EXT_TILE][EXT_TILE]; __shared__ char shared_mask[EXT_TILE][EXT_TILE][EXT_TILE]; const int loadSize = (EXT_TILE) * (EXT_TILE) * (EXT_TILE); const int dim1Stride = dim2 * dim3; const int dim2Stride = dim3; const int dim3Stride = 1; // We need to load more elements than we have threads, so it's easier to have the flat indices const int flatThreadIdx = threadIdx.x + (blockDim.x * threadIdx.y) + (threadIdx.z * blockDim.x * blockDim.y); // dim3 offset into global mem (-1 to account for stencil start) int global_startX = (blockIdx.x * blockDim.x - 1); // dim2 offset into global mem int global_startY = (blockIdx.y * blockDim.y - 1); // dim1 offset into global mem int global_startZ = (blockIdx.z * blockDim.z - 1); for (int i = flatThreadIdx ; i < loadSize ; i += blockDim.x * blockDim.y * blockDim.z) { // local offset int blockX = i % (EXT_TILE); // x % y = x- (x/y * y) // local offset int blockY = (i / (EXT_TILE)) % (EXT_TILE); // local offset int blockZ = i / ((EXT_TILE) * (EXT_TILE)); int globalX = global_startX + blockX; int globalY = global_startY + blockY; int globalZ = global_startZ + blockZ; // check bounds bool blockXvalid = globalX >= 0 && globalX < dim3; bool blockYvalid = globalY >= 0 && globalY < dim2; bool blockZvalid = globalZ >= 0 && globalZ < dim1; if (blockXvalid && blockYvalid && blockZvalid) { int globalIndx = globalZ * dim1Stride + globalY * dim2Stride + globalX; shared_var[blockZ][blockY][blockX] = var[globalIndx]; shared_mask[blockZ][blockY][blockX] = maskW[globalIndx]; } else { shared_var[blockZ][blockY][blockX] = 0; shared_mask[blockZ][blockY][blockX] = 0; } } __syncthreads(); int global_d1 = blockIdx.z * blockDim.z + threadIdx.z; int global_d2 = blockIdx.y * blockDim.y + threadIdx.y; int global_d3 = blockIdx.x * blockDim.x + threadIdx.x; int local_d1 = threadIdx.z + 1; int local_d2 = threadIdx.y + 1; int local_d3 = threadIdx.x + 1; int flatResultIdx = global_d1*dim1Stride + global_d2 * dim2Stride + global_d3 
* dim3Stride; const DTYPE varS = shared_var[local_d1][local_d2][local_d3]; const char maskWs = shared_mask[local_d1][local_d2][local_d3]; DTYPE adv_fe = 0; DTYPE adv_fn = 0; DTYPE adv_ft = 0; if (global_d1 > 0 && global_d1 < dim1-2 && global_d2 > 1 && global_d2 < dim2-2 && global_d3 < dim3) { const DTYPE velS = u_wgrid[flatResultIdx]; const DTYPE varSM1 = shared_var[local_d1-1][local_d2][local_d3]; const DTYPE varSP1 = shared_var[local_d1+1][local_d2][local_d3]; const DTYPE varSP2 = shared_var[local_d1+2][local_d2][local_d3]; const char maskWm1 = shared_mask[local_d1-1][local_d2][local_d3]; const char maskWp1 = shared_mask[local_d1+1][local_d2][local_d3]; const char maskwp2 = shared_mask[local_d1+2][local_d2][local_d3]; const DTYPE maskUtr = maskWs * maskWp1; const DTYPE maskUtrP1 = maskWp1 * maskwp2; const DTYPE maskUtrM1 = maskWm1 * maskWs; const DTYPE dx = cost[global_d2] * dxt[global_d1]; adv_fe = calcFlux<DTYPE>(1, velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskUtr, maskUtrM1, maskUtrP1); } if (global_d2 > 0 && global_d2 < dim2-2 && global_d1 > 1 && global_d1 < dim1-2 && global_d3 < dim3) { const DTYPE velS = v_wgrid[flatResultIdx]; const DTYPE varSM1 = shared_var[local_d1][local_d2-1][local_d3]; const DTYPE varSP1 = shared_var[local_d1][local_d2+1][local_d3]; const DTYPE varSP2 = shared_var[local_d1][local_d2+2][local_d3]; const char maskWm1 = shared_mask[local_d1][local_d2-1][local_d3]; const char maskWp1 = shared_mask[local_d1][local_d2+1][local_d3]; const char maskwp2 = shared_mask[local_d1][local_d2+2][local_d3]; const DTYPE maskVtr = maskWs * maskWp1; const DTYPE maskVtrP1 = maskWp1 * maskwp2; const DTYPE maskVtrM1 = maskWm1 * maskWs; const DTYPE dx = cost[global_d2] * dyt[global_d2]; adv_fn = calcFlux<DTYPE>(cosu[global_d2], velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskVtr, maskVtrM1, maskVtrP1); } if (global_d3 < dim3-1 && global_d1 > 1 && global_d1 < dim1-2 && global_d2 > 1 && global_d2 < dim2-2) { const DTYPE velS = w_wgrid[flatResultIdx]; const DTYPE varSM1 = shared_var[local_d1][local_d2][local_d3-1]; const DTYPE varSP1 = shared_var[local_d1][local_d2][local_d3+1]; const DTYPE varSP2 = shared_var[local_d1][local_d2][local_d3+2]; const char maskWm1 = shared_mask[local_d1][local_d2][local_d3-1]; const char maskWp1 = shared_mask[local_d1][local_d2][local_d3+1]; const char maskwp2 = shared_mask[local_d1][local_d2][local_d3+2]; const DTYPE maskWtr = maskWs * maskWp1; const DTYPE maskWtrP1 = maskWp1 * maskwp2; const DTYPE maskWtrM1 = maskWm1 * maskWs; const DTYPE dx = dzw[global_d3]; adv_ft = calcFlux<DTYPE>(1, velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskWtr, maskWtrM1, maskWtrP1); } if (global_d1 < dim1 && global_d2 < dim2 && global_d3 < dim3) { flux_east[flatResultIdx] = adv_fe; flux_north[flatResultIdx] = adv_fn; flux_top[flatResultIdx] = adv_ft; } } template <typename DTYPE> __global__ void SuperbeeKernel( const DTYPE *var, const DTYPE *u_wgrid, const DTYPE *v_wgrid, const DTYPE *w_wgrid, const DTYPE *maskW, const DTYPE *dxt, const DTYPE *dyt, const DTYPE *dzw, const DTYPE *cost, const DTYPE *cosu, const DTYPE *dt_tracer, DTYPE *flux_east, DTYPE *flux_north, DTYPE *flux_top, int dim1, int dim2, int dim3 ){ const int dim1Stride = dim2 * dim3; const int dim2Stride = dim3; const int dim3Stride = 1; for (std::int64_t index = blockIdx.x * blockDim.x + threadIdx.x; index < dim1Stride * dim1; index += blockDim.x * gridDim.x) { int x = index / dim1Stride; int y = (index / dim3) % dim2; int z = index % dim3; const int s = index; const DTYPE varS = 
var[s]; const DTYPE maskWs = maskW[s]; DTYPE adv_fe = 0; DTYPE adv_fn = 0; DTYPE adv_ft = 0; if (x > 0 && x < dim1-2 && y > 1 && y < dim2-2) { const DTYPE velS = u_wgrid[s]; DTYPE maskUtr = 0; DTYPE maskUtrP1 = 0; DTYPE maskUtrM1 = 0; const int s1m1 = index - dim1Stride; const int s1p1 = index + dim1Stride; const int s1p2 = index + 2*dim1Stride; const DTYPE maskWm1 = maskW[s1m1]; const DTYPE maskWp1 = maskW[s1p1]; const DTYPE maskwp2 = maskW[s1p2]; const DTYPE varSM1 = var[s1m1]; const DTYPE varSP1 = var[s1p1]; const DTYPE varSP2 = var[s1p2]; if (x < dim1-1) { maskUtr = maskWs * maskWp1; maskUtrP1 = maskWp1 * maskwp2; maskUtrM1 = maskWm1 * maskWs; } const DTYPE dx = cost[y] * dxt[x]; adv_fe = calcFlux<DTYPE>(1, velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskUtr, maskUtrM1, maskUtrP1); } if (y > 0 && y < dim2-2 && x > 1 && x < dim1-2) { const DTYPE velS = v_wgrid[s]; DTYPE maskVtr = 0; DTYPE maskVtrP1 = 0; DTYPE maskVtrM1 = 0; const int s1m1 = index - dim2Stride; const int s1p1 = index + dim2Stride; const int s1p2 = index + 2*dim2Stride; const DTYPE maskWm1 = maskW[s1m1]; const DTYPE maskWp1 = maskW[s1p1]; const DTYPE maskwp2 = maskW[s1p2]; const DTYPE varSM1 = var[s1m1]; const DTYPE varSP1 = var[s1p1]; const DTYPE varSP2 = var[s1p2]; if (y < dim2-1) { maskVtr = maskWs * maskWp1; maskVtrP1 = maskWp1 * maskwp2; maskVtrM1 = maskWm1 * maskWs; } const DTYPE dx = cost[y] * dyt[y]; adv_fn = calcFlux<DTYPE>(cosu[y], velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskVtr, maskVtrM1, maskVtrP1); } if (z < dim3-1 && x > 1 && x < dim1-2 && y > 1 && y < dim2-2) { const DTYPE velS = w_wgrid[s]; DTYPE maskWtr = 0; DTYPE maskWtrP1 = 0; DTYPE maskWtrM1 = 0; const int s1m1 = index - dim3Stride; const int s1p1 = index + dim3Stride; const int s1p2 = index + 2*dim3Stride; DTYPE maskWm1 = 0; DTYPE varSM1 = 0 ; if (z != 0) { maskWm1 = maskW[s1m1]; varSM1 = var[s1m1]; } DTYPE maskwp2 = 0; DTYPE varSP2 = 0; if (z < dim3-2) { maskwp2 = maskW[s1p2]; varSP2 = var[s1p2]; } const DTYPE varSP1 = var[s1p1]; const DTYPE maskWp1 = maskW[s1p1]; if (z < dim3-1) { maskWtr = maskWs * maskWp1; maskWtrP1 = maskWp1 * maskwp2; maskWtrM1 = maskWm1 * maskWs; } const DTYPE dx = dzw[z]; adv_ft = calcFlux<DTYPE>(1, velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskWtr, maskWtrM1, maskWtrP1); } flux_east[index] = adv_fe; flux_north[index] = adv_fn; flux_top[index] = adv_ft; } } struct SuperbeeDescriptor { std::int64_t dim1; std::int64_t dim2; std::int64_t dim3; }; // Unpacks a descriptor object from a byte string. 
template <typename T> const T* UnpackDescriptor(const char* opaque, std::size_t opaque_len) { if (opaque_len != sizeof(T)) { throw std::runtime_error("Descriptor was not encoded correctly."); } return reinterpret_cast<const T*>(opaque); } template <typename DTYPE> void CudaSuperbeeTiled(hipStream_t stream, void** buffers, const char* opaque, std::size_t opaque_len) { const auto& descriptor = *UnpackDescriptor<SuperbeeDescriptor>(opaque, opaque_len); const std::int64_t xdim = descriptor.dim1; const std::int64_t ydim = descriptor.dim2; const std::int64_t zdim = descriptor.dim3; const std::int64_t allDims = xdim*ydim*zdim; const DTYPE* var = reinterpret_cast<const DTYPE*>(buffers[0]); const DTYPE* u_wgrid = reinterpret_cast<const DTYPE*>(buffers[1]); const DTYPE* v_wgrid = reinterpret_cast<const DTYPE*>(buffers[2]); const DTYPE* w_wgrid = reinterpret_cast<const DTYPE*>(buffers[3]); const DTYPE* maskW = reinterpret_cast<const DTYPE*>(buffers[4]); const DTYPE* dxt = reinterpret_cast<const DTYPE*>(buffers[5]); const DTYPE* dyt = reinterpret_cast<const DTYPE*>(buffers[6]); const DTYPE* dzw = reinterpret_cast<const DTYPE*>(buffers[7]); const DTYPE* cost = reinterpret_cast<const DTYPE*>(buffers[8]); const DTYPE* cosu = reinterpret_cast<const DTYPE*>(buffers[9]); const DTYPE* dt_tracer = reinterpret_cast<const DTYPE*>(buffers[10]); DTYPE* flux_east = reinterpret_cast<DTYPE*>(buffers[11]); // output1 DTYPE* flux_north = reinterpret_cast<DTYPE*>(buffers[12]); // output2 DTYPE* flux_top = reinterpret_cast<DTYPE*>(buffers[13]); // output3 dim3 blocksize(TILE, TILE, TILE); int numblocks1 = (xdim + TILE-1) / TILE; int numblocks2 = (ydim + TILE-1) / TILE; int numblocks3 = (zdim + TILE-1) / TILE; dim3 gridsize(numblocks3, numblocks2, numblocks1); hipLaunchKernelGGL(( SuperbeeKernelTiled<DTYPE>), dim3(gridsize), dim3(blocksize), 0, stream, var , u_wgrid , v_wgrid , w_wgrid , maskW , dxt , dyt , dzw , cost , cosu , dt_tracer , flux_east , flux_north , flux_top , xdim , ydim , zdim ); gpuErrchk(hipPeekAtLastError()); } template <typename DTYPE> void CudaSuperbee(hipStream_t stream, void** buffers, const char* opaque, std::size_t opaque_len) { const auto& descriptor = *UnpackDescriptor<SuperbeeDescriptor>(opaque, opaque_len); const std::int64_t dim1 = descriptor.dim1; const std::int64_t dim2 = descriptor.dim2; const std::int64_t dim3 = descriptor.dim3; const std::int64_t allDims = dim1*dim2*dim3; const DTYPE* var = reinterpret_cast<const DTYPE*>(buffers[0]); const DTYPE* u_wgrid = reinterpret_cast<const DTYPE*>(buffers[1]); const DTYPE* v_wgrid = reinterpret_cast<const DTYPE*>(buffers[2]); const DTYPE* w_wgrid = reinterpret_cast<const DTYPE*>(buffers[3]); const DTYPE* maskW = reinterpret_cast<const DTYPE*>(buffers[4]); const DTYPE* dxt = reinterpret_cast<const DTYPE*>(buffers[5]); const DTYPE* dyt = reinterpret_cast<const DTYPE*>(buffers[6]); const DTYPE* dzw = reinterpret_cast<const DTYPE*>(buffers[7]); const DTYPE* cost = reinterpret_cast<const DTYPE*>(buffers[8]); const DTYPE* cosu = reinterpret_cast<const DTYPE*>(buffers[9]); const DTYPE* dt_tracer = reinterpret_cast<const DTYPE*>(buffers[10]); DTYPE* flux_east = reinterpret_cast<DTYPE*>(buffers[11]); // output1 DTYPE* flux_north = reinterpret_cast<DTYPE*>(buffers[12]); // output2 DTYPE* flux_top = reinterpret_cast<DTYPE*>(buffers[13]); // output3 const int BLOCK_SIZE = 128; const std::int64_t grid_dim = std::min<std::int64_t>(1024, (allDims + BLOCK_SIZE - 1) / BLOCK_SIZE); hipLaunchKernelGGL(( SuperbeeKernel<DTYPE>), dim3(grid_dim), dim3(BLOCK_SIZE), 0, 
stream, var , u_wgrid , v_wgrid , w_wgrid , maskW , dxt , dyt , dzw , cost , cosu , dt_tracer , flux_east , flux_north , flux_top , dim1 , dim2 , dim3 ); gpuErrchk(hipPeekAtLastError()); } void CudaSuperbeeFloat(hipStream_t stream, void** buffers, const char* opaque, std::size_t opaque_len) { CudaSuperbeeTiled<float>(stream, buffers, opaque, opaque_len); } void CudaSuperbeeDouble(hipStream_t stream, void** buffers, const char* opaque, std::size_t opaque_len) { CudaSuperbeeTiled<double>(stream, buffers, opaque, opaque_len); }
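A quick host-side illustration of the superbee limiter used by calcFlux in the file above; superbee_limiter is a hypothetical restatement of the same formula, max(0, max(min(1, 2*cr), min(2, cr))), added only as a spot check.

// Host restatement of the device-side limiter() above (illustrative, not part of the original file).
#include <algorithm>
#include <cstdio>

static double superbee_limiter(double cr) {
    return std::max(0.0, std::max(std::min(1.0, 2.0 * cr), std::min(2.0, cr)));
}

int main() {
    // Negative slope ratios are clipped to 0, smooth regions (cr near 1) pass through,
    // and the limiter never exceeds 2.
    const double samples[] = {-1.0, 0.25, 0.5, 1.0, 1.5, 4.0};
    for (double cr : samples) {
        std::printf("cr = %5.2f -> limiter = %4.2f\n", cr, superbee_limiter(cr));
    }
    return 0;
}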
1d87780376fb25ca6c356325e1ae53b25d406ed8.cu
#include <array> #include <cstddef> #include <bit> #include "cuda_superbee_kernels.h" #include "cuda_utils.h" #include <iostream> template <typename DTYPE> __device__ inline DTYPE limiter(DTYPE cr) { return max(0.0, max(min(1.0, 2.0 * cr), min(2.0, cr))); } template <typename DTYPE> __device__ inline DTYPE calcFlux ( const DTYPE velfac, const DTYPE velS, const DTYPE dt_tracer, const DTYPE dx, const DTYPE varS, const DTYPE varSM1, const DTYPE varSP1, const DTYPE varSP2, const DTYPE maskS, const DTYPE maskSM1, const DTYPE maskSP1 ) { const DTYPE scaledVel = velfac * velS; const DTYPE uCFL = abs(scaledVel * dt_tracer / dx); const DTYPE rjp = (varSP2 - varSP1) * maskSP1; const DTYPE rj = (varSP1 - varS) * maskS; const DTYPE rjm = (varS - varSM1) * maskSM1; DTYPE cr; DTYPE divisor = rj; const DTYPE epsilon = 1e-20; if (abs(divisor) < epsilon) { divisor = epsilon; } if (velS > 0) { cr = rjm / divisor; } else { cr = rjp / divisor; } cr = limiter(cr); return scaledVel * (varSP1 + varS) * 0.5 - abs(scaledVel) * ((1.0 - cr) + uCFL * cr) * rj * 0.5; } #define TILE 8 template <typename DTYPE> __global__ void SuperbeeKernelTiled( const DTYPE *var, const DTYPE *u_wgrid, const DTYPE *v_wgrid, const DTYPE *w_wgrid, const DTYPE *maskW, const DTYPE *dxt, const DTYPE *dyt, const DTYPE *dzw, const DTYPE *cost, const DTYPE *cosu, const DTYPE *dt_tracer, DTYPE *flux_east, DTYPE *flux_north, DTYPE *flux_top, int dim1, int dim2, int dim3 ){ const int EXT_TILE = TILE+3; __shared__ DTYPE shared_var[EXT_TILE][EXT_TILE][EXT_TILE]; __shared__ char shared_mask[EXT_TILE][EXT_TILE][EXT_TILE]; const int loadSize = (EXT_TILE) * (EXT_TILE) * (EXT_TILE); const int dim1Stride = dim2 * dim3; const int dim2Stride = dim3; const int dim3Stride = 1; // We need to load more elements than we have threads, so it's easier to have the flat indices const int flatThreadIdx = threadIdx.x + (blockDim.x * threadIdx.y) + (threadIdx.z * blockDim.x * blockDim.y); // dim3 offset into global mem (-1 to account for stencil start) int global_startX = (blockIdx.x * blockDim.x - 1); // dim2 offset into global mem int global_startY = (blockIdx.y * blockDim.y - 1); // dim1 offset into global mem int global_startZ = (blockIdx.z * blockDim.z - 1); for (int i = flatThreadIdx ; i < loadSize ; i += blockDim.x * blockDim.y * blockDim.z) { // local offset int blockX = i % (EXT_TILE); // x % y = x- (x/y * y) // local offset int blockY = (i / (EXT_TILE)) % (EXT_TILE); // local offset int blockZ = i / ((EXT_TILE) * (EXT_TILE)); int globalX = global_startX + blockX; int globalY = global_startY + blockY; int globalZ = global_startZ + blockZ; // check bounds bool blockXvalid = globalX >= 0 && globalX < dim3; bool blockYvalid = globalY >= 0 && globalY < dim2; bool blockZvalid = globalZ >= 0 && globalZ < dim1; if (blockXvalid && blockYvalid && blockZvalid) { int globalIndx = globalZ * dim1Stride + globalY * dim2Stride + globalX; shared_var[blockZ][blockY][blockX] = var[globalIndx]; shared_mask[blockZ][blockY][blockX] = maskW[globalIndx]; } else { shared_var[blockZ][blockY][blockX] = 0; shared_mask[blockZ][blockY][blockX] = 0; } } __syncthreads(); int global_d1 = blockIdx.z * blockDim.z + threadIdx.z; int global_d2 = blockIdx.y * blockDim.y + threadIdx.y; int global_d3 = blockIdx.x * blockDim.x + threadIdx.x; int local_d1 = threadIdx.z + 1; int local_d2 = threadIdx.y + 1; int local_d3 = threadIdx.x + 1; int flatResultIdx = global_d1*dim1Stride + global_d2 * dim2Stride + global_d3 * dim3Stride; const DTYPE varS = shared_var[local_d1][local_d2][local_d3]; const char 
maskWs = shared_mask[local_d1][local_d2][local_d3]; DTYPE adv_fe = 0; DTYPE adv_fn = 0; DTYPE adv_ft = 0; if (global_d1 > 0 && global_d1 < dim1-2 && global_d2 > 1 && global_d2 < dim2-2 && global_d3 < dim3) { const DTYPE velS = u_wgrid[flatResultIdx]; const DTYPE varSM1 = shared_var[local_d1-1][local_d2][local_d3]; const DTYPE varSP1 = shared_var[local_d1+1][local_d2][local_d3]; const DTYPE varSP2 = shared_var[local_d1+2][local_d2][local_d3]; const char maskWm1 = shared_mask[local_d1-1][local_d2][local_d3]; const char maskWp1 = shared_mask[local_d1+1][local_d2][local_d3]; const char maskwp2 = shared_mask[local_d1+2][local_d2][local_d3]; const DTYPE maskUtr = maskWs * maskWp1; const DTYPE maskUtrP1 = maskWp1 * maskwp2; const DTYPE maskUtrM1 = maskWm1 * maskWs; const DTYPE dx = cost[global_d2] * dxt[global_d1]; adv_fe = calcFlux<DTYPE>(1, velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskUtr, maskUtrM1, maskUtrP1); } if (global_d2 > 0 && global_d2 < dim2-2 && global_d1 > 1 && global_d1 < dim1-2 && global_d3 < dim3) { const DTYPE velS = v_wgrid[flatResultIdx]; const DTYPE varSM1 = shared_var[local_d1][local_d2-1][local_d3]; const DTYPE varSP1 = shared_var[local_d1][local_d2+1][local_d3]; const DTYPE varSP2 = shared_var[local_d1][local_d2+2][local_d3]; const char maskWm1 = shared_mask[local_d1][local_d2-1][local_d3]; const char maskWp1 = shared_mask[local_d1][local_d2+1][local_d3]; const char maskwp2 = shared_mask[local_d1][local_d2+2][local_d3]; const DTYPE maskVtr = maskWs * maskWp1; const DTYPE maskVtrP1 = maskWp1 * maskwp2; const DTYPE maskVtrM1 = maskWm1 * maskWs; const DTYPE dx = cost[global_d2] * dyt[global_d2]; adv_fn = calcFlux<DTYPE>(cosu[global_d2], velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskVtr, maskVtrM1, maskVtrP1); } if (global_d3 < dim3-1 && global_d1 > 1 && global_d1 < dim1-2 && global_d2 > 1 && global_d2 < dim2-2) { const DTYPE velS = w_wgrid[flatResultIdx]; const DTYPE varSM1 = shared_var[local_d1][local_d2][local_d3-1]; const DTYPE varSP1 = shared_var[local_d1][local_d2][local_d3+1]; const DTYPE varSP2 = shared_var[local_d1][local_d2][local_d3+2]; const char maskWm1 = shared_mask[local_d1][local_d2][local_d3-1]; const char maskWp1 = shared_mask[local_d1][local_d2][local_d3+1]; const char maskwp2 = shared_mask[local_d1][local_d2][local_d3+2]; const DTYPE maskWtr = maskWs * maskWp1; const DTYPE maskWtrP1 = maskWp1 * maskwp2; const DTYPE maskWtrM1 = maskWm1 * maskWs; const DTYPE dx = dzw[global_d3]; adv_ft = calcFlux<DTYPE>(1, velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskWtr, maskWtrM1, maskWtrP1); } if (global_d1 < dim1 && global_d2 < dim2 && global_d3 < dim3) { flux_east[flatResultIdx] = adv_fe; flux_north[flatResultIdx] = adv_fn; flux_top[flatResultIdx] = adv_ft; } } template <typename DTYPE> __global__ void SuperbeeKernel( const DTYPE *var, const DTYPE *u_wgrid, const DTYPE *v_wgrid, const DTYPE *w_wgrid, const DTYPE *maskW, const DTYPE *dxt, const DTYPE *dyt, const DTYPE *dzw, const DTYPE *cost, const DTYPE *cosu, const DTYPE *dt_tracer, DTYPE *flux_east, DTYPE *flux_north, DTYPE *flux_top, int dim1, int dim2, int dim3 ){ const int dim1Stride = dim2 * dim3; const int dim2Stride = dim3; const int dim3Stride = 1; for (std::int64_t index = blockIdx.x * blockDim.x + threadIdx.x; index < dim1Stride * dim1; index += blockDim.x * gridDim.x) { int x = index / dim1Stride; int y = (index / dim3) % dim2; int z = index % dim3; const int s = index; const DTYPE varS = var[s]; const DTYPE maskWs = maskW[s]; DTYPE adv_fe = 0; DTYPE adv_fn = 0; DTYPE adv_ft = 
0; if (x > 0 && x < dim1-2 && y > 1 && y < dim2-2) { const DTYPE velS = u_wgrid[s]; DTYPE maskUtr = 0; DTYPE maskUtrP1 = 0; DTYPE maskUtrM1 = 0; const int s1m1 = index - dim1Stride; const int s1p1 = index + dim1Stride; const int s1p2 = index + 2*dim1Stride; const DTYPE maskWm1 = maskW[s1m1]; const DTYPE maskWp1 = maskW[s1p1]; const DTYPE maskwp2 = maskW[s1p2]; const DTYPE varSM1 = var[s1m1]; const DTYPE varSP1 = var[s1p1]; const DTYPE varSP2 = var[s1p2]; if (x < dim1-1) { maskUtr = maskWs * maskWp1; maskUtrP1 = maskWp1 * maskwp2; maskUtrM1 = maskWm1 * maskWs; } const DTYPE dx = cost[y] * dxt[x]; adv_fe = calcFlux<DTYPE>(1, velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskUtr, maskUtrM1, maskUtrP1); } if (y > 0 && y < dim2-2 && x > 1 && x < dim1-2) { const DTYPE velS = v_wgrid[s]; DTYPE maskVtr = 0; DTYPE maskVtrP1 = 0; DTYPE maskVtrM1 = 0; const int s1m1 = index - dim2Stride; const int s1p1 = index + dim2Stride; const int s1p2 = index + 2*dim2Stride; const DTYPE maskWm1 = maskW[s1m1]; const DTYPE maskWp1 = maskW[s1p1]; const DTYPE maskwp2 = maskW[s1p2]; const DTYPE varSM1 = var[s1m1]; const DTYPE varSP1 = var[s1p1]; const DTYPE varSP2 = var[s1p2]; if (y < dim2-1) { maskVtr = maskWs * maskWp1; maskVtrP1 = maskWp1 * maskwp2; maskVtrM1 = maskWm1 * maskWs; } const DTYPE dx = cost[y] * dyt[y]; adv_fn = calcFlux<DTYPE>(cosu[y], velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskVtr, maskVtrM1, maskVtrP1); } if (z < dim3-1 && x > 1 && x < dim1-2 && y > 1 && y < dim2-2) { const DTYPE velS = w_wgrid[s]; DTYPE maskWtr = 0; DTYPE maskWtrP1 = 0; DTYPE maskWtrM1 = 0; const int s1m1 = index - dim3Stride; const int s1p1 = index + dim3Stride; const int s1p2 = index + 2*dim3Stride; DTYPE maskWm1 = 0; DTYPE varSM1 = 0 ; if (z != 0) { maskWm1 = maskW[s1m1]; varSM1 = var[s1m1]; } DTYPE maskwp2 = 0; DTYPE varSP2 = 0; if (z < dim3-2) { maskwp2 = maskW[s1p2]; varSP2 = var[s1p2]; } const DTYPE varSP1 = var[s1p1]; const DTYPE maskWp1 = maskW[s1p1]; if (z < dim3-1) { maskWtr = maskWs * maskWp1; maskWtrP1 = maskWp1 * maskwp2; maskWtrM1 = maskWm1 * maskWs; } const DTYPE dx = dzw[z]; adv_ft = calcFlux<DTYPE>(1, velS, *dt_tracer, dx, varS, varSM1, varSP1, varSP2, maskWtr, maskWtrM1, maskWtrP1); } flux_east[index] = adv_fe; flux_north[index] = adv_fn; flux_top[index] = adv_ft; } } struct SuperbeeDescriptor { std::int64_t dim1; std::int64_t dim2; std::int64_t dim3; }; // Unpacks a descriptor object from a byte string. 
template <typename T> const T* UnpackDescriptor(const char* opaque, std::size_t opaque_len) { if (opaque_len != sizeof(T)) { throw std::runtime_error("Descriptor was not encoded correctly."); } return reinterpret_cast<const T*>(opaque); } template <typename DTYPE> void CudaSuperbeeTiled(cudaStream_t stream, void** buffers, const char* opaque, std::size_t opaque_len) { const auto& descriptor = *UnpackDescriptor<SuperbeeDescriptor>(opaque, opaque_len); const std::int64_t xdim = descriptor.dim1; const std::int64_t ydim = descriptor.dim2; const std::int64_t zdim = descriptor.dim3; const std::int64_t allDims = xdim*ydim*zdim; const DTYPE* var = reinterpret_cast<const DTYPE*>(buffers[0]); const DTYPE* u_wgrid = reinterpret_cast<const DTYPE*>(buffers[1]); const DTYPE* v_wgrid = reinterpret_cast<const DTYPE*>(buffers[2]); const DTYPE* w_wgrid = reinterpret_cast<const DTYPE*>(buffers[3]); const DTYPE* maskW = reinterpret_cast<const DTYPE*>(buffers[4]); const DTYPE* dxt = reinterpret_cast<const DTYPE*>(buffers[5]); const DTYPE* dyt = reinterpret_cast<const DTYPE*>(buffers[6]); const DTYPE* dzw = reinterpret_cast<const DTYPE*>(buffers[7]); const DTYPE* cost = reinterpret_cast<const DTYPE*>(buffers[8]); const DTYPE* cosu = reinterpret_cast<const DTYPE*>(buffers[9]); const DTYPE* dt_tracer = reinterpret_cast<const DTYPE*>(buffers[10]); DTYPE* flux_east = reinterpret_cast<DTYPE*>(buffers[11]); // output1 DTYPE* flux_north = reinterpret_cast<DTYPE*>(buffers[12]); // output2 DTYPE* flux_top = reinterpret_cast<DTYPE*>(buffers[13]); // output3 dim3 blocksize(TILE, TILE, TILE); int numblocks1 = (xdim + TILE-1) / TILE; int numblocks2 = (ydim + TILE-1) / TILE; int numblocks3 = (zdim + TILE-1) / TILE; dim3 gridsize(numblocks3, numblocks2, numblocks1); SuperbeeKernelTiled<DTYPE><<<gridsize, blocksize, 0, stream>>>( var , u_wgrid , v_wgrid , w_wgrid , maskW , dxt , dyt , dzw , cost , cosu , dt_tracer , flux_east , flux_north , flux_top , xdim , ydim , zdim ); gpuErrchk(cudaPeekAtLastError()); } template <typename DTYPE> void CudaSuperbee(cudaStream_t stream, void** buffers, const char* opaque, std::size_t opaque_len) { const auto& descriptor = *UnpackDescriptor<SuperbeeDescriptor>(opaque, opaque_len); const std::int64_t dim1 = descriptor.dim1; const std::int64_t dim2 = descriptor.dim2; const std::int64_t dim3 = descriptor.dim3; const std::int64_t allDims = dim1*dim2*dim3; const DTYPE* var = reinterpret_cast<const DTYPE*>(buffers[0]); const DTYPE* u_wgrid = reinterpret_cast<const DTYPE*>(buffers[1]); const DTYPE* v_wgrid = reinterpret_cast<const DTYPE*>(buffers[2]); const DTYPE* w_wgrid = reinterpret_cast<const DTYPE*>(buffers[3]); const DTYPE* maskW = reinterpret_cast<const DTYPE*>(buffers[4]); const DTYPE* dxt = reinterpret_cast<const DTYPE*>(buffers[5]); const DTYPE* dyt = reinterpret_cast<const DTYPE*>(buffers[6]); const DTYPE* dzw = reinterpret_cast<const DTYPE*>(buffers[7]); const DTYPE* cost = reinterpret_cast<const DTYPE*>(buffers[8]); const DTYPE* cosu = reinterpret_cast<const DTYPE*>(buffers[9]); const DTYPE* dt_tracer = reinterpret_cast<const DTYPE*>(buffers[10]); DTYPE* flux_east = reinterpret_cast<DTYPE*>(buffers[11]); // output1 DTYPE* flux_north = reinterpret_cast<DTYPE*>(buffers[12]); // output2 DTYPE* flux_top = reinterpret_cast<DTYPE*>(buffers[13]); // output3 const int BLOCK_SIZE = 128; const std::int64_t grid_dim = std::min<std::int64_t>(1024, (allDims + BLOCK_SIZE - 1) / BLOCK_SIZE); SuperbeeKernel<DTYPE><<<grid_dim, BLOCK_SIZE, 0, stream>>>( var , u_wgrid , v_wgrid , w_wgrid , maskW , dxt , 
dyt , dzw , cost , cosu , dt_tracer , flux_east , flux_north , flux_top , dim1 , dim2 , dim3 ); gpuErrchk(cudaPeekAtLastError()); } void CudaSuperbeeFloat(cudaStream_t stream, void** buffers, const char* opaque, std::size_t opaque_len) { CudaSuperbeeTiled<float>(stream, buffers, opaque, opaque_len); } void CudaSuperbeeDouble(cudaStream_t stream, void** buffers, const char* opaque, std::size_t opaque_len) { CudaSuperbeeTiled<double>(stream, buffers, opaque, opaque_len); }
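// A minimal host-side sketch (an assumption for illustration, not part of the original
// file) of how the opaque byte string consumed by UnpackDescriptor could be produced:
// the SuperbeeDescriptor struct is viewed as sizeof(SuperbeeDescriptor) raw bytes. The
// helper name PackSuperbeeDescriptor is hypothetical.
//
//   std::string PackSuperbeeDescriptor(const SuperbeeDescriptor &d) {
//     return std::string(reinterpret_cast<const char *>(&d), sizeof(SuperbeeDescriptor));
//   }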
a5b6efae1036a5cad5dd90a00eec8e09e285a772.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// 20-tap 1D stencil: each thread computes one output element as a weighted sum
// of src[idx-10] .. src[idx+9], using stencilWeight[0] .. stencilWeight[19].
__global__ void stencil(float *src, float *dst, int size, float *stencilWeight) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;

    float out = 0;
#pragma unroll
    for (int i = -10; i < 10; i++) {
        out += src[idx + i] * stencilWeight[i + 10];
    }
    dst[idx] = out;
}
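// A minimal host-side launch sketch (an illustrative assumption, not part of the original
// file); the wrapper name and buffer names are hypothetical. One thread is launched per
// candidate index; the kernel shifts each index by 11 and bounds-checks against size.
void launch_stencil(float *d_src, float *d_dst, int size, float *d_weights) {
    const int blockSize = 256;
    const int gridSize = (size + blockSize - 1) / blockSize;
    hipLaunchKernelGGL(stencil, dim3(gridSize), dim3(blockSize), 0, 0,
                       d_src, d_dst, size, d_weights);
}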
a5b6efae1036a5cad5dd90a00eec8e09e285a772.cu
#include "includes.h"

// 20-tap 1D stencil: each thread computes one output element as a weighted sum
// of src[idx-10] .. src[idx+9], using stencilWeight[0] .. stencilWeight[19].
__global__ void stencil(float *src, float *dst, int size, float *stencilWeight) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    idx += 11;
    if (idx >= size) return;

    float out = 0;
#pragma unroll
    for (int i = -10; i < 10; i++) {
        out += src[idx + i] * stencilWeight[i + 10];
    }
    dst[idx] = out;
}
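// The same hypothetical host-side launch sketch as in the HIP version above, written with
// the native CUDA triple-chevron syntax (an illustrative assumption, not part of the
// original file).
void launch_stencil(float *d_src, float *d_dst, int size, float *d_weights) {
    const int blockSize = 256;
    const int gridSize = (size + blockSize - 1) / blockSize;
    stencil<<<gridSize, blockSize>>>(d_src, d_dst, size, d_weights);
}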
af734e7aa08ab758eb2cac107193ab1669a03dbf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, const bool weight_by_label_freqs, const float* label_counts, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); if (weight_by_label_freqs) { loss[index] *= static_cast<Dtype>(label_counts[label_value]); } counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); const float* label_count_data = weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, weight_by_label_freqs_, label_count_data, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, const bool weight_by_label_freqs, const float* label_counts, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; if (weight_by_label_freqs) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] *= static_cast<Dtype>(label_counts[label_value]); } } counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); const float* label_count_data = weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, weight_by_label_freqs_, label_count_data, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
af734e7aa08ab758eb2cac107193ab1669a03dbf.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, const bool weight_by_label_freqs, const float* label_counts, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); if (weight_by_label_freqs) { loss[index] *= static_cast<Dtype>(label_counts[label_value]); } counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); const float* label_count_data = weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL; // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, weight_by_label_freqs_, label_count_data, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, const bool weight_by_label_freqs, const float* label_counts, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; if (weight_by_label_freqs) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] *= static_cast<Dtype>(label_counts[label_value]); } } counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); const float* label_count_data = weight_by_label_freqs_ ? label_counts_.gpu_data() : NULL; // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, weight_by_label_freqs_, label_count_data, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
210c112799ce709a9d3339b94ffa770be5898b02.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "fdl.h" const float MAX_VELOCITY_MAGNITUDE_SQR = 10.0f; #define ELEM(data, cols, i, j) (data)[(size_t)(i) * (cols) + (j)] __global__ void fdl_kernel_2d(int n, Vector3 *positions, Vector3 *velocities, const bool *edge_matrix) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= n ) { return; } // define force constants const float K_r = 0.2f; const float K_s = 1.0f; const float L = 2.2f; // get node position float p_x = positions[i].x; float p_y = positions[i].y; // apply force from each node in the graph float v_x = velocities[i].x; float v_y = velocities[i].y; for ( int j = 0; j < n; j++ ) { float dx = positions[j].x - p_x; float dy = positions[j].y - p_y; float dist = sqrt(dx * dx + dy * dy); if ( dist != 0 ) { float force = ELEM(edge_matrix, n, i, j) ? K_s * (L - dist) / dist : K_r / (dist * dist * dist); v_x -= force * dx; v_y -= force * dy; } } // adjust velocity to not exceed a certain magnitude float v_magnitude_sqr = v_x * v_x + v_y * v_y; if ( v_magnitude_sqr > MAX_VELOCITY_MAGNITUDE_SQR ) { v_x *= sqrt(MAX_VELOCITY_MAGNITUDE_SQR / v_magnitude_sqr); v_y *= sqrt(MAX_VELOCITY_MAGNITUDE_SQR / v_magnitude_sqr); } __syncthreads(); // update node position positions[i].x += v_x; positions[i].y += v_y; velocities[i].x = 0.1f * v_x; velocities[i].y = 0.1f * v_y; } __global__ void fdl_kernel_3d(int n, Vector3 *positions, Vector3 *velocities, const bool *edge_matrix) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= n ) { return; } // define force constants const float K_r = 0.2f; const float K_s = 1.0f; const float L = 2.2f; // get node position float p_x = positions[i].x; float p_y = positions[i].y; float p_z = positions[i].z; // apply force from each node in the graph float v_x = velocities[i].x; float v_y = velocities[i].y; float v_z = velocities[i].z; for ( int j = 0; j < n; j++ ) { float dx = positions[j].x - p_x; float dy = positions[j].y - p_y; float dz = positions[j].z - p_z; float dist = sqrt(dx * dx + dy * dy + dz * dz); if ( dist != 0 ) { float force = ELEM(edge_matrix, n, i, j) ? K_s * (L - dist) / dist : K_r / (dist * dist * dist); v_x -= force * dx; v_y -= force * dy; v_z -= force * dz; } } // adjust velocity to not exceed a certain magnitude float v_magnitude_sqr = v_x * v_x + v_y * v_y + v_z * v_z; if ( v_magnitude_sqr > MAX_VELOCITY_MAGNITUDE_SQR ) { v_x *= sqrt(MAX_VELOCITY_MAGNITUDE_SQR / v_magnitude_sqr); v_y *= sqrt(MAX_VELOCITY_MAGNITUDE_SQR / v_magnitude_sqr); v_z *= sqrt(MAX_VELOCITY_MAGNITUDE_SQR / v_magnitude_sqr); } __syncthreads(); // update node position positions[i].x += v_x; positions[i].y += v_y; positions[i].z += v_z; velocities[i].x = 0.1f * v_x; velocities[i].y = 0.1f * v_y; velocities[i].z = 0.1f * v_z; } /** * Perform one iteration of 2D force-directed layout on a graph. * * @param n * @param positions * @param velocities * @param edge_matrix */ void fdl_2d_gpu(int n, Vector3 *positions, Vector3 *velocities, const bool *edge_matrix) { const int BLOCK_SIZE = 256; const int GRID_SIZE = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; hipLaunchKernelGGL(( fdl_kernel_2d), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, n, positions, velocities, edge_matrix); CUDA_SAFE_CALL(hipGetLastError()); } /** * Perform one iteration of 3D force-directed layout on a graph. 
* * @param n * @param positions * @param velocities * @param edge_matrix */ void fdl_3d_gpu(int n, Vector3 *positions, Vector3 *velocities, const bool *edge_matrix) { const int BLOCK_SIZE = 256; const int GRID_SIZE = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; hipLaunchKernelGGL(( fdl_kernel_3d), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, n, positions, velocities, edge_matrix); CUDA_SAFE_CALL(hipGetLastError()); }
210c112799ce709a9d3339b94ffa770be5898b02.cu
#include "fdl.h" const float MAX_VELOCITY_MAGNITUDE_SQR = 10.0f; #define ELEM(data, cols, i, j) (data)[(size_t)(i) * (cols) + (j)] __global__ void fdl_kernel_2d(int n, Vector3 *positions, Vector3 *velocities, const bool *edge_matrix) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= n ) { return; } // define force constants const float K_r = 0.2f; const float K_s = 1.0f; const float L = 2.2f; // get node position float p_x = positions[i].x; float p_y = positions[i].y; // apply force from each node in the graph float v_x = velocities[i].x; float v_y = velocities[i].y; for ( int j = 0; j < n; j++ ) { float dx = positions[j].x - p_x; float dy = positions[j].y - p_y; float dist = sqrt(dx * dx + dy * dy); if ( dist != 0 ) { float force = ELEM(edge_matrix, n, i, j) ? K_s * (L - dist) / dist : K_r / (dist * dist * dist); v_x -= force * dx; v_y -= force * dy; } } // adjust velocity to not exceed a certain magnitude float v_magnitude_sqr = v_x * v_x + v_y * v_y; if ( v_magnitude_sqr > MAX_VELOCITY_MAGNITUDE_SQR ) { v_x *= sqrt(MAX_VELOCITY_MAGNITUDE_SQR / v_magnitude_sqr); v_y *= sqrt(MAX_VELOCITY_MAGNITUDE_SQR / v_magnitude_sqr); } __syncthreads(); // update node position positions[i].x += v_x; positions[i].y += v_y; velocities[i].x = 0.1f * v_x; velocities[i].y = 0.1f * v_y; } __global__ void fdl_kernel_3d(int n, Vector3 *positions, Vector3 *velocities, const bool *edge_matrix) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= n ) { return; } // define force constants const float K_r = 0.2f; const float K_s = 1.0f; const float L = 2.2f; // get node position float p_x = positions[i].x; float p_y = positions[i].y; float p_z = positions[i].z; // apply force from each node in the graph float v_x = velocities[i].x; float v_y = velocities[i].y; float v_z = velocities[i].z; for ( int j = 0; j < n; j++ ) { float dx = positions[j].x - p_x; float dy = positions[j].y - p_y; float dz = positions[j].z - p_z; float dist = sqrt(dx * dx + dy * dy + dz * dz); if ( dist != 0 ) { float force = ELEM(edge_matrix, n, i, j) ? K_s * (L - dist) / dist : K_r / (dist * dist * dist); v_x -= force * dx; v_y -= force * dy; v_z -= force * dz; } } // adjust velocity to not exceed a certain magnitude float v_magnitude_sqr = v_x * v_x + v_y * v_y + v_z * v_z; if ( v_magnitude_sqr > MAX_VELOCITY_MAGNITUDE_SQR ) { v_x *= sqrt(MAX_VELOCITY_MAGNITUDE_SQR / v_magnitude_sqr); v_y *= sqrt(MAX_VELOCITY_MAGNITUDE_SQR / v_magnitude_sqr); v_z *= sqrt(MAX_VELOCITY_MAGNITUDE_SQR / v_magnitude_sqr); } __syncthreads(); // update node position positions[i].x += v_x; positions[i].y += v_y; positions[i].z += v_z; velocities[i].x = 0.1f * v_x; velocities[i].y = 0.1f * v_y; velocities[i].z = 0.1f * v_z; } /** * Perform one iteration of 2D force-directed layout on a graph. * * @param n * @param positions * @param velocities * @param edge_matrix */ void fdl_2d_gpu(int n, Vector3 *positions, Vector3 *velocities, const bool *edge_matrix) { const int BLOCK_SIZE = 256; const int GRID_SIZE = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; fdl_kernel_2d<<<GRID_SIZE, BLOCK_SIZE>>>( n, positions, velocities, edge_matrix); CUDA_SAFE_CALL(cudaGetLastError()); } /** * Perform one iteration of 3D force-directed layout on a graph. 
* * @param n * @param positions * @param velocities * @param edge_matrix */ void fdl_3d_gpu(int n, Vector3 *positions, Vector3 *velocities, const bool *edge_matrix) { const int BLOCK_SIZE = 256; const int GRID_SIZE = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; fdl_kernel_3d<<<GRID_SIZE, BLOCK_SIZE>>>( n, positions, velocities, edge_matrix); CUDA_SAFE_CALL(cudaGetLastError()); }
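// Illustrative usage note (an assumption, not part of the original file): the ELEM macro
// treats edge_matrix as a row-major n x n adjacency matrix, so edge_matrix[i * n + j] is
// true when nodes i and j share an edge. All pointers are expected to be device buffers,
// e.g. with hypothetical names:
//
//   fdl_2d_gpu(n, d_positions, d_velocities, d_edge_matrix);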
b62682baad4fd8d0efac56c8bbf3643ce0394cc6.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/manifold/tsne.h> #include <datasets/digits.h> #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <stdio.h> #include <stdlib.h> #include <common/device_buffer.hpp> #include <cuml/common/cuml_allocator.hpp> #include <cuml/common/logger.hpp> #include <iostream> #include <metrics/scores.cuh> #include <vector> using namespace MLCommon; using namespace MLCommon::Score; using namespace MLCommon::Distance; using namespace MLCommon::Datasets::Digits; using namespace ML; class TSNETest : public ::testing::Test { protected: void basicTest() { raft::handle_t handle; // Allocate memory device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(), n * p); raft::update_device(X_d.data(), digits.data(), n * p, handle.get_stream()); CUDA_CHECK(hipStreamSynchronize(handle.get_stream())); device_buffer<float> Y_d(handle.get_device_allocator(), handle.get_stream(), n * 2); // Test Barnes Hut TSNE_fit(handle, X_d.data(), Y_d.data(), n, p, 2, 90, 0.5, 0.0025, 50, 100, 1e-5, 12, 250, 0.01, 200, 500, 1000, 1e-7, 0.5, 0.8, -1); // Move embeddings to host. // This can be used for printing if needed. float *embeddings_h = (float *)malloc(sizeof(float) * n * 2); assert(embeddings_h != NULL); raft::update_host(&embeddings_h[0], Y_d.data(), n * 2, handle.get_stream()); CUDA_CHECK(hipStreamSynchronize(handle.get_stream())); // Transpose the data int k = 0; float C_contiguous_embedding[n * 2]; for (int i = 0; i < n; i++) { for (int j = 0; j < 2; j++) C_contiguous_embedding[k++] = embeddings_h[j * n + i]; } // Move transposed embeddings back to device, as trustworthiness requires C contiguous format raft::update_device(Y_d.data(), C_contiguous_embedding, n * 2, handle.get_stream()); CUDA_CHECK(hipStreamSynchronize(handle.get_stream())); // Test trustworthiness score_bh = trustworthiness_score<float, raft::distance::DistanceType::EucUnexpandedL2Sqrt>( X_d.data(), Y_d.data(), n, p, 2, 5, handle.get_device_allocator(), handle.get_stream()); // Test Exact TSNE TSNE_fit(handle, X_d.data(), Y_d.data(), n, p, 2, 90, 0.5, 0.0025, 50, 100, 1e-5, 12, 250, 0.01, 200, 500, 1000, 1e-7, 0.5, 0.8, -1, CUML_LEVEL_INFO, false, false); raft::update_host(&embeddings_h[0], Y_d.data(), n * 2, handle.get_stream()); CUDA_CHECK(hipStreamSynchronize(handle.get_stream())); // Move embeddings to host. // This can be used for printing if needed. 
k = 0; for (int i = 0; i < n; i++) { for (int j = 0; j < 2; j++) C_contiguous_embedding[k++] = embeddings_h[j * n + i]; } // Move transposed embeddings back to device, as trustworthiness requires C contiguous format raft::update_device(Y_d.data(), C_contiguous_embedding, n * 2, handle.get_stream()); CUDA_CHECK(hipStreamSynchronize(handle.get_stream())); // Test trustworthiness score_exact = trustworthiness_score<float, raft::distance::DistanceType::EucUnexpandedL2Sqrt>( X_d.data(), Y_d.data(), n, p, 2, 5, handle.get_device_allocator(), handle.get_stream()); // Free space free(embeddings_h); } void SetUp() override { basicTest(); } void TearDown() override {} protected: int n = 1797; int p = 64; double score_bh; double score_exact; }; typedef TSNETest TSNETestF; TEST_F(TSNETestF, Result) { if (score_bh < 0.98) CUML_LOG_DEBUG("BH score = %f", score_bh); if (score_exact < 0.98) CUML_LOG_DEBUG("Exact score = %f", score_exact); ASSERT_TRUE(0.98 < score_bh && 0.98 < score_exact); }
b62682baad4fd8d0efac56c8bbf3643ce0394cc6.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/manifold/tsne.h> #include <datasets/digits.h> #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <stdio.h> #include <stdlib.h> #include <common/device_buffer.hpp> #include <cuml/common/cuml_allocator.hpp> #include <cuml/common/logger.hpp> #include <iostream> #include <metrics/scores.cuh> #include <vector> using namespace MLCommon; using namespace MLCommon::Score; using namespace MLCommon::Distance; using namespace MLCommon::Datasets::Digits; using namespace ML; class TSNETest : public ::testing::Test { protected: void basicTest() { raft::handle_t handle; // Allocate memory device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(), n * p); raft::update_device(X_d.data(), digits.data(), n * p, handle.get_stream()); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); device_buffer<float> Y_d(handle.get_device_allocator(), handle.get_stream(), n * 2); // Test Barnes Hut TSNE_fit(handle, X_d.data(), Y_d.data(), n, p, 2, 90, 0.5, 0.0025, 50, 100, 1e-5, 12, 250, 0.01, 200, 500, 1000, 1e-7, 0.5, 0.8, -1); // Move embeddings to host. // This can be used for printing if needed. float *embeddings_h = (float *)malloc(sizeof(float) * n * 2); assert(embeddings_h != NULL); raft::update_host(&embeddings_h[0], Y_d.data(), n * 2, handle.get_stream()); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); // Transpose the data int k = 0; float C_contiguous_embedding[n * 2]; for (int i = 0; i < n; i++) { for (int j = 0; j < 2; j++) C_contiguous_embedding[k++] = embeddings_h[j * n + i]; } // Move transposed embeddings back to device, as trustworthiness requires C contiguous format raft::update_device(Y_d.data(), C_contiguous_embedding, n * 2, handle.get_stream()); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); // Test trustworthiness score_bh = trustworthiness_score<float, raft::distance::DistanceType::EucUnexpandedL2Sqrt>( X_d.data(), Y_d.data(), n, p, 2, 5, handle.get_device_allocator(), handle.get_stream()); // Test Exact TSNE TSNE_fit(handle, X_d.data(), Y_d.data(), n, p, 2, 90, 0.5, 0.0025, 50, 100, 1e-5, 12, 250, 0.01, 200, 500, 1000, 1e-7, 0.5, 0.8, -1, CUML_LEVEL_INFO, false, false); raft::update_host(&embeddings_h[0], Y_d.data(), n * 2, handle.get_stream()); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); // Move embeddings to host. // This can be used for printing if needed. 
k = 0; for (int i = 0; i < n; i++) { for (int j = 0; j < 2; j++) C_contiguous_embedding[k++] = embeddings_h[j * n + i]; } // Move transposed embeddings back to device, as trustworthiness requires C contiguous format raft::update_device(Y_d.data(), C_contiguous_embedding, n * 2, handle.get_stream()); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); // Test trustworthiness score_exact = trustworthiness_score<float, raft::distance::DistanceType::EucUnexpandedL2Sqrt>( X_d.data(), Y_d.data(), n, p, 2, 5, handle.get_device_allocator(), handle.get_stream()); // Free space free(embeddings_h); } void SetUp() override { basicTest(); } void TearDown() override {} protected: int n = 1797; int p = 64; double score_bh; double score_exact; }; typedef TSNETest TSNETestF; TEST_F(TSNETestF, Result) { if (score_bh < 0.98) CUML_LOG_DEBUG("BH score = %f", score_bh); if (score_exact < 0.98) CUML_LOG_DEBUG("Exact score = %f", score_exact); ASSERT_TRUE(0.98 < score_bh && 0.98 < score_exact); }
6400f17f61c35982985d1c43065c6665aff2ec65.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <glm/mat3x3.hpp>
#include <glm/vec3.hpp>
#include "svd.h"
#include "cuda_error.h"

using namespace glm;

__global__ void test_kernel(mat3 *matrix, vec3 *s, mat3 *v) {
    printf("%p %f\n", s, (*s)[2]);
    if (threadIdx.x == 0) {
        dsvd<3, 3>(*matrix, *s, *v);
    }
}

void test(mat3 &m, vec3 &s, mat3 &v) {
    // printf("%f %f %f\n", m[0][0], m[1][1], m[2][2]);
    mat3* dm;
    vec3* ds;
    mat3* dv;

    CudaSafeCall(hipMalloc(&dm, sizeof(mat3)));
    CudaSafeCall(hipMalloc(&ds, sizeof(vec3)));
    CudaSafeCall(hipMalloc(&dv, sizeof(mat3)));

    std::cout << ds << std::endl;

    CudaSafeCall(hipMemcpy(dm, &m, sizeof(mat3), hipMemcpyHostToDevice));
    CudaSafeCall(hipMemset(ds, 0, sizeof(vec3)));
    CudaSafeCall(hipMemset(dv, 0, sizeof(mat3)));

    hipLaunchKernelGGL(( Launch(test_kernel), dim3(1), dim3(1), 0, 0, dm, ds, dv));

    CudaSafeCall(hipMemcpy(&m, dm, sizeof(mat3), hipMemcpyDeviceToHost));
    CudaSafeCall(hipMemcpy(&s, ds, sizeof(vec3), hipMemcpyDeviceToHost));
    CudaSafeCall(hipMemcpy(&v, dv, sizeof(mat3), hipMemcpyDeviceToHost));
}
6400f17f61c35982985d1c43065c6665aff2ec65.cu
#include <iostream>
#include <cuda_runtime.h>
#include <glm/mat3x3.hpp>
#include <glm/vec3.hpp>
#include "svd.h"
#include "cuda_error.h"

using namespace glm;

__global__ void test_kernel(mat3 *matrix, vec3 *s, mat3 *v) {
    printf("%p %f\n", s, (*s)[2]);
    if (threadIdx.x == 0) {
        dsvd<3, 3>(*matrix, *s, *v);
    }
}

void test(mat3 &m, vec3 &s, mat3 &v) {
    // printf("%f %f %f\n", m[0][0], m[1][1], m[2][2]);
    mat3* dm;
    vec3* ds;
    mat3* dv;

    CudaSafeCall(cudaMalloc(&dm, sizeof(mat3)));
    CudaSafeCall(cudaMalloc(&ds, sizeof(vec3)));
    CudaSafeCall(cudaMalloc(&dv, sizeof(mat3)));

    std::cout << ds << std::endl;

    CudaSafeCall(cudaMemcpy(dm, &m, sizeof(mat3), cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemset(ds, 0, sizeof(vec3)));
    CudaSafeCall(cudaMemset(dv, 0, sizeof(mat3)));

    Launch(test_kernel<<<1, 1>>>(dm, ds, dv));

    CudaSafeCall(cudaMemcpy(&m, dm, sizeof(mat3), cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpy(&s, ds, sizeof(vec3), cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpy(&v, dv, sizeof(mat3), cudaMemcpyDeviceToHost));
}
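// Illustrative usage sketch (an assumption, not part of the original file; the exact
// output layout depends on the dsvd implementation in svd.h):
//
//   glm::mat3 m = /* matrix to decompose */;
//   glm::vec3 s(0.0f);   // receives the singular values from dsvd
//   glm::mat3 v(0.0f);   // receives the V factor from dsvd
//   test(m, s, v);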
c8e408e3d4259df816cbc8447cf390d950f965e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> #include "helper_cuda.h" #include "Mandelbrot_kernel.h" #include "Mandelbrot_kernel.cuh" // The Mandelbrot CUDA GPU thread function /* Version using software scheduling of thread blocks. The idea here is to launch of fixed number of worker blocks to fill the machine, and have each block loop over the available work until it is all done. We use a counter in global memory to keep track of which blocks have been completed. The counter is incremented atomically by each worker block. This method can achieve higher performance when blocks take a wide range of different times to complete. */ __device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch template<class T> __global__ void Mandelbrot0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while (1) { __syncthreads(); // (needed to avoid race condition between threadblocks after first iteration) if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); if (blockIndex >= numBlocks) { break; // finish } // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot<T>(xPos, yPos, xJP, yJP, crunch, isJ); // int m = blockIdx.x; // uncomment to see scheduling order m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } } } // Mandelbrot0 // The Mandelbrot CUDA GPU thread function (double single version) __global__ void MandelbrotDS0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while (1) { __syncthreads(); // (needed to avoid race condition between threadblocks after first iteration) if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); if (blockIndex >= numBlocks) { break; // finish } // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } } } // MandelbrotDS0 // The Mandelbrot secondary AA pass CUDA GPU thread function template<class T> __global__ void Mandelbrot1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while (1) { __syncthreads(); // (needed to avoid race condition between threadblocks after first iteration) if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); if (blockIndex >= numBlocks) { break; // finish } // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) { count += CheckColors(pixelColor, dst[pixel - 1]); } if (ix + 1 < imageW) { count += CheckColors(pixelColor, dst[pixel + 1]); } if (iy > 0) { count += CheckColors(pixelColor, dst[pixel - imageW]); } if (iy + 1 < imageH) { count += CheckColors(pixelColor, dst[pixel + imageW]); } if (count) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot(xPos, yPos, xJP, yJP, crunch, isJ); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } } } // Mandelbrot1 // The Mandelbrot secondary AA pass CUDA GPU thread function (double single version) __global__ void MandelbrotDS1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while (1) { __syncthreads(); // (needed to avoid race condition between threadblocks after first iteration) if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); if (blockIndex >= numBlocks) { break; // finish } // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) { count += CheckColors(pixelColor, dst[pixel - 1]); } if (ix + 1 < imageW) { count += CheckColors(pixelColor, dst[pixel + 1]); } if (iy > 0) { count += CheckColors(pixelColor, dst[pixel - imageW]); } if (iy + 1 < imageH) { count += CheckColors(pixelColor, dst[pixel + imageW]); } if (count) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } } } // MandelbrotDS1 // The host CPU Mandebrot thread spawner void RunMandelbrot0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs, const bool isJ, int version) { int blockdim_x_dynamic = BLOCKDIM_X; int blockdim_y_dynamic = BLOCKDIM_Y; //override original 1.x block dimensions on newer architectures, supporting 1024 threads/block with warpSize=32 if (version >= 20) { blockdim_x_dynamic = BLOCKDIM_X_SM20; blockdim_y_dynamic = BLOCKDIM_Y_SM20; } dim3 threads(blockdim_x_dynamic, blockdim_y_dynamic); dim3 grid(iDivUp(imageW, blockdim_x_dynamic), iDivUp(imageH, blockdim_y_dynamic)); // zero block counter unsigned int hBlockCounter = 0; checkCudaErrors(hipMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, hipMemcpyHostToDevice)); int numWorkerBlocks = numSMs; switch (mode) { default: case 0: hipLaunchKernelGGL(( Mandelbrot0<float>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); hipLaunchKernelGGL(( MandelbrotDS0), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, x0, x1, y0, y1, xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; case 2: hipLaunchKernelGGL(( Mandelbrot0<double>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, xOff, yOff, xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; } getLastCudaError("Mandelbrot0 kernel execution failed.\n"); } // RunMandelbrot0 // The host CPU Mandebrot thread spawner void RunMandelbrot1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs, const bool isJ, int version) { int blockdim_x_dynamic = BLOCKDIM_X; int blockdim_y_dynamic = BLOCKDIM_Y; //override original 1.x block dimensions on newer architectures, supporting 1024 threads/block with warpSize=32 if (version >= 20) { blockdim_x_dynamic = BLOCKDIM_X_SM20; blockdim_y_dynamic = BLOCKDIM_Y_SM20; } dim3 threads(blockdim_x_dynamic, blockdim_y_dynamic); dim3 grid(iDivUp(imageW, blockdim_x_dynamic), iDivUp(imageH, blockdim_y_dynamic)); // zero block counter unsigned int hBlockCounter = 0; checkCudaErrors(hipMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, hipMemcpyHostToDevice)); int numWorkerBlocks = numSMs; switch (mode) { default: case 0: hipLaunchKernelGGL(( Mandelbrot1<float>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)xjp, (float)yjp, (float)scale, 
colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); hipLaunchKernelGGL(( MandelbrotDS1), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, x0, x1, y0, y1, xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; case 2: hipLaunchKernelGGL(( Mandelbrot1<double>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, xOff, yOff, xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; } getLastCudaError("Mandelbrot1 kernel execution failed.\n"); } // RunMandelbrot1
c8e408e3d4259df816cbc8447cf390d950f965e4.cu
/* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> #include "helper_cuda.h" #include "Mandelbrot_kernel.h" #include "Mandelbrot_kernel.cuh" // The Mandelbrot CUDA GPU thread function /* Version using software scheduling of thread blocks. The idea here is to launch of fixed number of worker blocks to fill the machine, and have each block loop over the available work until it is all done. We use a counter in global memory to keep track of which blocks have been completed. The counter is incremented atomically by each worker block. This method can achieve higher performance when blocks take a wide range of different times to complete. */ __device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch template<class T> __global__ void Mandelbrot0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while (1) { __syncthreads(); // (needed to avoid race condition between threadblocks after first iteration) if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); if (blockIndex >= numBlocks) { break; // finish } // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot<T>(xPos, yPos, xJP, yJP, crunch, isJ); // int m = blockIdx.x; // uncomment to see scheduling order m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } } } // Mandelbrot0 // The Mandelbrot CUDA GPU thread function (double single version) __global__ void MandelbrotDS0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while (1) { __syncthreads(); // (needed to avoid race condition between threadblocks after first iteration) if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); if (blockIndex >= numBlocks) { break; // finish } // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int pixel = imageW * iy + ix; if (frame == 0) { color.w = 0; dst[pixel] = color; } else { int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1; dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1; dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1; } } } } // MandelbrotDS0 // The Mandelbrot secondary AA pass CUDA GPU thread function template<class T> __global__ void Mandelbrot1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff, const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while (1) { __syncthreads(); // (needed to avoid race condition between threadblocks after first iteration) if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); if (blockIndex >= numBlocks) { break; // finish } // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) { count += CheckColors(pixelColor, dst[pixel - 1]); } if (ix + 1 < imageW) { count += CheckColors(pixelColor, dst[pixel + 1]); } if (iy > 0) { count += CheckColors(pixelColor, dst[pixel - imageW]); } if (iy + 1 < imageH) { count += CheckColors(pixelColor, dst[pixel + imageW]); } if (count) { // Calculate the location const T xPos = (T)ix * scale + xOff; const T yPos = (T)iy * scale + yOff; // Calculate the Mandelbrot index for the current location int m = CalcMandelbrot(xPos, yPos, xJP, yJP, crunch, isJ); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } } } // Mandelbrot1 // The Mandelbrot secondary AA pass CUDA GPU thread function (double single version) __global__ void MandelbrotDS1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1, const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale, const uchar4 colors, const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ) { __shared__ unsigned int blockIndex; __shared__ unsigned int blockX, blockY; // loop until all blocks completed while (1) { __syncthreads(); // (needed to avoid race condition between threadblocks after first iteration) if ((threadIdx.x==0) && (threadIdx.y==0)) { // get block to process blockIndex = atomicAdd(&blockCounter, 1); blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here blockY = blockIndex / gridWidth; } __syncthreads(); if (blockIndex >= numBlocks) { break; // finish } // process this block const int ix = blockDim.x * blockX + threadIdx.x; const int iy = blockDim.y * blockY + threadIdx.y; if ((ix < imageW) && (iy < imageH)) { // Get the current pixel color int pixel = imageW * iy + ix; uchar4 pixelColor = dst[pixel]; int count = 0; // Search for pixels out of tolerance surrounding the current pixel if (ix > 0) { count += CheckColors(pixelColor, dst[pixel - 1]); } if (ix + 1 < imageW) { count += CheckColors(pixelColor, dst[pixel + 1]); } if (iy > 0) { count += CheckColors(pixelColor, dst[pixel - imageW]); } if (iy + 1 < imageH) { count += CheckColors(pixelColor, dst[pixel + imageW]); } if (count) { // Calculate the location float xPos0 = (float)ix * scale; float xPos1 = 0.0f; float yPos0 = (float)iy * scale; float yPos1 = 0.0f; dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1); dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1); // Calculate the Mandelbrot index for the current location int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ); m = m > 0 ? 
crunch - m : 0; // Convert the Mandelbrot index into a color uchar4 color; if (m) { m += animationFrame; color.x = m * colors.x; color.y = m * colors.y; color.z = m * colors.z; } else { color.x = 0; color.y = 0; color.z = 0; } // Output the pixel int frame1 = frame + 1; int frame2 = frame1 / 2; dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1; dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1; dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1; } } } } // MandelbrotDS1 // The host CPU Mandebrot thread spawner void RunMandelbrot0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs, const bool isJ, int version) { int blockdim_x_dynamic = BLOCKDIM_X; int blockdim_y_dynamic = BLOCKDIM_Y; //override original 1.x block dimensions on newer architectures, supporting 1024 threads/block with warpSize=32 if (version >= 20) { blockdim_x_dynamic = BLOCKDIM_X_SM20; blockdim_y_dynamic = BLOCKDIM_Y_SM20; } dim3 threads(blockdim_x_dynamic, blockdim_y_dynamic); dim3 grid(iDivUp(imageW, blockdim_x_dynamic), iDivUp(imageH, blockdim_y_dynamic)); // zero block counter unsigned int hBlockCounter = 0; checkCudaErrors(cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice)); int numWorkerBlocks = numSMs; switch (mode) { default: case 0: Mandelbrot0<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); MandelbrotDS0<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1, xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; case 2: Mandelbrot0<double><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, xOff, yOff, xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; } getLastCudaError("Mandelbrot0 kernel execution failed.\n"); } // RunMandelbrot0 // The host CPU Mandebrot thread spawner void RunMandelbrot1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff, const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame, const int animationFrame, const int mode, const int numSMs, const bool isJ, int version) { int blockdim_x_dynamic = BLOCKDIM_X; int blockdim_y_dynamic = BLOCKDIM_Y; //override original 1.x block dimensions on newer architectures, supporting 1024 threads/block with warpSize=32 if (version >= 20) { blockdim_x_dynamic = BLOCKDIM_X_SM20; blockdim_y_dynamic = BLOCKDIM_Y_SM20; } dim3 threads(blockdim_x_dynamic, blockdim_y_dynamic); dim3 grid(iDivUp(imageW, blockdim_x_dynamic), iDivUp(imageH, blockdim_y_dynamic)); // zero block counter unsigned int hBlockCounter = 0; checkCudaErrors(cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice)); int numWorkerBlocks = numSMs; switch (mode) { default: case 0: Mandelbrot1<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff, (float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; case 1: float x0, x1, y0, y1; dsdeq(x0, x1, xOff); dsdeq(y0, y1, yOff); 
MandelbrotDS1<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1, xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; case 2: Mandelbrot1<double><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, xOff, yOff, xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ); break; } getLastCudaError("Mandelbrot1 kernel execution failed.\n"); } // RunMandelbrot1
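The Mandelbrot0/MandelbrotDS0/Mandelbrot1 kernels above all rely on the same software-scheduling idea described in their header comment: launch only a fixed number of worker blocks and let each block claim tiles from a global counter with atomicAdd until every tile is consumed. Below is a minimal, self-contained sketch of that pattern; the kernel name, the doWork stub and the launch configuration are illustrative placeholders, not code from the sample above.

#include <cstdio>
#include <cuda_runtime.h>

__device__ unsigned int workCounter;   // global tile counter, zeroed before launch

__device__ void doWork(unsigned int tile)
{
    // per-tile work would go here; intentionally empty in this sketch
    (void)tile;
}

__global__ void persistentWorker(unsigned int numTiles)
{
    __shared__ unsigned int tile;
    while (true) {
        __syncthreads();                        // keep the block together between tiles
        if (threadIdx.x == 0)
            tile = atomicAdd(&workCounter, 1);  // one thread claims the next tile
        __syncthreads();
        if (tile >= numTiles)
            break;                              // no tiles left for this block
        doWork(tile);
    }
}

int main()
{
    unsigned int zero = 0, numTiles = 1024;
    cudaMemcpyToSymbol(workCounter, &zero, sizeof(zero));
    persistentWorker<<<8, 128>>>(numTiles);     // a handful of worker blocks, not one per tile
    cudaDeviceSynchronize();
    printf("done\n");
    return 0;
}

The leading __syncthreads() matches the comment in the kernels above: without it a fast thread could overwrite the shared block index while slower threads in the same block were still using the previous one.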
238727f72936785de56273bc3916bedd9957809d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Kernel2(bool *g_graph_mask, bool *g_updating_graph_mask, bool *g_graph_visited, bool *g_over, int no_of_nodes)
{
    int tid = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid < no_of_nodes && g_updating_graph_mask[tid])
    {
        g_graph_mask[tid] = true;
        g_graph_visited[tid] = true;
        *g_over = true;
        g_updating_graph_mask[tid] = false;
    }
}
238727f72936785de56273bc3916bedd9957809d.cu
#include "includes.h"
__global__ void Kernel2(bool *g_graph_mask, bool *g_updating_graph_mask, bool *g_graph_visited, bool *g_over, int no_of_nodes)
{
    int tid = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid < no_of_nodes && g_updating_graph_mask[tid])
    {
        g_graph_mask[tid] = true;
        g_graph_visited[tid] = true;
        *g_over = true;
        g_updating_graph_mask[tid] = false;
    }
}
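Kernel2 above is the second half of one BFS frontier-expansion step: nodes flagged in g_updating_graph_mask are promoted into the visited set and the new frontier, and *g_over tells the host that another pass is needed. A rough sketch of the host-side driver loop such a flag implies is shown below; the kernel body is repeated under a placeholder name so the sketch compiles on its own, and the allocation sizes and loop structure are assumptions, not code from this entry.

#include <cstdio>
#include <cuda_runtime.h>

#define MAX_THREADS_PER_BLOCK 256

// Same body as Kernel2 above, repeated under another name so the sketch is self-contained.
__global__ void expandFrontier(bool *mask, bool *updating, bool *visited, bool *over, int n)
{
    int tid = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid < n && updating[tid]) {
        mask[tid]     = true;
        visited[tid]  = true;
        *over         = true;
        updating[tid] = false;
    }
}

int main()
{
    const int n = 1 << 10;
    bool *mask, *updating, *visited, *over;
    cudaMalloc(&mask, n);     cudaMalloc(&updating, n);
    cudaMalloc(&visited, n);  cudaMalloc(&over, 1);
    cudaMemset(mask, 0, n);   cudaMemset(updating, 0, n);  cudaMemset(visited, 0, n);

    bool h_over;
    int blocks = (n + MAX_THREADS_PER_BLOCK - 1) / MAX_THREADS_PER_BLOCK;
    do {
        h_over = false;
        cudaMemcpy(over, &h_over, 1, cudaMemcpyHostToDevice);
        // a first kernel would normally mark the next frontier in `updating` here
        expandFrontier<<<blocks, MAX_THREADS_PER_BLOCK>>>(mask, updating, visited, over, n);
        cudaMemcpy(&h_over, over, 1, cudaMemcpyDeviceToHost);
    } while (h_over);          // stop when no thread raised the flag

    printf("frontier expansion loop finished\n");
    cudaFree(mask);  cudaFree(updating);  cudaFree(visited);  cudaFree(over);
    return 0;
}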
57ed04dde2b0a3aa2c8965a4e840eb42d6eed769.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" ///////////////////////////////////////////////////////////////////////////// /// Copyright 2020 Google LLC /// /// Licensed under the Apache License, Version 2.0 (the "License"); /// you may not use this file except in compliance with the License. /// You may obtain a copy of the License at /// /// https://www.apache.org/licenses/LICENSE-2.0 /// /// Unless required by applicable law or agreed to in writing, software /// distributed under the License is distributed on an "AS IS" BASIS, /// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. /// See the License for the specific language governing permissions and /// limitations under the License. ///////////////////////////////////////////////////////////////////////////// /// Modifications: pedro hermosilla ([email protected]) ///////////////////////////////////////////////////////////////////////////// #include "defines.hpp" #include "math_helper.cuh" #include "cuda_kernel_utils.cuh" #include "grid_utils.cuh" #include "find_ranges_grid_ds.cuh" ///////////////////////// GPU /** * GPU kernel to find the ranges in the list of points for a grid cell * and its 26 neighbors. * @param pNumSamples Number of samples. * @param pNumPts Number of points. * @param pLastDOffsets Number of displacement in the last * dimension in the positive and negative axis. * @param pNumOffsets Number of offsets applied to the * keys. * @param pOffsets List of offsets to apply. * @param pSampleKeys Array of keys for each sample. * @param pPtKeys Array of keys for each point. * @param pGridDs Grid data structure. * @param pNumCells Number of cells. * @param pOutDS Output array with the ranges. * @paramT D Number of dimensions. */ template<int D> __global__ void find_ranges_grid_ds_gpu_kernel( const unsigned int pNumSamples, const unsigned int pNumPts, const unsigned int pLastDOffsets, const unsigned int pNumOffsets, const mccnn::ipoint<D>* __restrict__ pOffsets, const mccnn::int64_m* __restrict__ pSampleKeys, const mccnn::int64_m* __restrict__ pPtKeys, const int2* __restrict__ pGridDs, const mccnn::ipoint<D>* __restrict__ pNumCells, int2* __restrict__ pOutRanges) { int initPtIndex = mccnn::compute_global_index_gpu_funct(); int totalThreads = mccnn::compute_total_threads_gpu_funct(); //Calculate the total number of cells. mccnn::int64_m totalCells = mccnn::compute_total_num_cells_gpu_funct(pNumCells[0]); for(int curIter = initPtIndex; curIter < pNumSamples*pNumOffsets; curIter += totalThreads) { //Calculate the point and offset index. int curPtIndex = curIter/pNumOffsets; int curOffset = curIter%pNumOffsets; //Get the current offset. mccnn::ipoint<D> cellOffset = pOffsets[curOffset]; //Get the key of the current point. mccnn::int64_m curKey = pSampleKeys[curPtIndex]; //Get the new cell with the offset. mccnn::ipoint<D+1> cellIndex = mccnn::compute_cell_from_key_gpu_funct(curKey, pNumCells[0]); #pragma unroll for(int i=0; i < D; ++i) cellIndex[i+1] += cellOffset[i]; //Check if we are out of the bounding box. bool inside = true; #pragma unroll for(int i=0; i < D-1; ++i) inside = inside && cellIndex[i+1] >= 0 && cellIndex[i+1] < pNumCells[0][i]; if(inside) { //Get the range of pts to check in the data structure. int curDsIndex = mccnn::compute_ds_index_from_cell_gpu_funct(cellIndex, pNumCells[0]); int2 dsRange = pGridDs[curDsIndex]; int rangeSize = dsRange.y-dsRange.x-1; //Tube has at least 1 element. if(rangeSize >= 0){ //Compute max key of the range. 
mccnn::ipoint<D> auxCell(&cellIndex[1]); mccnn::int64_m auxKey = mccnn::compute_key_gpu_funct(auxCell, pNumCells[0], cellIndex[0]); mccnn::int64_m maxKey = auxKey+pLastDOffsets; mccnn::int64_m minKey = auxKey-pLastDOffsets; maxKey = (auxKey/totalCells == maxKey/totalCells)?maxKey:((auxKey/totalCells)*totalCells) + totalCells - 1; minKey = (auxKey/totalCells == minKey/totalCells)?minKey:((auxKey/totalCells)*totalCells); //Declare iterators and auxiliar variables. int2 curMinRange = make_int2(0, rangeSize); int2 curMaxRange = make_int2(0, rangeSize); //Search for the range. bool stopMinRange = rangeSize <= 1; bool stopMaxRange = stopMinRange; while(!stopMinRange || !stopMaxRange){ //Compute the pivots. int minPivot = (curMinRange.y + curMinRange.x)/2; int maxPivot = (curMaxRange.y + curMaxRange.x)/2; //Check the minimum range. if(!stopMinRange){ mccnn::int64_m curMinKey = pPtKeys[minPivot+dsRange.x]; if(curMinKey > maxKey) curMinRange.x = minPivot; else curMinRange.y = minPivot; } //Check the maximum range. if(!stopMaxRange){ mccnn::int64_m curMaxKey = pPtKeys[maxPivot+dsRange.x]; if(curMaxKey >= minKey) curMaxRange.x = maxPivot; else curMaxRange.y = maxPivot; } //Check the stopping condition. stopMinRange = (curMinRange.y - curMinRange.x) <= 1; stopMaxRange = (curMaxRange.y - curMaxRange.x) <= 1; } int2 resultingRange = make_int2(0, 0); //Get the values of the keys. mccnn::int64_m lastMinKey1 = pPtKeys[curMinRange.x+dsRange.x]; mccnn::int64_m lastMinKey2 = pPtKeys[curMinRange.y+dsRange.x]; //Test for the init of the range. if(lastMinKey1 >= minKey && lastMinKey1 <= maxKey){ resultingRange.x = curMinRange.x+dsRange.x; }else if(lastMinKey2 >= minKey && lastMinKey2 <= maxKey){ resultingRange.x = curMinRange.y+dsRange.x; } mccnn::int64_m lastMaxKey1 = pPtKeys[curMaxRange.x+dsRange.x]; mccnn::int64_m lastMaxKey2 = pPtKeys[curMaxRange.y+dsRange.x]; //Test for the end of the range. if(lastMaxKey2 >= minKey && lastMaxKey2 <= maxKey){ resultingRange.y = curMaxRange.y+dsRange.x+1; }else if(lastMaxKey1 >= minKey && lastMaxKey1 <= maxKey){ resultingRange.y = curMaxRange.x+dsRange.x+1; } //Store in memory the resulting range. pOutRanges[curIter] = resultingRange; } } } } ///////////////////////// CPU template<int D> void mccnn::find_ranges_grid_ds_gpu( std::unique_ptr<IGPUDevice>& pDevice, const unsigned int pNumSamples, const unsigned int pNumPts, const unsigned int pLastDOffsets, const unsigned int pNumOffsets, const int* pInGPUPtrOffsets, const mccnn::int64_m* pInGPUPtrSampleKeys, const mccnn::int64_m* pInGPUPtrPtsKeys, const int* pInGPUPtrGridDS, const int* pInGPUPtrNumCells, int* pOutGPUPtrRanges) { //Get the cuda stream. auto cudaStream = pDevice->getCUDAStream(); #ifdef DEBUG_INFO hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, cudaStream); #endif //Initialize to zero the output array. pDevice->memset(pOutGPUPtrRanges, 0, sizeof(int)*pNumSamples*pNumOffsets*2); pDevice->check_error(__FILE__, __LINE__); //Get the device properties. const GpuDeviceProperties& gpuProps = pDevice->get_device_properties(); //Calculate the ideal number of blocks for the selected block size. unsigned int numMP = gpuProps.numMPs_; unsigned int blockSize = gpuProps.warpSize_*2; unsigned int numBlocks = pDevice->get_max_active_block_x_sm( blockSize,(const void*)find_ranges_grid_ds_gpu_kernel<D>, 0); pDevice->check_error(__FILE__, __LINE__); //Calculate the total number of blocks to execute. 
unsigned int execBlocks = (pNumSamples*pNumOffsets)/blockSize; execBlocks += ((pNumSamples*pNumOffsets)%blockSize != 0)?1:0; unsigned int totalNumBlocks = numMP*numBlocks; totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks; //Execute the cuda kernel. hipLaunchKernelGGL(( find_ranges_grid_ds_gpu_kernel<D>), dim3(totalNumBlocks), dim3(blockSize), 0, cudaStream, pNumSamples, pNumPts, pLastDOffsets, pNumOffsets, (const mccnn::ipoint<D>*)pInGPUPtrOffsets, pInGPUPtrSampleKeys, pInGPUPtrPtsKeys, (const int2*)pInGPUPtrGridDS, (const mccnn::ipoint<D>*)pInGPUPtrNumCells, (int2*)pOutGPUPtrRanges); pDevice->check_error(__FILE__, __LINE__); #ifdef DEBUG_INFO hipEventRecord(stop, cudaStream); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); float gpuOccupancy = (float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_; fprintf(stderr, "### FIND RANGES KEYS ###\n"); fprintf(stderr, "Num samples: %d\n", pNumSamples); fprintf(stderr, "Num points: %d\n", pNumPts); fprintf(stderr, "Occupancy: %f\n", gpuOccupancy); fprintf(stderr, "Execution time: %f\n", milliseconds); fprintf(stderr, "\n"); #endif } unsigned int mccnn::computeTotalNumOffsets( const unsigned int pNumDimensions, const unsigned int pAxisOffset, std::vector<int>& pOutVector) { //Calculate the total number of offsets. unsigned int cellsXAxis = pAxisOffset*2 + 1; unsigned int numOffsets = cellsXAxis; for(int i = 0 ; i < ::max((int)pNumDimensions-2, 1); ++i) numOffsets *= cellsXAxis; //Calculate each offset. pOutVector.clear(); std::vector<int> curOffset(pNumDimensions, 0); for(int i = 0; i < numOffsets; ++i) { int auxInt = i; for(int j = ::max((int)pNumDimensions-2, 1); j >= 0 ; --j) { int auxInt2 = auxInt%cellsXAxis; auxInt2 = auxInt2-pAxisOffset; curOffset[j] = auxInt2; auxInt = auxInt/cellsXAxis; } if(pNumDimensions != 2) curOffset[pNumDimensions-1] = 0; for(int j = 0; j < pNumDimensions; ++j) pOutVector.push_back(curOffset[j]); } return numOffsets; } ///////////////////////// CPU Template declaration #define FIND_RANGES_GRID_DS_TEMP_DECL(Dims) \ template void mccnn::find_ranges_grid_ds_gpu<Dims>( \ std::unique_ptr<IGPUDevice>& pDevice, \ const unsigned int pNumSamples, \ const unsigned int pNumPts, \ const unsigned int pLastDOffsets, \ const unsigned int pNumOffsets, \ const int* pInGPUPtrOffsets, \ const mccnn::int64_m* pInGPUPtrSampleKeys, \ const mccnn::int64_m* pInGPUPtrPtsKeys, \ const int* pInGPUPtrGridDS, \ const int* pInGPUPtrNumCells, \ int* pOutGPUPtrRanges); DECLARE_TEMPLATE_DIMS(FIND_RANGES_GRID_DS_TEMP_DECL)
57ed04dde2b0a3aa2c8965a4e840eb42d6eed769.cu
///////////////////////////////////////////////////////////////////////////// /// Copyright 2020 Google LLC /// /// Licensed under the Apache License, Version 2.0 (the "License"); /// you may not use this file except in compliance with the License. /// You may obtain a copy of the License at /// /// https://www.apache.org/licenses/LICENSE-2.0 /// /// Unless required by applicable law or agreed to in writing, software /// distributed under the License is distributed on an "AS IS" BASIS, /// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. /// See the License for the specific language governing permissions and /// limitations under the License. ///////////////////////////////////////////////////////////////////////////// /// Modifications: pedro hermosilla ([email protected]) ///////////////////////////////////////////////////////////////////////////// #include "defines.hpp" #include "math_helper.cuh" #include "cuda_kernel_utils.cuh" #include "grid_utils.cuh" #include "find_ranges_grid_ds.cuh" ///////////////////////// GPU /** * GPU kernel to find the ranges in the list of points for a grid cell * and its 26 neighbors. * @param pNumSamples Number of samples. * @param pNumPts Number of points. * @param pLastDOffsets Number of displacement in the last * dimension in the positive and negative axis. * @param pNumOffsets Number of offsets applied to the * keys. * @param pOffsets List of offsets to apply. * @param pSampleKeys Array of keys for each sample. * @param pPtKeys Array of keys for each point. * @param pGridDs Grid data structure. * @param pNumCells Number of cells. * @param pOutDS Output array with the ranges. * @paramT D Number of dimensions. */ template<int D> __global__ void find_ranges_grid_ds_gpu_kernel( const unsigned int pNumSamples, const unsigned int pNumPts, const unsigned int pLastDOffsets, const unsigned int pNumOffsets, const mccnn::ipoint<D>* __restrict__ pOffsets, const mccnn::int64_m* __restrict__ pSampleKeys, const mccnn::int64_m* __restrict__ pPtKeys, const int2* __restrict__ pGridDs, const mccnn::ipoint<D>* __restrict__ pNumCells, int2* __restrict__ pOutRanges) { int initPtIndex = mccnn::compute_global_index_gpu_funct(); int totalThreads = mccnn::compute_total_threads_gpu_funct(); //Calculate the total number of cells. mccnn::int64_m totalCells = mccnn::compute_total_num_cells_gpu_funct(pNumCells[0]); for(int curIter = initPtIndex; curIter < pNumSamples*pNumOffsets; curIter += totalThreads) { //Calculate the point and offset index. int curPtIndex = curIter/pNumOffsets; int curOffset = curIter%pNumOffsets; //Get the current offset. mccnn::ipoint<D> cellOffset = pOffsets[curOffset]; //Get the key of the current point. mccnn::int64_m curKey = pSampleKeys[curPtIndex]; //Get the new cell with the offset. mccnn::ipoint<D+1> cellIndex = mccnn::compute_cell_from_key_gpu_funct(curKey, pNumCells[0]); #pragma unroll for(int i=0; i < D; ++i) cellIndex[i+1] += cellOffset[i]; //Check if we are out of the bounding box. bool inside = true; #pragma unroll for(int i=0; i < D-1; ++i) inside = inside && cellIndex[i+1] >= 0 && cellIndex[i+1] < pNumCells[0][i]; if(inside) { //Get the range of pts to check in the data structure. int curDsIndex = mccnn::compute_ds_index_from_cell_gpu_funct(cellIndex, pNumCells[0]); int2 dsRange = pGridDs[curDsIndex]; int rangeSize = dsRange.y-dsRange.x-1; //Tube has at least 1 element. if(rangeSize >= 0){ //Compute max key of the range. 
mccnn::ipoint<D> auxCell(&cellIndex[1]); mccnn::int64_m auxKey = mccnn::compute_key_gpu_funct(auxCell, pNumCells[0], cellIndex[0]); mccnn::int64_m maxKey = auxKey+pLastDOffsets; mccnn::int64_m minKey = auxKey-pLastDOffsets; maxKey = (auxKey/totalCells == maxKey/totalCells)?maxKey:((auxKey/totalCells)*totalCells) + totalCells - 1; minKey = (auxKey/totalCells == minKey/totalCells)?minKey:((auxKey/totalCells)*totalCells); //Declare iterators and auxiliar variables. int2 curMinRange = make_int2(0, rangeSize); int2 curMaxRange = make_int2(0, rangeSize); //Search for the range. bool stopMinRange = rangeSize <= 1; bool stopMaxRange = stopMinRange; while(!stopMinRange || !stopMaxRange){ //Compute the pivots. int minPivot = (curMinRange.y + curMinRange.x)/2; int maxPivot = (curMaxRange.y + curMaxRange.x)/2; //Check the minimum range. if(!stopMinRange){ mccnn::int64_m curMinKey = pPtKeys[minPivot+dsRange.x]; if(curMinKey > maxKey) curMinRange.x = minPivot; else curMinRange.y = minPivot; } //Check the maximum range. if(!stopMaxRange){ mccnn::int64_m curMaxKey = pPtKeys[maxPivot+dsRange.x]; if(curMaxKey >= minKey) curMaxRange.x = maxPivot; else curMaxRange.y = maxPivot; } //Check the stopping condition. stopMinRange = (curMinRange.y - curMinRange.x) <= 1; stopMaxRange = (curMaxRange.y - curMaxRange.x) <= 1; } int2 resultingRange = make_int2(0, 0); //Get the values of the keys. mccnn::int64_m lastMinKey1 = pPtKeys[curMinRange.x+dsRange.x]; mccnn::int64_m lastMinKey2 = pPtKeys[curMinRange.y+dsRange.x]; //Test for the init of the range. if(lastMinKey1 >= minKey && lastMinKey1 <= maxKey){ resultingRange.x = curMinRange.x+dsRange.x; }else if(lastMinKey2 >= minKey && lastMinKey2 <= maxKey){ resultingRange.x = curMinRange.y+dsRange.x; } mccnn::int64_m lastMaxKey1 = pPtKeys[curMaxRange.x+dsRange.x]; mccnn::int64_m lastMaxKey2 = pPtKeys[curMaxRange.y+dsRange.x]; //Test for the end of the range. if(lastMaxKey2 >= minKey && lastMaxKey2 <= maxKey){ resultingRange.y = curMaxRange.y+dsRange.x+1; }else if(lastMaxKey1 >= minKey && lastMaxKey1 <= maxKey){ resultingRange.y = curMaxRange.x+dsRange.x+1; } //Store in memory the resulting range. pOutRanges[curIter] = resultingRange; } } } } ///////////////////////// CPU template<int D> void mccnn::find_ranges_grid_ds_gpu( std::unique_ptr<IGPUDevice>& pDevice, const unsigned int pNumSamples, const unsigned int pNumPts, const unsigned int pLastDOffsets, const unsigned int pNumOffsets, const int* pInGPUPtrOffsets, const mccnn::int64_m* pInGPUPtrSampleKeys, const mccnn::int64_m* pInGPUPtrPtsKeys, const int* pInGPUPtrGridDS, const int* pInGPUPtrNumCells, int* pOutGPUPtrRanges) { //Get the cuda stream. auto cudaStream = pDevice->getCUDAStream(); #ifdef DEBUG_INFO cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, cudaStream); #endif //Initialize to zero the output array. pDevice->memset(pOutGPUPtrRanges, 0, sizeof(int)*pNumSamples*pNumOffsets*2); pDevice->check_error(__FILE__, __LINE__); //Get the device properties. const GpuDeviceProperties& gpuProps = pDevice->get_device_properties(); //Calculate the ideal number of blocks for the selected block size. unsigned int numMP = gpuProps.numMPs_; unsigned int blockSize = gpuProps.warpSize_*2; unsigned int numBlocks = pDevice->get_max_active_block_x_sm( blockSize,(const void*)find_ranges_grid_ds_gpu_kernel<D>, 0); pDevice->check_error(__FILE__, __LINE__); //Calculate the total number of blocks to execute. 
unsigned int execBlocks = (pNumSamples*pNumOffsets)/blockSize; execBlocks += ((pNumSamples*pNumOffsets)%blockSize != 0)?1:0; unsigned int totalNumBlocks = numMP*numBlocks; totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks; //Execute the cuda kernel. find_ranges_grid_ds_gpu_kernel<D><<<totalNumBlocks, blockSize, 0, cudaStream>>>( pNumSamples, pNumPts, pLastDOffsets, pNumOffsets, (const mccnn::ipoint<D>*)pInGPUPtrOffsets, pInGPUPtrSampleKeys, pInGPUPtrPtsKeys, (const int2*)pInGPUPtrGridDS, (const mccnn::ipoint<D>*)pInGPUPtrNumCells, (int2*)pOutGPUPtrRanges); pDevice->check_error(__FILE__, __LINE__); #ifdef DEBUG_INFO cudaEventRecord(stop, cudaStream); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); float gpuOccupancy = (float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_; fprintf(stderr, "### FIND RANGES KEYS ###\n"); fprintf(stderr, "Num samples: %d\n", pNumSamples); fprintf(stderr, "Num points: %d\n", pNumPts); fprintf(stderr, "Occupancy: %f\n", gpuOccupancy); fprintf(stderr, "Execution time: %f\n", milliseconds); fprintf(stderr, "\n"); #endif } unsigned int mccnn::computeTotalNumOffsets( const unsigned int pNumDimensions, const unsigned int pAxisOffset, std::vector<int>& pOutVector) { //Calculate the total number of offsets. unsigned int cellsXAxis = pAxisOffset*2 + 1; unsigned int numOffsets = cellsXAxis; for(int i = 0 ; i < std::max((int)pNumDimensions-2, 1); ++i) numOffsets *= cellsXAxis; //Calculate each offset. pOutVector.clear(); std::vector<int> curOffset(pNumDimensions, 0); for(int i = 0; i < numOffsets; ++i) { int auxInt = i; for(int j = std::max((int)pNumDimensions-2, 1); j >= 0 ; --j) { int auxInt2 = auxInt%cellsXAxis; auxInt2 = auxInt2-pAxisOffset; curOffset[j] = auxInt2; auxInt = auxInt/cellsXAxis; } if(pNumDimensions != 2) curOffset[pNumDimensions-1] = 0; for(int j = 0; j < pNumDimensions; ++j) pOutVector.push_back(curOffset[j]); } return numOffsets; } ///////////////////////// CPU Template declaration #define FIND_RANGES_GRID_DS_TEMP_DECL(Dims) \ template void mccnn::find_ranges_grid_ds_gpu<Dims>( \ std::unique_ptr<IGPUDevice>& pDevice, \ const unsigned int pNumSamples, \ const unsigned int pNumPts, \ const unsigned int pLastDOffsets, \ const unsigned int pNumOffsets, \ const int* pInGPUPtrOffsets, \ const mccnn::int64_m* pInGPUPtrSampleKeys, \ const mccnn::int64_m* pInGPUPtrPtsKeys, \ const int* pInGPUPtrGridDS, \ const int* pInGPUPtrNumCells, \ int* pOutGPUPtrRanges); DECLARE_TEMPLATE_DIMS(FIND_RANGES_GRID_DS_TEMP_DECL)
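find_ranges_grid_ds_gpu sizes its launch with a ceiling division over all (sample, offset) pairs and then clamps the result to the number of blocks the device can keep resident at once (numMPs_ times the occupancy returned by get_max_active_block_x_sm). A tiny host-only sketch of that arithmetic follows; the helper name and the test values are made up for illustration.

#include <algorithm>
#include <cstdio>

// Ceil-division plus clamping, as used above to size the launch: enough blocks to
// cover every work item, but never more than the device can keep resident.
unsigned int numLaunchBlocks(unsigned int workItems, unsigned int blockSize,
                             unsigned int numMPs, unsigned int activeBlocksPerMP)
{
    unsigned int execBlocks = (workItems + blockSize - 1) / blockSize;  // ceil(workItems / blockSize)
    unsigned int resident   = numMPs * activeBlocksPerMP;
    return std::min(execBlocks, resident);
}

int main()
{
    // e.g. 100000 pairs, 64 threads per block, 40 SMs, 16 resident blocks per SM
    printf("%u\n", numLaunchBlocks(100000, 64, 40, 16));   // prints 640: clamped by residency
    printf("%u\n", numLaunchBlocks(1000, 64, 40, 16));     // prints 16: ceil(1000 / 64)
    return 0;
}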
b4fe4f15976e650f5d19d92491b382419ce3c8fa.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "vec3.h" #include "ray.h" #include <fstream> #include <hip/hip_runtime_api.h> //BASICALLY NOT WORKING TTxTT #include <chrono> using namespace std; __device__ float hit_sphere(const vec3& center, float radious, const ray& r){ vec3 oc = r.origin() - center; //baskara! float a = dot(r.direction(), r.direction()); float b = 2.0f * dot(oc, r.direction()); float c = dot(oc,oc) - radious*radious; float discriminant = b*b -4*a*c; if (discriminant < 0){ return -1.0f; } else { return(-b-sqrt(discriminant)) / (2.0f*a); } } __device__ vec3 color(const ray& r,const int worldsize, const vec3 _sphere_centers[], const vec3 _sphere_colors[], const float _sphere_radious[]){ for(int i = 0; i < worldsize; i++){ float hit = hit_sphere(_sphere_centers[i],_sphere_radious[i], r); if(hit > 0.0f){ vec3 normal = unit_vector(r.point_at_parameter(hit) - _sphere_centers[i]); //some better color vec3 dif = normal - (unit_vector(r.direction())); float itensity = dif.length() - 1; //itensity = 1 - itensity; if(itensity < 0){ itensity = 0.0f; } return _sphere_colors[i] * itensity; } } vec3 unit_direction = unit_vector(r.direction()); float t = 0.5f*(unit_direction.y() + 1.0f); return (1.0f -t) *vec3(1.0f,1.0f,1.0f) + t*vec3(0.0f,0.0f,0.0f); } __device__ //return a "random" number betwen 1 and -1 based on 4 floats as seed (it is not random actually, but it is enough) float pseudo_rand(float a, float b, float c, float d){ //a controlled chaotic expression float val = ( (((a + 134.854f + c)/3.3f) * c) / (d + 3.645f) ); if(val == 0.0f){ //if a or c is zero, use this other expression val = (c + d + 89.423f) * 9.308f * d * 1.54f + c; } val *= 11.245f; //val = val % 2; //I cant use modulo on floats inside CUDA!!! 
//workaround: int precision = 100000; //how many decimal slots i want to keep (log(10), 5 in this case) int ret = (int) val % (2 * precision); //module it with some precision val = (float)ret / (precision); // make ret a floating point return (val - 1.0f); } __global__ void renderPixel(int nx, int ny, int channels, int antia, int _outvec[], vec3 _sphere_centers[] ,vec3 _sphere_colors[], float _sphere_radious[],int worldsize, vec3 origin, vec3 l_l_corner, vec3 vertical, vec3 horizontal){ //wich pixel am i seeing int index = blockIdx.x * blockDim.x + threadIdx.x; //(use multiplying it by channels) //get u and v int x = index % nx; int y = index / nx; float u = (float) x / float(nx); float v = (float) y / float(ny); //create ray ray r(origin, l_l_corner + u*horizontal + v*vertical); //info for antialiasing float difx = (((float) x + 1) / float(nx)) - u; //diference betwen this u and nexts pixel u float dify = (((float) y + 1) / float(ny)) - v; //diference betwen this v and nexts pixel v float udif,vdif = 0.0f; //launch ray for each antialiasing ray vec3 col = vec3(0,0,0); udif = pseudo_rand((float)index, u, (float)nx * 5.1f * (y), (float) x / y) * difx ; vdif = pseudo_rand((float)index, v, (float)ny * 1.52f * (x), (float) y / x) * dify; for(int i = 0; i < antia; i ++){ col += color(r,worldsize,_sphere_centers,_sphere_colors,_sphere_radious); udif -= pseudo_rand((float)index, u, (float)nx * 5.1f * i * (y), (float) x / y) * difx; vdif -= pseudo_rand((float)index, v, (float)ny * 1.52f * i * (x), (float) y / x) * dify; //printf("u%f, v%f, i-%i\n",udif,vdif,i); //change ray so it is a little diferent but inside the same pixel bounderies r.B = l_l_corner + ((u + udif)*horizontal + (v + vdif)*vertical); } col /= antia; //write r g and b on outvec _outvec[index * channels + 0] = int(255.99f*col[0]); _outvec[index * channels + 1] = int(255.99f*col[1]); _outvec[index * channels + 2] = int(255.99f*col[2]); } int main() { //image resolution config int nx = 1920 * 2; int ny = 1080 * 2; int channels = 3; //amount of antialiasing int antia = 8; //Set var for world inside cuda vec3 *sphere_centers; vec3 *sphere_colors; float *sphere_radious; //list for storing world values (for readbility) int worldsize = 3; //camera options //field of view float fov = 4.0f; float fovRatio = (float)ny / (float)nx; vec3 horizontal(fov,0.0f,0.0f); vec3 vertical(0.0f,fov * fovRatio,0.0f); //bounderies and origin vec3 lower_left_corner(-fov/2,-fov * fovRatio/2,-1.0f); vec3 origin(0.0f,0.0f,0.0f); //threading options int blockSize = 64; //vector for output image int * outvec;// = new int[nx * ny * channels]; //Start Cuda Profiler cout << "Starting GPU Parallelized section. 
\n"; hipProfilerStart(); //Start Timer chrono::high_resolution_clock::time_point start = chrono::high_resolution_clock::now(); //mallocs for render hipMallocManaged(&outvec, sizeof(int) * nx * ny * channels); //also for world hipMallocManaged(&sphere_centers, sizeof(vec3) * worldsize); hipMallocManaged(&sphere_colors, sizeof(vec3) * worldsize); hipMallocManaged(&sphere_radious, sizeof(float) * worldsize); sphere_centers[0] = vec3(0,1,-2); sphere_colors [0] = vec3(0.2f, 0.8f, 0.5f); sphere_radious[0] = 0.5f; sphere_centers[1] = vec3(1,-0.5f,-2); sphere_colors [1] = vec3(0.8f, 0.2f, 0.1f); sphere_radious[1] = 0.5f; sphere_centers[2] = vec3(-1,-0.5f,-2); sphere_colors [2] = vec3(0.9f, 0.8f, 0.4f); sphere_radious[2] = 0.5f; //preparations and kernel call int numblocks = (nx * ny) / blockSize; //REMEMBER: each thread calculates a color for each channel, so the outvec is channelTimes bigger than the total number of threads int remainingThreads = (nx * ny) % blockSize; hipLaunchKernelGGL(( renderPixel), dim3(numblocks),dim3(blockSize), 0, 0, nx,ny,channels,antia,outvec,sphere_centers,sphere_colors,sphere_radious,worldsize,origin,lower_left_corner,vertical,horizontal); //make a especial block for the remaining pixels (in case that the total pixel size is not factored by blocksize) if(blockSize > 0){ hipLaunchKernelGGL(( renderPixel), dim3(1),dim3(remainingThreads), 0, 0, nx,ny,channels,antia,outvec,sphere_centers,sphere_colors,sphere_radious,worldsize,origin,lower_left_corner,vertical,horizontal); } //sync everything hipDeviceSynchronize(); //End of GPU paralelization //End GPU time chrono::high_resolution_clock::time_point end = chrono::high_resolution_clock::now(); chrono::duration<double> time_span = chrono::duration_cast<chrono::duration<double>>(end - start); //Start write timer chrono::high_resolution_clock::time_point timeStart = chrono::high_resolution_clock::now(); //write file cout << "Writing File\n"; ofstream image; image.open("image.ppm",ios::out); image << "P3\n" << nx << " " << ny << "\n255\n"; for (int i = 0; i < nx * ny; i++){ image << outvec[i * channels] << " " << outvec[i * channels + 1] << " " << outvec[i * channels + 2] << "\n"; } image.close(); //End Writer timer end = chrono::high_resolution_clock::now(); //Flush timers cout << std::fixed << "GPU time taken: " << time_span.count() << "\n"; chrono::duration<double> write_time_span = chrono::duration_cast<chrono::duration<double>>(end - timeStart); cout << std::fixed << "Write time taken: " << write_time_span.count() << "\n"; chrono::duration<double> total_time_span = chrono::duration_cast<chrono::duration<double>>(end - start); cout << std::fixed << "Total time taken: " << total_time_span.count() << "\n"; //free memory hipFree(outvec); hipFree(sphere_centers); hipFree(sphere_colors); hipFree(sphere_radious); //done cout << "DONE\n"; hipProfilerStop(); return 0; }
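hit_sphere above solves the usual ray-sphere quadratic: substituting the ray o + t*d into |p - c|^2 = r^2 gives a*t^2 + b*t + c = 0 with a = d.d, b = 2*(o-c).d, c = (o-c).(o-c) - r^2, and the function returns the smaller root when the discriminant is non-negative. Below is a host-side check of the same arithmetic with plain floats; the scalar layout and the test values are only for illustration.

#include <cmath>
#include <cstdio>

// Host-side version of the quadratic solved by hit_sphere: ray o + t*d against a
// sphere centered at c with radius r; returns the nearer t, or -1 if the ray misses.
float hitSphere(float ox, float oy, float oz, float dx, float dy, float dz,
                float cx, float cy, float cz, float r)
{
    float ocx = ox - cx, ocy = oy - cy, ocz = oz - cz;
    float a = dx*dx + dy*dy + dz*dz;
    float b = 2.0f * (ocx*dx + ocy*dy + ocz*dz);
    float c = ocx*ocx + ocy*ocy + ocz*ocz - r*r;
    float disc = b*b - 4.0f*a*c;
    return (disc < 0.0f) ? -1.0f : (-b - std::sqrt(disc)) / (2.0f * a);
}

int main()
{
    // Ray from the origin straight down -z, unit sphere centered at (0,0,-3):
    // the near hit should be at t = 2 (the sphere's front face).
    printf("t = %f\n", hitSphere(0,0,0,  0,0,-1,  0,0,-3,  1.0f));
    return 0;
}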
b4fe4f15976e650f5d19d92491b382419ce3c8fa.cu
#include <iostream> #include "vec3.h" #include "ray.h" #include <fstream> #include <cuda_profiler_api.h> //BASICALLY NOT WORKING TTxTT #include <chrono> using namespace std; __device__ float hit_sphere(const vec3& center, float radious, const ray& r){ vec3 oc = r.origin() - center; //baskara! float a = dot(r.direction(), r.direction()); float b = 2.0f * dot(oc, r.direction()); float c = dot(oc,oc) - radious*radious; float discriminant = b*b -4*a*c; if (discriminant < 0){ return -1.0f; } else { return(-b-sqrt(discriminant)) / (2.0f*a); } } __device__ vec3 color(const ray& r,const int worldsize, const vec3 _sphere_centers[], const vec3 _sphere_colors[], const float _sphere_radious[]){ for(int i = 0; i < worldsize; i++){ float hit = hit_sphere(_sphere_centers[i],_sphere_radious[i], r); if(hit > 0.0f){ vec3 normal = unit_vector(r.point_at_parameter(hit) - _sphere_centers[i]); //some better color vec3 dif = normal - (unit_vector(r.direction())); float itensity = dif.length() - 1; //itensity = 1 - itensity; if(itensity < 0){ itensity = 0.0f; } return _sphere_colors[i] * itensity; } } vec3 unit_direction = unit_vector(r.direction()); float t = 0.5f*(unit_direction.y() + 1.0f); return (1.0f -t) *vec3(1.0f,1.0f,1.0f) + t*vec3(0.0f,0.0f,0.0f); } __device__ //return a "random" number betwen 1 and -1 based on 4 floats as seed (it is not random actually, but it is enough) float pseudo_rand(float a, float b, float c, float d){ //a controlled chaotic expression float val = ( (((a + 134.854f + c)/3.3f) * c) / (d + 3.645f) ); if(val == 0.0f){ //if a or c is zero, use this other expression val = (c + d + 89.423f) * 9.308f * d * 1.54f + c; } val *= 11.245f; //val = val % 2; //I cant use modulo on floats inside CUDA!!! //workaround: int precision = 100000; //how many decimal slots i want to keep (log(10), 5 in this case) int ret = (int) val % (2 * precision); //module it with some precision val = (float)ret / (precision); // make ret a floating point return (val - 1.0f); } __global__ void renderPixel(int nx, int ny, int channels, int antia, int _outvec[], vec3 _sphere_centers[] ,vec3 _sphere_colors[], float _sphere_radious[],int worldsize, vec3 origin, vec3 l_l_corner, vec3 vertical, vec3 horizontal){ //wich pixel am i seeing int index = blockIdx.x * blockDim.x + threadIdx.x; //(use multiplying it by channels) //get u and v int x = index % nx; int y = index / nx; float u = (float) x / float(nx); float v = (float) y / float(ny); //create ray ray r(origin, l_l_corner + u*horizontal + v*vertical); //info for antialiasing float difx = (((float) x + 1) / float(nx)) - u; //diference betwen this u and nexts pixel u float dify = (((float) y + 1) / float(ny)) - v; //diference betwen this v and nexts pixel v float udif,vdif = 0.0f; //launch ray for each antialiasing ray vec3 col = vec3(0,0,0); udif = pseudo_rand((float)index, u, (float)nx * 5.1f * (y), (float) x / y) * difx ; vdif = pseudo_rand((float)index, v, (float)ny * 1.52f * (x), (float) y / x) * dify; for(int i = 0; i < antia; i ++){ col += color(r,worldsize,_sphere_centers,_sphere_colors,_sphere_radious); udif -= pseudo_rand((float)index, u, (float)nx * 5.1f * i * (y), (float) x / y) * difx; vdif -= pseudo_rand((float)index, v, (float)ny * 1.52f * i * (x), (float) y / x) * dify; //printf("u%f, v%f, i-%i\n",udif,vdif,i); //change ray so it is a little diferent but inside the same pixel bounderies r.B = l_l_corner + ((u + udif)*horizontal + (v + vdif)*vertical); } col /= antia; //write r g and b on outvec _outvec[index * channels + 0] = int(255.99f*col[0]); 
_outvec[index * channels + 1] = int(255.99f*col[1]); _outvec[index * channels + 2] = int(255.99f*col[2]); } int main() { //image resolution config int nx = 1920 * 2; int ny = 1080 * 2; int channels = 3; //amount of antialiasing int antia = 8; //Set var for world inside cuda vec3 *sphere_centers; vec3 *sphere_colors; float *sphere_radious; //list for storing world values (for readbility) int worldsize = 3; //camera options //field of view float fov = 4.0f; float fovRatio = (float)ny / (float)nx; vec3 horizontal(fov,0.0f,0.0f); vec3 vertical(0.0f,fov * fovRatio,0.0f); //bounderies and origin vec3 lower_left_corner(-fov/2,-fov * fovRatio/2,-1.0f); vec3 origin(0.0f,0.0f,0.0f); //threading options int blockSize = 64; //vector for output image int * outvec;// = new int[nx * ny * channels]; //Start Cuda Profiler cout << "Starting GPU Parallelized section. \n"; cudaProfilerStart(); //Start Timer chrono::high_resolution_clock::time_point start = chrono::high_resolution_clock::now(); //mallocs for render cudaMallocManaged(&outvec, sizeof(int) * nx * ny * channels); //also for world cudaMallocManaged(&sphere_centers, sizeof(vec3) * worldsize); cudaMallocManaged(&sphere_colors, sizeof(vec3) * worldsize); cudaMallocManaged(&sphere_radious, sizeof(float) * worldsize); sphere_centers[0] = vec3(0,1,-2); sphere_colors [0] = vec3(0.2f, 0.8f, 0.5f); sphere_radious[0] = 0.5f; sphere_centers[1] = vec3(1,-0.5f,-2); sphere_colors [1] = vec3(0.8f, 0.2f, 0.1f); sphere_radious[1] = 0.5f; sphere_centers[2] = vec3(-1,-0.5f,-2); sphere_colors [2] = vec3(0.9f, 0.8f, 0.4f); sphere_radious[2] = 0.5f; //preparations and kernel call int numblocks = (nx * ny) / blockSize; //REMEMBER: each thread calculates a color for each channel, so the outvec is channelTimes bigger than the total number of threads int remainingThreads = (nx * ny) % blockSize; renderPixel<<<numblocks,blockSize>>>(nx,ny,channels,antia,outvec,sphere_centers,sphere_colors,sphere_radious,worldsize,origin,lower_left_corner,vertical,horizontal); //make a especial block for the remaining pixels (in case that the total pixel size is not factored by blocksize) if(blockSize > 0){ renderPixel<<<1,remainingThreads>>>(nx,ny,channels,antia,outvec,sphere_centers,sphere_colors,sphere_radious,worldsize,origin,lower_left_corner,vertical,horizontal); } //sync everything cudaDeviceSynchronize(); //End of GPU paralelization //End GPU time chrono::high_resolution_clock::time_point end = chrono::high_resolution_clock::now(); chrono::duration<double> time_span = chrono::duration_cast<chrono::duration<double>>(end - start); //Start write timer chrono::high_resolution_clock::time_point timeStart = chrono::high_resolution_clock::now(); //write file cout << "Writing File\n"; ofstream image; image.open("image.ppm",ios::out); image << "P3\n" << nx << " " << ny << "\n255\n"; for (int i = 0; i < nx * ny; i++){ image << outvec[i * channels] << " " << outvec[i * channels + 1] << " " << outvec[i * channels + 2] << "\n"; } image.close(); //End Writer timer end = chrono::high_resolution_clock::now(); //Flush timers cout << std::fixed << "GPU time taken: " << time_span.count() << "\n"; chrono::duration<double> write_time_span = chrono::duration_cast<chrono::duration<double>>(end - timeStart); cout << std::fixed << "Write time taken: " << write_time_span.count() << "\n"; chrono::duration<double> total_time_span = chrono::duration_cast<chrono::duration<double>>(end - start); cout << std::fixed << "Total time taken: " << total_time_span.count() << "\n"; //free memory cudaFree(outvec); 
cudaFree(sphere_centers); cudaFree(sphere_colors); cudaFree(sphere_radious); //done cout << "DONE\n"; cudaProfilerStop(); return 0; }
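The pseudo_rand helper works around the missing % operator for floats by truncating to int at a fixed decimal precision. Device code can also use fmodf from the CUDA math library, which stays in floating point; a variant along those lines is sketched below. The hash expression follows the original, but the function name, the mapping into [-1, 1) via fabsf/fmodf, and the small test harness are assumptions of this sketch, not code from the entry above.

#include <cstdio>
#include <cuda_runtime.h>

// Variant of the hash above that folds the chaotic value into [0, 2) with fmodf
// and then shifts it into [-1, 1). Deterministic scatter, not a real RNG.
__device__ float pseudoRandFmod(float a, float c, float d)
{
    float val = (((a + 134.854f + c) / 3.3f) * c) / (d + 3.645f);
    if (val == 0.0f)
        val = (c + d + 89.423f) * 9.308f * d * 1.54f + c;
    val = fabsf(val * 11.245f);
    return fmodf(val, 2.0f) - 1.0f;
}

__global__ void fill(float *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = pseudoRandFmod((float)i, 0.37f * i + 1.0f, 1.91f);
}

int main()
{
    const int n = 8;
    float *d, h[n];
    cudaMalloc(&d, n * sizeof(float));
    fill<<<1, n>>>(d, n);
    cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i)
        printf("%f\n", h[i]);
    cudaFree(d);
    return 0;
}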
d78dfe28dd626d255582c40d4da59a598577c059.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <system/Environment.h> #include <loops/transform_bool.h> #include <types/types.h> #include <system/op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformBoolSimple( const void *x, const Nd4jLong *xShapeInfo, int xRank, void *params, void *z, const Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { functions::transform::TransformBool<X,Z>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer,tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformBool<X,Y>::executeTransformShaped( dim3 launchDims, hipStream_t *stream, const int opNum, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_BOOL_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformBool<X,Z>::transformCuda( const void *vx, const Nd4jLong *xShapeInfo, void *vparams, void *vz, const Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { auto x = static_cast<const X*>(vx); auto z = static_cast<Z*>(vz); auto params = static_cast<X*>(vparams); auto reductionPointer = static_cast<Z*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') { for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); 
} else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); auto zOffset = shape::getIndexOffset(i, zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformBool<X,Z>::intermediateShaped( dim3 launchDims, hipStream_t *stream, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { hipLaunchKernelGGL(( transformBoolSimple<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); sd::DebugHelper::checkErrorCode(stream, "transformBool(...) failed"); } BUILD_DOUBLE_TEMPLATE(template class ND4J_LOCAL TransformBool, , LIBND4J_TYPES, BOOL_TYPES); } }
d78dfe28dd626d255582c40d4da59a598577c059.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <system/Environment.h> #include <loops/transform_bool.h> #include <types/types.h> #include <system/op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformBoolSimple( const void *x, const Nd4jLong *xShapeInfo, int xRank, void *params, void *z, const Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { functions::transform::TransformBool<X,Z>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer,tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformBool<X,Y>::executeTransformShaped( dim3 launchDims, cudaStream_t *stream, const int opNum, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_BOOL_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformBool<X,Z>::transformCuda( const void *vx, const Nd4jLong *xShapeInfo, void *vparams, void *vz, const Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { auto x = static_cast<const X*>(vx); auto z = static_cast<Z*>(vz); auto params = static_cast<X*>(vparams); auto reductionPointer = static_cast<Z*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') { for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto 
xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); auto zOffset = shape::getIndexOffset(i, zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformBool<X,Z>::intermediateShaped( dim3 launchDims, cudaStream_t *stream, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) { transformBoolSimple<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); sd::DebugHelper::checkErrorCode(stream, "transformBool(...) failed"); } BUILD_DOUBLE_TEMPLATE(template class ND4J_LOCAL TransformBool, , LIBND4J_TYPES, BOOL_TYPES); } }
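transformCuda above walks the whole buffer with a grid-stride loop: each thread starts at its global index and advances by gridDim.x * blockDim.x, so a fixed-size launch covers a buffer of any length. A stripped-down sketch of that pattern follows; the isPositive predicate stands in for OpType::op and the launch sizes are arbitrary.

#include <cstdio>
#include <cuda_runtime.h>

// Grid-stride loop as in transformCuda's fast path.
__global__ void isPositive(const float *x, bool *z, long long length)
{
    long long tid  = blockIdx.x * (long long)blockDim.x + threadIdx.x;
    long long step = (long long)gridDim.x * blockDim.x;
    for (long long i = tid; i < length; i += step)
        z[i] = x[i] > 0.0f;            // stand-in for OpType::op(x[i], params)
}

int main()
{
    const long long n = 1 << 20;
    float *x;
    bool  *z;
    cudaMalloc(&x, n * sizeof(float));
    cudaMalloc(&z, n * sizeof(bool));
    cudaMemset(x, 0, n * sizeof(float));
    isPositive<<<256, 256>>>(x, z, n);     // 65536 threads striding over ~1M elements
    cudaDeviceSynchronize();
    printf("done\n");
    cudaFree(x);
    cudaFree(z);
    return 0;
}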
4b89842a3ddd5dff97e384ba8fb6e9ab5977a2a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <stdbool.h> #include <sys/resource.h> //TODO: add support for when there's more sets than there are blocks __global__ void gCompress(uint8_t* fours, uint8_t* threes, size_t pitchfour, size_t pitchthree, size_t setcount) { long a = blockIdx.x; uint8_t* row4; uint8_t* row3; do { row4 = fours + a * pitchfour; row3 = threes + a * pitchthree; row3[threadIdx.x] = 0; row3[threadIdx.x] = (row4[threadIdx.x]) << (2 + (threadIdx.x * 2)); row3[threadIdx.x] += (row4[threadIdx.x + 1]) >> (4 - (threadIdx.x * 2)); a += 65535; } while(a < setcount); } __global__ void gUncompress(uint8_t* fours, uint8_t* threes, size_t pitchfour, size_t pitchthree, size_t setcount) { long a = blockIdx.x; uint8_t* row4; uint8_t* row3; do { row4 = fours + a * pitchfour; row3 = threes + a * pitchthree; row4[threadIdx.x] = 0; int i, ander = 0; for (i = 0; i < threadIdx.x; i++) ander += (48 >> (i*2)); if (threadIdx.x != 0) row4[threadIdx.x] = ((row3[threadIdx.x - 1]) << (4 - (2 * (threadIdx.x - 1)))) & ander; if (threadIdx.x != 3) row4[threadIdx.x] += ((row3[threadIdx.x]) >> (2 + (threadIdx.x * 2))); a += 65535; } while (a < setcount); } uint8_t getCharVal(char c) { if (c >= '0' && c <= '9') return c - '0'; else if (c >= 'a' && c <= 'z') return 10 + (c - 'a'); else if (c >= 'A' && c <= 'Z') return 36 + (c - 'A'); else return 62; } char getOriginalVal(uint8_t t) { if (t <= 9) return '0' + t; else if (t >= 10 && t <= 35) return 'a' + t - 10; else if (t >= 36 && t <= 61) return 'A' + t - 36; else return '\n'; } bool fileExists (char* name) { FILE* tmp = fopen (name, "rb"); bool exists = (tmp != NULL); if (tmp != NULL) fclose (tmp); return exists; } void compress(char* argv[]); void uncompress(char* argv[]); size_t setCount; size_t overflow; int main(int argc, char* argv[]) { if (argc != 3 || (strcmp(argv[1], "-c") != 0 && strcmp(argv[1], "-u") != 0)) { fprintf(stderr, "Usage:\n%s -c filename ....... to compress\n%s -u filename ....... 
to uncompress\n", argv[0], argv[0]); exit(0); } else if (!fileExists(argv[2])) { fprintf(stderr, "File %s does not exist.\n", argv[2]); exit(0); } const rlim_t kStackSize = 64L * 1024L * 1024L; // min stack size = 64 Mb struct rlimit rl; int result; result = getrlimit(RLIMIT_STACK, &rl); if (result == 0) { if (rl.rlim_cur < kStackSize) { rl.rlim_cur = kStackSize; result = setrlimit(RLIMIT_STACK, &rl); if (result != 0) { fprintf(stderr, "setrlimit returned result = %d\n", result); exit(0); } } } setCount = 0; if (strcmp(argv[1], "-c") == 0) compress(argv); else { uncompress(argv); } exit(0); } void compress(char* argv[]) { size_t i; char* filename = argv[2]; char* outfilename = (char*)malloc(sizeof(char) * 64); sprintf(outfilename, "%s.bcg", filename); FILE* infile = fopen(filename, "r"); FILE* outfile = fopen(outfilename, "w+"); long filesize = 0; fseek(infile, 0, SEEK_END); filesize = ftell(infile); fseek(infile, 0, SEEK_SET); overflow = filesize % 4; setCount = filesize / 4; if (overflow > 0) setCount++; uint8_t threebytes[setCount][3]; uint8_t fourbytes[setCount][4]; i = 0; while (!feof(infile)) { fourbytes[i / 4][i % 4] = getCharVal(fgetc(infile)); i++; } fclose(infile); size_t pitch3, pitch4; uint8_t* garr3; uint8_t* garr4; hipMallocPitch((void**)&garr3, &pitch3, (size_t)(3 * sizeof(uint8_t)), setCount); hipMallocPitch((void**)&garr4, &pitch4, (size_t)(4 * sizeof(uint8_t)), setCount); hipMemcpy2D((void*)garr4, pitch4, fourbytes, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), setCount, hipMemcpyHostToDevice); if (setCount <= 65535) hipLaunchKernelGGL(( gCompress), dim3(setCount), dim3(3), 0, 0, garr4, garr3, pitch4, pitch3, setCount); else hipLaunchKernelGGL(( gCompress), dim3(65535), dim3(3), 0, 0, garr4, garr3, pitch4, pitch3, setCount); hipMemcpy2D(threebytes, 3 * sizeof(uint8_t), garr3, pitch3, 3 * sizeof(uint8_t), setCount, hipMemcpyDeviceToHost); hipFree(garr3); hipFree(garr4); for (i = 0; i < setCount; i++) { fprintf(outfile, "%c%c%c", threebytes[i][0], threebytes[i][1], threebytes[i][2]); } fprintf(outfile, "%i", overflow); fclose(outfile); free(outfilename); } void uncompress(char* argv[]) { size_t i; //acquire and handle file overhead char* filename = argv[2]; char* outfilename = (char*)malloc(sizeof(char) * 64); sprintf(outfilename, "%s.out", filename); FILE* infile = fopen(filename, "r"); FILE* outfile = fopen(outfilename, "w+"); //determine file size and number of sets long filesize = 0; fseek(infile, 0, SEEK_END); filesize = ftell(infile) - 1; //don't count end delimiter fseek(infile, 0, SEEK_SET); setCount = filesize / 3; uint8_t threebytes[setCount][3]; uint8_t fourbytes[setCount][4]; //get file data i = 0; while (i < filesize) { threebytes[i / 3][i % 3] = (uint8_t)(fgetc(infile)); i++; } uint8_t delim = fgetc(infile) - '0'; fclose(infile); //begin gpu section size_t pitch3, pitch4; uint8_t* garr3; uint8_t* garr4; hipMallocPitch((void**)&garr3, &pitch3, (size_t)(3 * sizeof(uint8_t)), setCount); hipMallocPitch((void**)&garr4, &pitch4, (size_t)(4 * sizeof(uint8_t)), setCount); hipMemcpy2D((void*)garr3, pitch3, threebytes, 3 * sizeof(uint8_t), 3 * sizeof(uint8_t), setCount, hipMemcpyHostToDevice); if (setCount <= 65535) hipLaunchKernelGGL(( gUncompress), dim3(setCount), dim3(4), 0, 0, garr4, garr3, pitch4, pitch3, setCount); else hipLaunchKernelGGL(( gUncompress), dim3(65535), dim3(4), 0, 0, garr4, garr3, pitch4, pitch3, setCount); hipMemcpy2D(fourbytes, 4 * sizeof(uint8_t), garr4, pitch4, 4 * sizeof(uint8_t), setCount, hipMemcpyDeviceToHost); hipFree(garr3); hipFree(garr4); 
    for (i = 0; i < setCount; i++) {
        if (delim == 0 || i != setCount - 1)
            fprintf(outfile, "%c%c%c%c", getOriginalVal(fourbytes[i][0]), getOriginalVal(fourbytes[i][1]),
                    getOriginalVal(fourbytes[i][2]), getOriginalVal(fourbytes[i][3]));
        else {
            int k;
            for (k = 0; k < delim; k++)
                fprintf(outfile, "%c", getOriginalVal(fourbytes[i][k]));
        }
    }
    fclose(outfile);
    free(outfilename);
}
4b89842a3ddd5dff97e384ba8fb6e9ab5977a2a6.cu
#include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <stdbool.h> #include <sys/resource.h> //TODO: add support for when there's more sets than there are blocks __global__ void gCompress(uint8_t* fours, uint8_t* threes, size_t pitchfour, size_t pitchthree, size_t setcount) { long a = blockIdx.x; uint8_t* row4; uint8_t* row3; do { row4 = fours + a * pitchfour; row3 = threes + a * pitchthree; row3[threadIdx.x] = 0; row3[threadIdx.x] = (row4[threadIdx.x]) << (2 + (threadIdx.x * 2)); row3[threadIdx.x] += (row4[threadIdx.x + 1]) >> (4 - (threadIdx.x * 2)); a += 65535; } while(a < setcount); } __global__ void gUncompress(uint8_t* fours, uint8_t* threes, size_t pitchfour, size_t pitchthree, size_t setcount) { long a = blockIdx.x; uint8_t* row4; uint8_t* row3; do { row4 = fours + a * pitchfour; row3 = threes + a * pitchthree; row4[threadIdx.x] = 0; int i, ander = 0; for (i = 0; i < threadIdx.x; i++) ander += (48 >> (i*2)); if (threadIdx.x != 0) row4[threadIdx.x] = ((row3[threadIdx.x - 1]) << (4 - (2 * (threadIdx.x - 1)))) & ander; if (threadIdx.x != 3) row4[threadIdx.x] += ((row3[threadIdx.x]) >> (2 + (threadIdx.x * 2))); a += 65535; } while (a < setcount); } uint8_t getCharVal(char c) { if (c >= '0' && c <= '9') return c - '0'; else if (c >= 'a' && c <= 'z') return 10 + (c - 'a'); else if (c >= 'A' && c <= 'Z') return 36 + (c - 'A'); else return 62; } char getOriginalVal(uint8_t t) { if (t <= 9) return '0' + t; else if (t >= 10 && t <= 35) return 'a' + t - 10; else if (t >= 36 && t <= 61) return 'A' + t - 36; else return '\n'; } bool fileExists (char* name) { FILE* tmp = fopen (name, "rb"); bool exists = (tmp != NULL); if (tmp != NULL) fclose (tmp); return exists; } void compress(char* argv[]); void uncompress(char* argv[]); size_t setCount; size_t overflow; int main(int argc, char* argv[]) { if (argc != 3 || (strcmp(argv[1], "-c") != 0 && strcmp(argv[1], "-u") != 0)) { fprintf(stderr, "Usage:\n%s -c filename ....... to compress\n%s -u filename ....... 
to uncompress\n", argv[0], argv[0]); exit(0); } else if (!fileExists(argv[2])) { fprintf(stderr, "File %s does not exist.\n", argv[2]); exit(0); } const rlim_t kStackSize = 64L * 1024L * 1024L; // min stack size = 64 Mb struct rlimit rl; int result; result = getrlimit(RLIMIT_STACK, &rl); if (result == 0) { if (rl.rlim_cur < kStackSize) { rl.rlim_cur = kStackSize; result = setrlimit(RLIMIT_STACK, &rl); if (result != 0) { fprintf(stderr, "setrlimit returned result = %d\n", result); exit(0); } } } setCount = 0; if (strcmp(argv[1], "-c") == 0) compress(argv); else { uncompress(argv); } exit(0); } void compress(char* argv[]) { size_t i; char* filename = argv[2]; char* outfilename = (char*)malloc(sizeof(char) * 64); sprintf(outfilename, "%s.bcg", filename); FILE* infile = fopen(filename, "r"); FILE* outfile = fopen(outfilename, "w+"); long filesize = 0; fseek(infile, 0, SEEK_END); filesize = ftell(infile); fseek(infile, 0, SEEK_SET); overflow = filesize % 4; setCount = filesize / 4; if (overflow > 0) setCount++; uint8_t threebytes[setCount][3]; uint8_t fourbytes[setCount][4]; i = 0; while (!feof(infile)) { fourbytes[i / 4][i % 4] = getCharVal(fgetc(infile)); i++; } fclose(infile); size_t pitch3, pitch4; uint8_t* garr3; uint8_t* garr4; cudaMallocPitch((void**)&garr3, &pitch3, (size_t)(3 * sizeof(uint8_t)), setCount); cudaMallocPitch((void**)&garr4, &pitch4, (size_t)(4 * sizeof(uint8_t)), setCount); cudaMemcpy2D((void*)garr4, pitch4, fourbytes, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), setCount, cudaMemcpyHostToDevice); if (setCount <= 65535) gCompress<<<setCount, 3>>>(garr4, garr3, pitch4, pitch3, setCount); else gCompress<<<65535, 3>>>(garr4, garr3, pitch4, pitch3, setCount); cudaMemcpy2D(threebytes, 3 * sizeof(uint8_t), garr3, pitch3, 3 * sizeof(uint8_t), setCount, cudaMemcpyDeviceToHost); cudaFree(garr3); cudaFree(garr4); for (i = 0; i < setCount; i++) { fprintf(outfile, "%c%c%c", threebytes[i][0], threebytes[i][1], threebytes[i][2]); } fprintf(outfile, "%i", overflow); fclose(outfile); free(outfilename); } void uncompress(char* argv[]) { size_t i; //acquire and handle file overhead char* filename = argv[2]; char* outfilename = (char*)malloc(sizeof(char) * 64); sprintf(outfilename, "%s.out", filename); FILE* infile = fopen(filename, "r"); FILE* outfile = fopen(outfilename, "w+"); //determine file size and number of sets long filesize = 0; fseek(infile, 0, SEEK_END); filesize = ftell(infile) - 1; //don't count end delimiter fseek(infile, 0, SEEK_SET); setCount = filesize / 3; uint8_t threebytes[setCount][3]; uint8_t fourbytes[setCount][4]; //get file data i = 0; while (i < filesize) { threebytes[i / 3][i % 3] = (uint8_t)(fgetc(infile)); i++; } uint8_t delim = fgetc(infile) - '0'; fclose(infile); //begin gpu section size_t pitch3, pitch4; uint8_t* garr3; uint8_t* garr4; cudaMallocPitch((void**)&garr3, &pitch3, (size_t)(3 * sizeof(uint8_t)), setCount); cudaMallocPitch((void**)&garr4, &pitch4, (size_t)(4 * sizeof(uint8_t)), setCount); cudaMemcpy2D((void*)garr3, pitch3, threebytes, 3 * sizeof(uint8_t), 3 * sizeof(uint8_t), setCount, cudaMemcpyHostToDevice); if (setCount <= 65535) gUncompress<<<setCount, 4>>>(garr4, garr3, pitch4, pitch3, setCount); else gUncompress<<<65535, 4>>>(garr4, garr3, pitch4, pitch3, setCount); cudaMemcpy2D(fourbytes, 4 * sizeof(uint8_t), garr4, pitch4, 4 * sizeof(uint8_t), setCount, cudaMemcpyDeviceToHost); cudaFree(garr3); cudaFree(garr4); for (i = 0; i < setCount; i++) { if (delim == 0 || i != setCount - 1) fprintf(outfile, "%c%c%c%c", getOriginalVal(fourbytes[i][0]), 
getOriginalVal(fourbytes[i][1]), getOriginalVal(fourbytes[i][2]), getOriginalVal(fourbytes[i][3])); else { int k; for (k = 0; k < delim; k++) fprintf(outfile, "%c", getOriginalVal(fourbytes[i][k])); } } fclose(outfile); free(outfilename); }
e228793f7b8e13825b55b352d3c6f446ca3d5f31.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>   // rand(), RAND_MAX, malloc()
#include <math.h>

#define N (16*1024)
#define THREADS_PER_BLOCK 512.0

// Fill a[0..n-1] with random floats in [0, maxVal].
void random_floats(float *a, int n){
    int i;
    float maxVal = 5.0;
    for(i = 0; i < n; i++){
        a[i] = ((float)rand()/(float)RAND_MAX)*maxVal;
    }
}

// Tree reduction in shared memory: each block writes one partial sum.
__global__ void sum(float *inp, float* blockSums)
{
    __shared__ float intraBlock[512];
    int lindex = threadIdx.x;
    int gindex = blockIdx.x*blockDim.x + threadIdx.x;
    if(gindex < N) intraBlock[lindex] = inp[gindex];
    __syncthreads();
    if(lindex < 256 && gindex+256 < N) intraBlock[lindex] += intraBlock[lindex+256];
    __syncthreads();
    if(lindex < 128 && gindex+128 < N) intraBlock[lindex] += intraBlock[lindex+128];
    __syncthreads();
    if(lindex < 64 && gindex+64 < N) intraBlock[lindex] += intraBlock[lindex+64];
    __syncthreads();
    if(lindex < 32 && gindex+32 < N) intraBlock[lindex] += intraBlock[lindex+32];
    __syncthreads();
    if(lindex < 16 && gindex+16 < N) intraBlock[lindex] += intraBlock[lindex+16];
    __syncthreads();
    if(lindex < 8 && gindex+8 < N) intraBlock[lindex] += intraBlock[lindex+8];
    __syncthreads();
    if(lindex < 4 && gindex+4 < N) intraBlock[lindex] += intraBlock[lindex+4];
    __syncthreads();
    if(lindex < 2 && gindex+2 < N) intraBlock[lindex] += intraBlock[lindex+2];
    __syncthreads();
    if(lindex < 1 && gindex+1 < N) intraBlock[lindex] += intraBlock[lindex+1];
    __syncthreads();
    if(lindex == 0) blockSums[blockIdx.x] = intraBlock[0];
}

// Recompute the sum sequentially on the CPU and compare with the GPU result.
int checkSum(float *a, float calcSum)
{
    float s = 0;
    int i;
    for(i = 0; i < N; i++)
        s += a[i];
    printf("Sequential Sum = %f\n", s);
    if(fabsf(s - calcSum) > 0.1)
        return 0;
    else
        return 1;
}

int main(){
    float *a, *blockSums;
    int size = N*sizeof(float);
    int no_of_blocks = ceil(N/THREADS_PER_BLOCK);
    a = (float*)malloc(size);
    blockSums = (float*)malloc(no_of_blocks*sizeof(float));
    random_floats(a, N);

    float *d_a, *d_blockSums;
    hipMalloc((void**)&d_a, size);
    hipMalloc((void**)&d_blockSums, no_of_blocks*sizeof(float));
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL((sum), dim3(no_of_blocks), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_blockSums);
    hipMemcpy(blockSums, d_blockSums, no_of_blocks*sizeof(float), hipMemcpyDeviceToHost);

    // Reduce the per-block partial sums on the host.
    float finSum = 0;
    int i;
    for(i = 0; i < no_of_blocks; i++)
        finSum += blockSums[i];
    printf("Final Sum = %f\n", finSum);
    checkSum(a, finSum);
}
e228793f7b8e13825b55b352d3c6f446ca3d5f31.cu
#include <stdio.h>
#include <stdlib.h>   // rand(), RAND_MAX, malloc()
#include <math.h>

#define N (16*1024)
#define THREADS_PER_BLOCK 512.0

// Fill a[0..n-1] with random floats in [0, maxVal].
void random_floats(float *a, int n){
    int i;
    float maxVal = 5.0;
    for(i = 0; i < n; i++){
        a[i] = ((float)rand()/(float)RAND_MAX)*maxVal;
    }
}

// Tree reduction in shared memory: each block writes one partial sum.
__global__ void sum(float *inp, float* blockSums)
{
    __shared__ float intraBlock[512];
    int lindex = threadIdx.x;
    int gindex = blockIdx.x*blockDim.x + threadIdx.x;
    if(gindex < N) intraBlock[lindex] = inp[gindex];
    __syncthreads();
    if(lindex < 256 && gindex+256 < N) intraBlock[lindex] += intraBlock[lindex+256];
    __syncthreads();
    if(lindex < 128 && gindex+128 < N) intraBlock[lindex] += intraBlock[lindex+128];
    __syncthreads();
    if(lindex < 64 && gindex+64 < N) intraBlock[lindex] += intraBlock[lindex+64];
    __syncthreads();
    if(lindex < 32 && gindex+32 < N) intraBlock[lindex] += intraBlock[lindex+32];
    __syncthreads();
    if(lindex < 16 && gindex+16 < N) intraBlock[lindex] += intraBlock[lindex+16];
    __syncthreads();
    if(lindex < 8 && gindex+8 < N) intraBlock[lindex] += intraBlock[lindex+8];
    __syncthreads();
    if(lindex < 4 && gindex+4 < N) intraBlock[lindex] += intraBlock[lindex+4];
    __syncthreads();
    if(lindex < 2 && gindex+2 < N) intraBlock[lindex] += intraBlock[lindex+2];
    __syncthreads();
    if(lindex < 1 && gindex+1 < N) intraBlock[lindex] += intraBlock[lindex+1];
    __syncthreads();
    if(lindex == 0) blockSums[blockIdx.x] = intraBlock[0];
}

// Recompute the sum sequentially on the CPU and compare with the GPU result.
int checkSum(float *a, float calcSum)
{
    float s = 0;
    int i;
    for(i = 0; i < N; i++)
        s += a[i];
    printf("Sequential Sum = %f\n", s);
    if(fabsf(s - calcSum) > 0.1)
        return 0;
    else
        return 1;
}

int main(){
    float *a, *blockSums;
    int size = N*sizeof(float);
    int no_of_blocks = ceil(N/THREADS_PER_BLOCK);
    a = (float*)malloc(size);
    blockSums = (float*)malloc(no_of_blocks*sizeof(float));
    random_floats(a, N);

    float *d_a, *d_blockSums;
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_blockSums, no_of_blocks*sizeof(float));
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    sum<<<no_of_blocks, THREADS_PER_BLOCK>>>(d_a, d_blockSums);
    cudaMemcpy(blockSums, d_blockSums, no_of_blocks*sizeof(float), cudaMemcpyDeviceToHost);

    // Reduce the per-block partial sums on the host.
    float finSum = 0;
    int i;
    for(i = 0; i < no_of_blocks; i++)
        finSum += blockSums[i];
    printf("Final Sum = %f\n", finSum);
    checkSum(a, finSum);
}
3330161ab8620e4cb435119deeaa6ed9de161d02.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // WeightChannelNorm op in Caffe2 for GPU // Written by Kai Hu // This is a stand-alone op: Y = gamma * (X - weighted_mu) * weighted_rsig + beta // where // weighted_mu_ni = sum(mu_nc * weight_ci, c) // weighted_sig_ni = sum((sig_nc^2 + mu_nc^2) * weight_ci, c) - (sum(mu_nc * weight_ci, c))^2 // ------------------------------------------------------------------ #include "weight_channel_norm_op.h" #include <array> #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "caffe2/utils/math_utils.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; __global__ void InvStdCUDAKernel( const int size, const float epsilon, const float* var, float* rsig) { CUDA_1D_KERNEL_LOOP(i, size) { #if __CUDA_ARCH__ >= 350 rsig[i] = rsqrtf(__ldg(var + i) + epsilon); #else rsig[i] = rsqrtf(var[i] + epsilon); #endif } } template <typename T, StorageOrder kOrder> __global__ void WeightChannelNormForwardCUDAKernel( const int size, const int C, const int HxW, const T* X, const T* weighted_mu, const T* weighted_rsig, const T* gamma, const T* beta, T* Y) { CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = kOrder == StorageOrder::NCHW ? i / HxW : i / (C * HxW) * C + (i % C); const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C; #if __CUDA_ARCH__ >= 350 Y[i] = __ldg(gamma + i_gamma) * (__ldg(X + i) - __ldg(weighted_mu + i_mu)) * __ldg(weighted_rsig + i_mu) + __ldg(beta + i_gamma); #else Y[i] = gamma[i_gamma] * (X[i] - weighted_mu[i_mu]) * weighted_rsig[i_mu] + beta[i_gamma]; #endif } } // add by Kai Hu, add function to compute the weighted mean and variance. 
template <typename T> __global__ void WeightChannelMeanCUDAKernel( const int N, const int C, const T* mu, const T* weight, T* weighted_mu) { const int outer_size = N * C; const int inner_size = C; __shared__ typename BlockReduce<T>::TempStorage wmu_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T wmu_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int i_nc = i; const int i_wc = j * C + i % C; const int i_nw = (i / C) * C + j; #if __CUDA_ARCH__ >= 350 wmu_val += __ldg(mu + i_nw) * __ldg(weight + i_wc); #else wmu_val += mu[i_nw] * weight[i_wc]; #endif } wmu_val = BlockReduce<T>(wmu_storage).Reduce(wmu_val, hipcub::Sum()); if (threadIdx.x == 0) { weighted_mu[i] = wmu_val; } __syncthreads(); } } template <typename T> __global__ void WeightChannelVarianceCUDAKernel( const int N, const int C, const T* mu, const T* var, const T* weight, T* weighted_mu, T* weighted_var) { const int outer_size = N * C; const int inner_size = C; __shared__ typename BlockReduce<T>::TempStorage wvar_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T wvar_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int i_nc = i; const int i_wc = j * C + i % C; const int i_nw = (i / C) * C + j; #if __CUDA_ARCH__ >= 350 wvar_val += __ldg(weight + i_wc) * (__ldg(var + i_nw) + __ldg(mu + i_nw) * __ldg(mu + i_nw) - __ldg(weighted_mu + i_nc) * __ldg(weighted_mu + i_nc)); #else wvar_val += weight[i_wc] * (var[i_nw] + mu[i_nw] * mu[i_nw] - weighted_mu[i_nc] * weighted_mu[i_nc]); #endif } wvar_val = BlockReduce<T>(wvar_storage).Reduce(wvar_val, hipcub::Sum()); if (threadIdx.x == 0) { weighted_var[i] = wvar_val; } __syncthreads(); } } template <typename T, StorageOrder kOrder> __global__ void ComputeInternalGradientsCUDAKernel( const int N, const int C, const int HxW, const T* dY, const T* X, const T* gamma, T* ds, T* db) { const int outer_size = N * C; const int inner_size = HxW; __shared__ typename BlockReduce<T>::TempStorage ds_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T ds_val = 0; T db_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int i_gamma = i % C; const int index = kOrder == StorageOrder::NCHW ? 
i * inner_size + j : ((i / C) * HxW + j) * C + i % C; #if __CUDA_ARCH__ >= 350 ds_val += __ldg(gamma + i_gamma) * __ldg(dY + index) * __ldg(X + index); db_val += __ldg(gamma + i_gamma) * __ldg(dY + index); #else ds_val += gamma[i_gamma] * dY[index] * X[index]; db_val += gamma[i_gamma] * dY[index]; #endif } ds_val = BlockReduce<T>(ds_storage).Reduce(ds_val, hipcub::Sum()); db_val = BlockReduce<T>(db_storage).Reduce(db_val, hipcub::Sum()); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; } __syncthreads(); } } template <typename T> __global__ void ComputeTempVariable( const int N, const int C, const int HxW, const T* weighted_mu, const T* weighted_rsig, const T* weight, const T* ds, const T* db, T* temp_u, T* temp_v) { const T denom = T(1) / static_cast<T>(HxW); const int outer_size = N * C; const int inner_size = C; __shared__ typename BlockReduce<T>::TempStorage u_storage; __shared__ typename BlockReduce<T>::TempStorage v_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T u_val = 0; T v_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int i_nj = (i / C) * C + j; const int i_cj = (i % C) * C + j; #if __CUDA_ARCH__ >= 350 u_val += (__ldg(db + i_nj) * __ldg(weighted_mu + i_nj) - __ldg(ds + i_nj)) * __ldg(weight + i_cj) * math::utils::Cube<T>(__ldg(weighted_rsig + i_nj)); v_val += (__ldg(db + i_nj) * __ldg(weighted_mu + i_nj) - __ldg(ds + i_nj)) * __ldg(weight + i_cj) * math::utils::Cube<T>(__ldg(weighted_rsig + i_nj)) * __ldg(weighted_mu + i_nj); v_val += __ldg(db + i_nj) * __ldg(weighted_rsig + i_nj) * __ldg(weight + i_cj); #else u_val += (db[i_nj] * weighted_mu[i_nj] - ds[i_nj]) * weight[i_cj] * math::utils::Cube<T>(weighted_rsig[i_nj]); v_val += (db[i_nj] * weighted_mu[i_nj] - ds[i_nj]) * weight[i_cj] * math::utils::Cube<T>(weighted_rsig[i_nj]) * weighted_mu[i_nj]; v_val += db[i_nj] * weighted_rsig[i_nj] * weight[i_cj]; #endif } u_val = BlockReduce<T>(u_storage).Reduce(u_val, hipcub::Sum()); v_val = BlockReduce<T>(v_storage).Reduce(v_val, hipcub::Sum()); if (threadIdx.x == 0) { temp_u[i] = u_val * denom; temp_v[i] = v_val * denom; } } } // Math: // Y = gamma * (X - weighted_mu) * weighted_rsig + beta // let s = gamma * weighted_rsig // let b = beta - gamma * weighted_mu * weighted_rsig // Y = s * X + b // let n = HxW // dL/dX = dL/dY * dY/dX = dL/dY * (s + X * ds/dX + db/dX) // ds/dX = gamma * dweighted_rsig/dX // db/dX = -gamma * weighted_mu * dweighted_rsig/dX - gamma * weighted_rsig * dweighted_mu/dX // Attention: dL/ds, dL/db has timed the gamma, so wo don't time it there in the code. // dweighted_rsig/dX = -0.5 * weighted_rsig^3 * dweighted_var/dX // dweighted_var/dX = dweighted_var/dvar * dvar/dX + dweighted_var/dmu * dmu/dX // dweighted_mu/dX = weight * dmu/dX // dsig/dX = 2 * (X - mu) / n * (1 - 1/n) attention: GN has ignored the (1 - 1/n) reasonable, we also ignore it. 
// dmu/dX = 1 / n // dL/dgamma = dL/dY * (dY/ds * ds/dgamma + dY/db * db/dgamma) // = dL/dY * (X - weighted_mu) * weighted_rsig // dL/dbeta = dL/dY // dL/dweight = dL/dY * (dY/ds * ds/dweighted_sig * dweighted_sig/dweight + dY/db * db/dweighted_mu * dweighted_mu/dweight + dY/db * db/dsig_ * dsig_/dweight) // = dL/dY * (-0.5 * X * rsig_^3 * dsig_/dweight - gamma * rsig_ * dmu_/dweight + gamma * mu_ * rsig_^3 * drsig_/dweight) // TODO:Kai Hu This function can be optimized // template <typename T, StorageOrder kOrder> // __global__ void WeightChannelNormBackwardCUDAKernel( // const int size, // const int C, // const int HxW, // const T* dY, // const T* X, // const T* mu, // const T* var, // const T* weighted_mu, // const T* weighted_rsig, // const T* gamma, // const T* weight, // const T* ds, // const T* db, // T* dX) { // const T denom = T(1) / static_cast<T>(HxW); // const int outer_size = size; // const int inner_size = C; // __shared__ typename BlockReduce<T>::TempStorage u_storage; // __shared__ typename BlockReduce<T>::TempStorage v_storage; // for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { // T u_val = 0; // T v_val = 0; // for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { // const int i_mu = kOrder == StorageOrder::NCHW // ? i / HxW // : (i / (C * HxW)) * C + (i % C); // (n,c) // const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C; // (c) // const int i_nj = i_mu / C * C + j; // (n,c') // const int i_cj = i_gamma * C + j; // (c,c') // #if __CUDA_ARCH__ >= 350 // u_val += (__ldg(db + i_nj) * __ldg(weighted_mu + i_nj) - __ldg(ds + i_nj)) * // __ldg(weight + i_cj) * (__ldg(X + i) - __ldg(weighted_mu + i_nj)) * // math::utils::Cube<T>(__ldg(weighted_rsig + i_nj)); // v_val += __ldg(db + i_nj) * __ldg(weighted_rsig + i_nj) * __ldg(weight + i_cj); // #else // u_val += (db[i_nj] * weighted_mu[i_nj] - ds[i_nj]) * weight[i_cj] * (X[i] - weighted_mu[i_nj]) * math::utils::Cube<T>(weighted_rsig[i_nj]); // v_val += db[i_nj] * weighted_rsig[i_nj] * weight[i_cj]; // #endif // } // u_val = BlockReduce<T>(u_storage).Reduce(u_val, hipcub::Sum()); // v_val = BlockReduce<T>(v_storage).Reduce(v_val, hipcub::Sum()); // if (threadIdx.x == 0) { // const int i_mu = kOrder == StorageOrder::NCHW // ? i / HxW // : (i / (C * HxW)) * C + (i % C); // (n,c) // const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C; // (c) // dX[i] = gamma[i_gamma] * dY[i] * weighted_rsig[i_mu] + (u_val - v_val) * denom; // } // __syncthreads(); // } // } template <typename T, StorageOrder kOrder> __global__ void WeightChannelNormBackwardCUDAKernel( const int size, const int C, const int HxW, const T* dY, const T* X, const T* weighted_rsig, const T* gamma, const T* temp_u, const T* temp_v, T* dX) { CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = kOrder == StorageOrder::NCHW ? i / HxW : (i / (C * HxW)) * C + (i % C); // (n,c) const int i_gamma = kOrder == StorageOrder::NCHW ? 
(i / HxW) % C : i % C; // (c) dX[i] = X[i] * temp_u[i_mu] - temp_v[i_mu] + gamma[i_gamma] * dY[i] * weighted_rsig[i_mu]; } } template <typename T, StorageOrder kOrder> __global__ void GammaBetaBackwardCUDAKernel( const int N, const int C, const int HxW, const T* dY, const T* X, const T* weighted_mu, const T* weighted_rsig, T* dgamma, T* dbeta) { const int outer_size = C; const int inner_size = N * HxW; __shared__ typename BlockReduce<T>::TempStorage dg_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T dg_val = 0; T db_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int n = j / HxW; const int index = kOrder == StorageOrder::NCHW ? (n * outer_size + i) * HxW + j % HxW : j * outer_size + i; const int i_mu = n * outer_size + i; #if __CUDA_ARCH__ >= 350 dg_val += __ldg(dY + index) * (__ldg(X + index) - __ldg(weighted_mu + i_mu)) * __ldg(weighted_rsig + i_mu); db_val += __ldg(dY + index); #else dg_val += dY[index] * (X[index] - weighted_mu[i_mu]) * weighted_rsig[i_mu]; db_val += dY[index]; #endif } dg_val = BlockReduce<T>(dg_storage).Reduce(dg_val, hipcub::Sum()); db_val = BlockReduce<T>(db_storage).Reduce(db_val, hipcub::Sum()); if (threadIdx.x == 0) { dgamma[i] = dg_val; dbeta[i] = db_val; } __syncthreads(); } } template <typename T, StorageOrder kOrder> __global__ void WeightBackwardCUDAKernel( const int N, const int C, const T* mu, const T* var, const T* weighted_mu, const T* weighted_rsig, const T* gamma, const T* ds, const T* db, T* dweight) { const int outer_size = C * C; const int inner_size = N; __shared__ typename BlockReduce<T>::TempStorage dw_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T dw_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { // const int n = j; // const int k = i % C; // const int c = i / C; const int i_nc = j * C + i / C; const int i_c = i / C; const int i_nk = j * C + i % C; #if __CUDA_ARCH__ >= 350 const T u = 0.5 * (__ldg(db + i_nk) * __ldg(weighted_mu + i_nk) - __ldg(ds + i_nk)) * math::utils::Cube<T>(__ldg(weighted_rsig + i_nk)) * (__ldg(var + i_nc) + __ldg(mu + i_nc) * __ldg(mu + i_nc) - 2 * __ldg(weighted_mu + i_nk) * __ldg(mu + i_nc)); const T v = __ldg(db + i_nk) * __ldg(weighted_rsig + i_nk) * __ldg(mu + i_nc); dw_val += u - v; #else const T u = 0.5 * (db[i_nk] * weighted_mu[i_nk] - ds[i_nk]) * math::utils::Cube<T>(weighted_rsig[i_nk]) * (var[i_nc] + mu[i_nc] * mu[i_nc] - 2 * weighted_mu[i_nk] * mu[i_nc]); const T v = db[i_nk] * weighted_rsig[i_nk] * mu[i_nc]; dw_val += u - v; #endif } dw_val = BlockReduce<T>(dw_storage).Reduce(dw_val, hipcub::Sum()); if (threadIdx.x == 0) { dweight[i] = dw_val; } __syncthreads(); } } } // namespace template <> bool WeightChannelNormOp<float, CUDAContext>::RunOnDeviceImpl( const int N, const int C, const int HxW, const float* X_data, const float* gamma_data, const float* beta_data, const float* weight_data, float* Y_data, float* mu_data, float* var_data, float* weighted_mu_data, float* weighted_rsig_data) { const std::array<int, 3> dims = order_ == StorageOrder::NCHW ? std::array<int, 3>{N, C, HxW} : std::array<int, 3>{N, HxW, C}; const std::array<int, 1> axes = order_ == StorageOrder::NCHW ? std::array<int, 1>{2} : std::array<int, 1>{1}; // Computes mean and variance. math::Moments<float, CUDAContext>( 3, dims.data(), 1, axes.data(), X_data, mu_data, var_data, &context_); // Computes weighted mean and variance. 
hipLaunchKernelGGL(( WeightChannelMeanCUDAKernel<float>) , dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, mu_data, weight_data, weighted_mu_data); hipLaunchKernelGGL(( WeightChannelVarianceCUDAKernel<float>) , dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, mu_data, var_data, weight_data, weighted_mu_data, weighted_rsig_data); // Uses rsqrt to computes 1 / std which is much faster than computes std. hipLaunchKernelGGL(( InvStdCUDAKernel), dim3(CAFFE_GET_BLOCKS(N * C)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N * C, epsilon_, weighted_rsig_data, weighted_rsig_data); // Computes Y = gamma * (X - weighted_mu) * weighted_rsig + beta. const int size = N * C * HxW; if (order_ == StorageOrder::NCHW) { hipLaunchKernelGGL(( WeightChannelNormForwardCUDAKernel<float, StorageOrder::NCHW>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, C, HxW, X_data, weighted_mu_data, weighted_rsig_data, gamma_data, beta_data, Y_data); } else { hipLaunchKernelGGL(( WeightChannelNormForwardCUDAKernel<float, StorageOrder::NHWC>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, C, HxW, X_data, weighted_mu_data, weighted_rsig_data, gamma_data, beta_data, Y_data); } return true; } // Math: // let: s = gamma * rsig // let: b = beta - mu * gamma * rsig // then: Y = s * X + b template <> bool WeightChannelNormGradientOp<float, CUDAContext>::RunOnDeviceImpl( const int N, const int C, const int HxW, const float* dY_data, const float* X_data, const float* mu_data, const float* var_data, const float* weighted_mu_data, const float* weighted_rsig_data, const float* gamma_data, const float* weight_data, float* dX_data, float* dgamma_data, float* dbeta_data, float* dweight_data) { const int size = N * C * HxW; ds_.Resize(N, C); db_.Resize(N, C); temp_u.Resize(N, C); temp_v.Resize(N, C); float* ds_data = ds_.mutable_data<float>(); float* db_data = db_.mutable_data<float>(); float* temp_u_data = temp_u.mutable_data<float>(); float* temp_v_data = temp_v.mutable_data<float>(); if (order_ == StorageOrder::NCHW) { // Computes dL/ds and dL/db. // dL/ds = Sum(dL/dY * gamma * X) // dL/db = Sum(dL/dY * gamma) // Attention: dL/ds, dL/db has timed the gamma to accelerate the computation. hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<float, StorageOrder::NCHW>) , dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, dY_data, X_data, gamma_data, ds_data, db_data); hipLaunchKernelGGL(( ComputeTempVariable<float>) , dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, weighted_mu_data, weighted_rsig_data, weight_data, ds_data, db_data, temp_u_data, temp_v_data); // Computes dL/dX. hipLaunchKernelGGL(( WeightChannelNormBackwardCUDAKernel<float, StorageOrder::NCHW>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, C, HxW, dY_data, X_data, weighted_rsig_data, gamma_data, temp_u_data, temp_v_data, dX_data); // Computes dL/dgamma and dL/dbeta. 
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<float, StorageOrder::NCHW>) , dim3(::min(C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, dY_data, X_data, weighted_mu_data, weighted_rsig_data, dgamma_data, dbeta_data); // Computes dL/dweight hipLaunchKernelGGL(( WeightBackwardCUDAKernel<float, StorageOrder::NCHW>) , dim3(::min(C * C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, mu_data, var_data, weighted_mu_data, weighted_rsig_data, gamma_data, ds_data, db_data, dweight_data); } else { // Computes dL/ds and dL/db. // dL/ds = Sum(dL/dY * gamma * X) // dL/db = Sum(dL/dY * gamma) hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<float, StorageOrder::NHWC>) , dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, dY_data, X_data, gamma_data, ds_data, db_data); hipLaunchKernelGGL(( ComputeTempVariable<float>) , dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, weighted_mu_data, weighted_rsig_data, weight_data, ds_data, db_data, temp_u_data, temp_v_data); // Computes dL/dX. hipLaunchKernelGGL(( WeightChannelNormBackwardCUDAKernel<float, StorageOrder::NHWC>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, C, HxW, dY_data, X_data, weighted_rsig_data, gamma_data, temp_u_data, temp_v_data, dX_data); // Computes dL/dgamma and dL/dbeta. hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<float, StorageOrder::NHWC>) , dim3(::min(C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, dY_data, X_data, weighted_mu_data, weighted_rsig_data, dgamma_data, dbeta_data); hipLaunchKernelGGL(( WeightBackwardCUDAKernel<float, StorageOrder::NHWC>) , dim3(::min(C * C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, mu_data, var_data, weighted_mu_data, weighted_rsig_data, gamma_data, ds_data, db_data, dweight_data); } return true; } REGISTER_CUDA_OPERATOR(WeightChannelNorm, WeightChannelNormOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( WeightChannelNormGradient, WeightChannelNormGradientOp<float, CUDAContext>); } // namespace caffe2
3330161ab8620e4cb435119deeaa6ed9de161d02.cu
// ------------------------------------------------------------------ // WeightChannelNorm op in Caffe2 for GPU // Written by Kai Hu // This is a stand-alone op: Y = gamma * (X - weighted_mu) * weighted_rsig + beta // where // weighted_mu_ni = sum(mu_nc * weight_ci, c) // weighted_sig_ni = sum((sig_nc^2 + mu_nc^2) * weight_ci, c) - (sum(mu_nc * weight_ci, c))^2 // ------------------------------------------------------------------ #include "weight_channel_norm_op.h" #include <array> #include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "caffe2/utils/math_utils.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; __global__ void InvStdCUDAKernel( const int size, const float epsilon, const float* var, float* rsig) { CUDA_1D_KERNEL_LOOP(i, size) { #if __CUDA_ARCH__ >= 350 rsig[i] = rsqrtf(__ldg(var + i) + epsilon); #else rsig[i] = rsqrtf(var[i] + epsilon); #endif } } template <typename T, StorageOrder kOrder> __global__ void WeightChannelNormForwardCUDAKernel( const int size, const int C, const int HxW, const T* X, const T* weighted_mu, const T* weighted_rsig, const T* gamma, const T* beta, T* Y) { CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = kOrder == StorageOrder::NCHW ? i / HxW : i / (C * HxW) * C + (i % C); const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C; #if __CUDA_ARCH__ >= 350 Y[i] = __ldg(gamma + i_gamma) * (__ldg(X + i) - __ldg(weighted_mu + i_mu)) * __ldg(weighted_rsig + i_mu) + __ldg(beta + i_gamma); #else Y[i] = gamma[i_gamma] * (X[i] - weighted_mu[i_mu]) * weighted_rsig[i_mu] + beta[i_gamma]; #endif } } // add by Kai Hu, add function to compute the weighted mean and variance. template <typename T> __global__ void WeightChannelMeanCUDAKernel( const int N, const int C, const T* mu, const T* weight, T* weighted_mu) { const int outer_size = N * C; const int inner_size = C; __shared__ typename BlockReduce<T>::TempStorage wmu_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T wmu_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int i_nc = i; const int i_wc = j * C + i % C; const int i_nw = (i / C) * C + j; #if __CUDA_ARCH__ >= 350 wmu_val += __ldg(mu + i_nw) * __ldg(weight + i_wc); #else wmu_val += mu[i_nw] * weight[i_wc]; #endif } wmu_val = BlockReduce<T>(wmu_storage).Reduce(wmu_val, cub::Sum()); if (threadIdx.x == 0) { weighted_mu[i] = wmu_val; } __syncthreads(); } } template <typename T> __global__ void WeightChannelVarianceCUDAKernel( const int N, const int C, const T* mu, const T* var, const T* weight, T* weighted_mu, T* weighted_var) { const int outer_size = N * C; const int inner_size = C; __shared__ typename BlockReduce<T>::TempStorage wvar_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T wvar_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int i_nc = i; const int i_wc = j * C + i % C; const int i_nw = (i / C) * C + j; #if __CUDA_ARCH__ >= 350 wvar_val += __ldg(weight + i_wc) * (__ldg(var + i_nw) + __ldg(mu + i_nw) * __ldg(mu + i_nw) - __ldg(weighted_mu + i_nc) * __ldg(weighted_mu + i_nc)); #else wvar_val += weight[i_wc] * (var[i_nw] + mu[i_nw] * mu[i_nw] - weighted_mu[i_nc] * weighted_mu[i_nc]); #endif } wvar_val = BlockReduce<T>(wvar_storage).Reduce(wvar_val, cub::Sum()); if (threadIdx.x == 0) { weighted_var[i] = wvar_val; } __syncthreads(); } } template <typename T, StorageOrder kOrder> __global__ void 
ComputeInternalGradientsCUDAKernel( const int N, const int C, const int HxW, const T* dY, const T* X, const T* gamma, T* ds, T* db) { const int outer_size = N * C; const int inner_size = HxW; __shared__ typename BlockReduce<T>::TempStorage ds_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T ds_val = 0; T db_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int i_gamma = i % C; const int index = kOrder == StorageOrder::NCHW ? i * inner_size + j : ((i / C) * HxW + j) * C + i % C; #if __CUDA_ARCH__ >= 350 ds_val += __ldg(gamma + i_gamma) * __ldg(dY + index) * __ldg(X + index); db_val += __ldg(gamma + i_gamma) * __ldg(dY + index); #else ds_val += gamma[i_gamma] * dY[index] * X[index]; db_val += gamma[i_gamma] * dY[index]; #endif } ds_val = BlockReduce<T>(ds_storage).Reduce(ds_val, cub::Sum()); db_val = BlockReduce<T>(db_storage).Reduce(db_val, cub::Sum()); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; } __syncthreads(); } } template <typename T> __global__ void ComputeTempVariable( const int N, const int C, const int HxW, const T* weighted_mu, const T* weighted_rsig, const T* weight, const T* ds, const T* db, T* temp_u, T* temp_v) { const T denom = T(1) / static_cast<T>(HxW); const int outer_size = N * C; const int inner_size = C; __shared__ typename BlockReduce<T>::TempStorage u_storage; __shared__ typename BlockReduce<T>::TempStorage v_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T u_val = 0; T v_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int i_nj = (i / C) * C + j; const int i_cj = (i % C) * C + j; #if __CUDA_ARCH__ >= 350 u_val += (__ldg(db + i_nj) * __ldg(weighted_mu + i_nj) - __ldg(ds + i_nj)) * __ldg(weight + i_cj) * math::utils::Cube<T>(__ldg(weighted_rsig + i_nj)); v_val += (__ldg(db + i_nj) * __ldg(weighted_mu + i_nj) - __ldg(ds + i_nj)) * __ldg(weight + i_cj) * math::utils::Cube<T>(__ldg(weighted_rsig + i_nj)) * __ldg(weighted_mu + i_nj); v_val += __ldg(db + i_nj) * __ldg(weighted_rsig + i_nj) * __ldg(weight + i_cj); #else u_val += (db[i_nj] * weighted_mu[i_nj] - ds[i_nj]) * weight[i_cj] * math::utils::Cube<T>(weighted_rsig[i_nj]); v_val += (db[i_nj] * weighted_mu[i_nj] - ds[i_nj]) * weight[i_cj] * math::utils::Cube<T>(weighted_rsig[i_nj]) * weighted_mu[i_nj]; v_val += db[i_nj] * weighted_rsig[i_nj] * weight[i_cj]; #endif } u_val = BlockReduce<T>(u_storage).Reduce(u_val, cub::Sum()); v_val = BlockReduce<T>(v_storage).Reduce(v_val, cub::Sum()); if (threadIdx.x == 0) { temp_u[i] = u_val * denom; temp_v[i] = v_val * denom; } } } // Math: // Y = gamma * (X - weighted_mu) * weighted_rsig + beta // let s = gamma * weighted_rsig // let b = beta - gamma * weighted_mu * weighted_rsig // Y = s * X + b // let n = HxW // dL/dX = dL/dY * dY/dX = dL/dY * (s + X * ds/dX + db/dX) // ds/dX = gamma * dweighted_rsig/dX // db/dX = -gamma * weighted_mu * dweighted_rsig/dX - gamma * weighted_rsig * dweighted_mu/dX // Attention: dL/ds, dL/db has timed the gamma, so wo don't time it there in the code. // dweighted_rsig/dX = -0.5 * weighted_rsig^3 * dweighted_var/dX // dweighted_var/dX = dweighted_var/dvar * dvar/dX + dweighted_var/dmu * dmu/dX // dweighted_mu/dX = weight * dmu/dX // dsig/dX = 2 * (X - mu) / n * (1 - 1/n) attention: GN has ignored the (1 - 1/n) reasonable, we also ignore it. 
// dmu/dX = 1 / n // dL/dgamma = dL/dY * (dY/ds * ds/dgamma + dY/db * db/dgamma) // = dL/dY * (X - weighted_mu) * weighted_rsig // dL/dbeta = dL/dY // dL/dweight = dL/dY * (dY/ds * ds/dweighted_sig * dweighted_sig/dweight + dY/db * db/dweighted_mu * dweighted_mu/dweight + dY/db * db/dsig_ * dsig_/dweight) // = dL/dY * (-0.5 * X * rsig_^3 * dsig_/dweight - gamma * rsig_ * dmu_/dweight + gamma * mu_ * rsig_^3 * drsig_/dweight) // TODO:Kai Hu This function can be optimized // template <typename T, StorageOrder kOrder> // __global__ void WeightChannelNormBackwardCUDAKernel( // const int size, // const int C, // const int HxW, // const T* dY, // const T* X, // const T* mu, // const T* var, // const T* weighted_mu, // const T* weighted_rsig, // const T* gamma, // const T* weight, // const T* ds, // const T* db, // T* dX) { // const T denom = T(1) / static_cast<T>(HxW); // const int outer_size = size; // const int inner_size = C; // __shared__ typename BlockReduce<T>::TempStorage u_storage; // __shared__ typename BlockReduce<T>::TempStorage v_storage; // for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { // T u_val = 0; // T v_val = 0; // for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { // const int i_mu = kOrder == StorageOrder::NCHW // ? i / HxW // : (i / (C * HxW)) * C + (i % C); // (n,c) // const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C; // (c) // const int i_nj = i_mu / C * C + j; // (n,c') // const int i_cj = i_gamma * C + j; // (c,c') // #if __CUDA_ARCH__ >= 350 // u_val += (__ldg(db + i_nj) * __ldg(weighted_mu + i_nj) - __ldg(ds + i_nj)) * // __ldg(weight + i_cj) * (__ldg(X + i) - __ldg(weighted_mu + i_nj)) * // math::utils::Cube<T>(__ldg(weighted_rsig + i_nj)); // v_val += __ldg(db + i_nj) * __ldg(weighted_rsig + i_nj) * __ldg(weight + i_cj); // #else // u_val += (db[i_nj] * weighted_mu[i_nj] - ds[i_nj]) * weight[i_cj] * (X[i] - weighted_mu[i_nj]) * math::utils::Cube<T>(weighted_rsig[i_nj]); // v_val += db[i_nj] * weighted_rsig[i_nj] * weight[i_cj]; // #endif // } // u_val = BlockReduce<T>(u_storage).Reduce(u_val, cub::Sum()); // v_val = BlockReduce<T>(v_storage).Reduce(v_val, cub::Sum()); // if (threadIdx.x == 0) { // const int i_mu = kOrder == StorageOrder::NCHW // ? i / HxW // : (i / (C * HxW)) * C + (i % C); // (n,c) // const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C; // (c) // dX[i] = gamma[i_gamma] * dY[i] * weighted_rsig[i_mu] + (u_val - v_val) * denom; // } // __syncthreads(); // } // } template <typename T, StorageOrder kOrder> __global__ void WeightChannelNormBackwardCUDAKernel( const int size, const int C, const int HxW, const T* dY, const T* X, const T* weighted_rsig, const T* gamma, const T* temp_u, const T* temp_v, T* dX) { CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = kOrder == StorageOrder::NCHW ? i / HxW : (i / (C * HxW)) * C + (i % C); // (n,c) const int i_gamma = kOrder == StorageOrder::NCHW ? 
(i / HxW) % C : i % C; // (c) dX[i] = X[i] * temp_u[i_mu] - temp_v[i_mu] + gamma[i_gamma] * dY[i] * weighted_rsig[i_mu]; } } template <typename T, StorageOrder kOrder> __global__ void GammaBetaBackwardCUDAKernel( const int N, const int C, const int HxW, const T* dY, const T* X, const T* weighted_mu, const T* weighted_rsig, T* dgamma, T* dbeta) { const int outer_size = C; const int inner_size = N * HxW; __shared__ typename BlockReduce<T>::TempStorage dg_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T dg_val = 0; T db_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int n = j / HxW; const int index = kOrder == StorageOrder::NCHW ? (n * outer_size + i) * HxW + j % HxW : j * outer_size + i; const int i_mu = n * outer_size + i; #if __CUDA_ARCH__ >= 350 dg_val += __ldg(dY + index) * (__ldg(X + index) - __ldg(weighted_mu + i_mu)) * __ldg(weighted_rsig + i_mu); db_val += __ldg(dY + index); #else dg_val += dY[index] * (X[index] - weighted_mu[i_mu]) * weighted_rsig[i_mu]; db_val += dY[index]; #endif } dg_val = BlockReduce<T>(dg_storage).Reduce(dg_val, cub::Sum()); db_val = BlockReduce<T>(db_storage).Reduce(db_val, cub::Sum()); if (threadIdx.x == 0) { dgamma[i] = dg_val; dbeta[i] = db_val; } __syncthreads(); } } template <typename T, StorageOrder kOrder> __global__ void WeightBackwardCUDAKernel( const int N, const int C, const T* mu, const T* var, const T* weighted_mu, const T* weighted_rsig, const T* gamma, const T* ds, const T* db, T* dweight) { const int outer_size = C * C; const int inner_size = N; __shared__ typename BlockReduce<T>::TempStorage dw_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T dw_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { // const int n = j; // const int k = i % C; // const int c = i / C; const int i_nc = j * C + i / C; const int i_c = i / C; const int i_nk = j * C + i % C; #if __CUDA_ARCH__ >= 350 const T u = 0.5 * (__ldg(db + i_nk) * __ldg(weighted_mu + i_nk) - __ldg(ds + i_nk)) * math::utils::Cube<T>(__ldg(weighted_rsig + i_nk)) * (__ldg(var + i_nc) + __ldg(mu + i_nc) * __ldg(mu + i_nc) - 2 * __ldg(weighted_mu + i_nk) * __ldg(mu + i_nc)); const T v = __ldg(db + i_nk) * __ldg(weighted_rsig + i_nk) * __ldg(mu + i_nc); dw_val += u - v; #else const T u = 0.5 * (db[i_nk] * weighted_mu[i_nk] - ds[i_nk]) * math::utils::Cube<T>(weighted_rsig[i_nk]) * (var[i_nc] + mu[i_nc] * mu[i_nc] - 2 * weighted_mu[i_nk] * mu[i_nc]); const T v = db[i_nk] * weighted_rsig[i_nk] * mu[i_nc]; dw_val += u - v; #endif } dw_val = BlockReduce<T>(dw_storage).Reduce(dw_val, cub::Sum()); if (threadIdx.x == 0) { dweight[i] = dw_val; } __syncthreads(); } } } // namespace template <> bool WeightChannelNormOp<float, CUDAContext>::RunOnDeviceImpl( const int N, const int C, const int HxW, const float* X_data, const float* gamma_data, const float* beta_data, const float* weight_data, float* Y_data, float* mu_data, float* var_data, float* weighted_mu_data, float* weighted_rsig_data) { const std::array<int, 3> dims = order_ == StorageOrder::NCHW ? std::array<int, 3>{N, C, HxW} : std::array<int, 3>{N, HxW, C}; const std::array<int, 1> axes = order_ == StorageOrder::NCHW ? std::array<int, 1>{2} : std::array<int, 1>{1}; // Computes mean and variance. math::Moments<float, CUDAContext>( 3, dims.data(), 1, axes.data(), X_data, mu_data, var_data, &context_); // Computes weighted mean and variance. 
WeightChannelMeanCUDAKernel<float> <<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, mu_data, weight_data, weighted_mu_data); WeightChannelVarianceCUDAKernel<float> <<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, mu_data, var_data, weight_data, weighted_mu_data, weighted_rsig_data); // Uses rsqrt to computes 1 / std which is much faster than computes std. InvStdCUDAKernel<<< CAFFE_GET_BLOCKS(N * C), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N * C, epsilon_, weighted_rsig_data, weighted_rsig_data); // Computes Y = gamma * (X - weighted_mu) * weighted_rsig + beta. const int size = N * C * HxW; if (order_ == StorageOrder::NCHW) { WeightChannelNormForwardCUDAKernel<float, StorageOrder::NCHW> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, C, HxW, X_data, weighted_mu_data, weighted_rsig_data, gamma_data, beta_data, Y_data); } else { WeightChannelNormForwardCUDAKernel<float, StorageOrder::NHWC> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, C, HxW, X_data, weighted_mu_data, weighted_rsig_data, gamma_data, beta_data, Y_data); } return true; } // Math: // let: s = gamma * rsig // let: b = beta - mu * gamma * rsig // then: Y = s * X + b template <> bool WeightChannelNormGradientOp<float, CUDAContext>::RunOnDeviceImpl( const int N, const int C, const int HxW, const float* dY_data, const float* X_data, const float* mu_data, const float* var_data, const float* weighted_mu_data, const float* weighted_rsig_data, const float* gamma_data, const float* weight_data, float* dX_data, float* dgamma_data, float* dbeta_data, float* dweight_data) { const int size = N * C * HxW; ds_.Resize(N, C); db_.Resize(N, C); temp_u.Resize(N, C); temp_v.Resize(N, C); float* ds_data = ds_.mutable_data<float>(); float* db_data = db_.mutable_data<float>(); float* temp_u_data = temp_u.mutable_data<float>(); float* temp_v_data = temp_v.mutable_data<float>(); if (order_ == StorageOrder::NCHW) { // Computes dL/ds and dL/db. // dL/ds = Sum(dL/dY * gamma * X) // dL/db = Sum(dL/dY * gamma) // Attention: dL/ds, dL/db has timed the gamma to accelerate the computation. ComputeInternalGradientsCUDAKernel<float, StorageOrder::NCHW> <<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, HxW, dY_data, X_data, gamma_data, ds_data, db_data); ComputeTempVariable<float> <<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, HxW, weighted_mu_data, weighted_rsig_data, weight_data, ds_data, db_data, temp_u_data, temp_v_data); // Computes dL/dX. WeightChannelNormBackwardCUDAKernel<float, StorageOrder::NCHW> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, C, HxW, dY_data, X_data, weighted_rsig_data, gamma_data, temp_u_data, temp_v_data, dX_data); // Computes dL/dgamma and dL/dbeta. 
GammaBetaBackwardCUDAKernel<float, StorageOrder::NCHW> <<<std::min(C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, HxW, dY_data, X_data, weighted_mu_data, weighted_rsig_data, dgamma_data, dbeta_data); // Computes dL/dweight WeightBackwardCUDAKernel<float, StorageOrder::NCHW> <<<std::min(C * C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, mu_data, var_data, weighted_mu_data, weighted_rsig_data, gamma_data, ds_data, db_data, dweight_data); } else { // Computes dL/ds and dL/db. // dL/ds = Sum(dL/dY * gamma * X) // dL/db = Sum(dL/dY * gamma) ComputeInternalGradientsCUDAKernel<float, StorageOrder::NHWC> <<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, HxW, dY_data, X_data, gamma_data, ds_data, db_data); ComputeTempVariable<float> <<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, HxW, weighted_mu_data, weighted_rsig_data, weight_data, ds_data, db_data, temp_u_data, temp_v_data); // Computes dL/dX. WeightChannelNormBackwardCUDAKernel<float, StorageOrder::NHWC> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, C, HxW, dY_data, X_data, weighted_rsig_data, gamma_data, temp_u_data, temp_v_data, dX_data); // Computes dL/dgamma and dL/dbeta. GammaBetaBackwardCUDAKernel<float, StorageOrder::NHWC> <<<std::min(C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, HxW, dY_data, X_data, weighted_mu_data, weighted_rsig_data, dgamma_data, dbeta_data); WeightBackwardCUDAKernel<float, StorageOrder::NHWC> <<<std::min(C * C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, mu_data, var_data, weighted_mu_data, weighted_rsig_data, gamma_data, ds_data, db_data, dweight_data); } return true; } REGISTER_CUDA_OPERATOR(WeightChannelNorm, WeightChannelNormOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( WeightChannelNormGradient, WeightChannelNormGradientOp<float, CUDAContext>); } // namespace caffe2
1e48b22273e8007c8f0d9581cf590af0a5a8ff9f.hip
// !!! This is a file automatically generated by hipify!!!
#include <Rcpp.h>
#include <hip/hip_runtime.h>

using namespace Rcpp;

//indent = 4 spaces

//CUDA function
__global__ void degree0(float *B, float *knots, float *x, int i) {
    //current thread
    int me = blockIdx.x * blockDim.x + threadIdx.x;
    B[me] = ((x[me] >= knots[i]) && (x[me] < knots[i+1])) ? 1 : 0;
}

NumericVector c_basis(NumericVector x, int degree, int i, NumericVector knots, int lenx){
    NumericVector B(lenx); // output variable
    NumericVector alpha1(lenx), alpha2(lenx);
    //NumericVector B, alpha1, alpha2;
    if(degree==0) {
        //blocks and threads for GPU
        int nthreads = min(lenx, 500);
        int nblocks = ceil(lenx/nthreads);
        //set up grid/block dimensions
        dim3 dimGrid(nblocks,1);
        dim3 dimBlock(nthreads,1,1);
        float *dB, *dknots, *dx;
        //copy to the GPU
        hipMemcpy(dB, B, lenx, hipMemcpyHostToDevice);
        hipMemcpy(dknots, knots, knots.size(), hipMemcpyHostToDevice);
        hipMemcpy(dx, x, lenx, hipMemcpyHostToDevice);
        hipLaunchKernelGGL((degree0), dim3(dimGrid), dim3(dimBlock), 0, 0, dB, dknots, dx, i);
        //copy back to B
        hipMemcpy(B, dB, lenx, hipMemcpyDeviceToHost);
        B = wrap(ifelse((x >= knots[i]) & (x < knots[i+1]), 1, 0));
    } //end if
    else {
        if((knots[degree+i] - knots[i]) == 0) {
            alpha1 = rep(0,lenx);
        }
        else {
            alpha1 = wrap((x - knots[i])/(knots[degree+i] - knots[i]));
        }
        if((knots[i+degree+1] - knots[i+1]) == 0) {
            alpha2 = rep(0,lenx);
        }//end if
        else {
            alpha2 = wrap((knots[i+degree+1] - x)/(knots[i+degree+1] - knots[i+1]));
        }//end else
        B = (alpha1 * c_basis(x, (degree - 1), i, knots, lenx)) +
            (alpha2 * c_basis(x, (degree - 1), (i + 1), knots, lenx));
    }//end else
    return B;
} //end basis function

//change name from matrix to something more meaningful
RcppExport SEXP c_formMatrix(SEXP x_, SEXP degree_, SEXP knots_, SEXP k_, SEXP lenx_){
    //convert data types from R to C++
    int degree = as<int>(degree_), k = as<int>(k_), lenx = as<int>(lenx_);
    //SEXP to NumericVector: http://dirk.eddelbuettel.com/code/rcpp/Rcpp-quickref.pdf
    NumericVector x(x_);
    NumericVector knots(knots_);
    //output variable allocation:
    NumericMatrix out(lenx,k);
    //blocks and threads for GPU
    int nthreads = min(lenx, 500);
    int nblocks = ceil(lenx/nthreads);
    //set up grid/block dimensions
    dim3 dimGrid(nblocks,1);
    dim3 dimBlock(nthreads,1,1);
    float *dB, *dknots, *dx;
    //make space on GPU
    hipMalloc((void **) &dB, lenx*sizeof(float));
    hipMalloc((void **) &dknots, knots.size()*sizeof(float));
    hipMalloc((void **) &dx, lenx*sizeof(float));
    for(int j = 0; j < k; j++) { //R equivalent: for(j in 1:k){
        //Reference the jth column; changes propagate to matrix
        NumericMatrix::Column jvector = out(_,j);
        jvector = c_basis(x, degree, j, knots, lenx);
    }//end for(j)
    hipFree(dB);
    hipFree(dknots);
    hipFree(dx);
    return out;
} //end matrix function
1e48b22273e8007c8f0d9581cf590af0a5a8ff9f.cu
#include <Rcpp.h>
#include <cuda.h>

using namespace Rcpp;

//indent = 4 spaces

//CUDA function
__global__ void degree0(float *B, float *knots, float *x, int i) {
    //current thread
    int me = blockIdx.x * blockDim.x + threadIdx.x;
    B[me] = ((x[me] >= knots[i]) && (x[me] < knots[i+1])) ? 1 : 0;
}

NumericVector c_basis(NumericVector x, int degree, int i, NumericVector knots, int lenx){
    NumericVector B(lenx); // output variable
    NumericVector alpha1(lenx), alpha2(lenx);
    //NumericVector B, alpha1, alpha2;
    if(degree==0) {
        //blocks and threads for GPU
        int nthreads = min(lenx, 500);
        int nblocks = ceil(lenx/nthreads);
        //set up grid/block dimensions
        dim3 dimGrid(nblocks,1);
        dim3 dimBlock(nthreads,1,1);
        float *dB, *dknots, *dx;
        //copy to the GPU
        cudaMemcpy(dB, B, lenx, cudaMemcpyHostToDevice);
        cudaMemcpy(dknots, knots, knots.size(), cudaMemcpyHostToDevice);
        cudaMemcpy(dx, x, lenx, cudaMemcpyHostToDevice);
        degree0<<<dimGrid,dimBlock>>>(dB, dknots, dx, i);
        //copy back to B
        cudaMemcpy(B, dB, lenx, cudaMemcpyDeviceToHost);
        B = wrap(ifelse((x >= knots[i]) & (x < knots[i+1]), 1, 0));
    } //end if
    else {
        if((knots[degree+i] - knots[i]) == 0) {
            alpha1 = rep(0,lenx);
        }
        else {
            alpha1 = wrap((x - knots[i])/(knots[degree+i] - knots[i]));
        }
        if((knots[i+degree+1] - knots[i+1]) == 0) {
            alpha2 = rep(0,lenx);
        }//end if
        else {
            alpha2 = wrap((knots[i+degree+1] - x)/(knots[i+degree+1] - knots[i+1]));
        }//end else
        B = (alpha1 * c_basis(x, (degree - 1), i, knots, lenx)) +
            (alpha2 * c_basis(x, (degree - 1), (i + 1), knots, lenx));
    }//end else
    return B;
} //end basis function

//change name from matrix to something more meaningful
RcppExport SEXP c_formMatrix(SEXP x_, SEXP degree_, SEXP knots_, SEXP k_, SEXP lenx_){
    //convert data types from R to C++
    int degree = as<int>(degree_), k = as<int>(k_), lenx = as<int>(lenx_);
    //SEXP to NumericVector: http://dirk.eddelbuettel.com/code/rcpp/Rcpp-quickref.pdf
    NumericVector x(x_);
    NumericVector knots(knots_);
    //output variable allocation:
    NumericMatrix out(lenx,k);
    //blocks and threads for GPU
    int nthreads = min(lenx, 500);
    int nblocks = ceil(lenx/nthreads);
    //set up grid/block dimensions
    dim3 dimGrid(nblocks,1);
    dim3 dimBlock(nthreads,1,1);
    float *dB, *dknots, *dx;
    //make space on GPU
    cudaMalloc((void **) &dB, lenx*sizeof(float));
    cudaMalloc((void **) &dknots, knots.size()*sizeof(float));
    cudaMalloc((void **) &dx, lenx*sizeof(float));
    for(int j = 0; j < k; j++) { //R equivalent: for(j in 1:k){
        //Reference the jth column; changes propagate to matrix
        NumericMatrix::Column jvector = out(_,j);
        jvector = c_basis(x, degree, j, knots, lenx);
    }//end for(j)
    cudaFree(dB);
    cudaFree(dknots);
    cudaFree(dx);
    return out;
} //end matrix function
490481ffb75f90e9ddfd149dd99c547423e00c9d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. 
extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_scalarAddf (size_t n, float *result, float x, float *y) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = x + y[id]; } }
490481ffb75f90e9ddfd149dd99c547423e00c9d.cu
#include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. 
extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_scalarAddf (size_t n, float *result, float x, float *y) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = x + y[id]; } }
693be61c437d184d0bc09dc463bae522c9d615cc.hip
// !!! This is a file automatically generated by hipify!!! #define ThisSoftwareVersion "200722" #define CodeName "in the middle of nowhere" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <stdio.h> #include <string> #include <fstream> #include <iomanip> #include <sstream> #include <cstring> #include <cmath> #include <algorithm> #include <ctime> #include <hip/hip_runtime.h> #include <vector> #include <csignal> #include <time.h> #ifdef _WIN32 #include "windows.h" #endif using namespace std; using std::cout; int state; //__device__ double *C, *C0, *ux, *uy, *vx, *vy, *p, *p0, *mu; //__device__ multi_cross *Md; __constant__ double hx, hy, tau, Lx, Ly, tau_p; __constant__ double A, Ca, Gr, Pe, Re, Gs, MM, dP; __constant__ double Amp, Omega, vibr_X, vibr_Y, VV; __constant__ double alpha, sinA, cosA, theta, cosTh, sinTh; __constant__ unsigned int nx, ny, n, offset, border_type; __constant__ double eps0_d = 1e-5; __constant__ double pi = 3.1415926535897932384626433832795; __constant__ int Mx, My, Msize, Moffset, OFFSET; __constant__ unsigned int iter; __constant__ unsigned int TOTAL_SIZE; __constant__ unsigned int nxg, nyg; __constant__ unsigned int PHASE; __constant__ unsigned int PHI_border_left, PHI_border_right, W_BORDER; __constant__ double PHI_value_left, PHI_value_right; __device__ int* n1, *n2, *n3, *n4, *t, *J_back; __device__ double Phi_reference = 0.0; __global__ void hello() { printf("\n thread x:%i y:%i, information copied from device:\n", threadIdx.x, threadIdx.y); printf("A= %f Ca=%f \n", A, Ca); printf("Gr= %f Pe=%f \n", Gr, Pe); printf("Re= %f M=%f \n", Re, MM); printf("hx= %f hy=%f \n", hx, hy); printf("tau= %20.16f \n", tau); printf("tau_p= %20.16f \n", tau_p); printf("nx= %i ny=%i \n", nx, ny); printf("Lx= %f Ly=%f \n", Lx, Ly); printf("offset= %i \n", offset); printf("sinA= %f cosA=%f \n", sinA, cosA); printf("sinTh= %f cosTh=%f \n", sinTh, cosTh); printf("Total number of nodes = %i \n", TOTAL_SIZE); printf("P inject factor = %f \n", dP); printf("Amp= %f Omega=%f V=%f \n", Amp, Omega, VV); printf("vibr_X= %f vibr_Y=%f \n", vibr_X, vibr_Y); printf("Vibro border: W = %i, Phi_L = %i, Phi_R = %i, VALUE_L = %f, VALUE_R = %f \n", W_BORDER, PHI_border_left, PHI_border_right, PHI_value_left, PHI_value_right); if (PHASE == 1) printf("Phase field \n"); if (PHASE == 0) printf("Single phase flow \n"); printf("\n"); } #define Pi 3.1415926535897932384626433832795 #define pause system("pause"); #define timer timer2 = clock()/ CLOCKS_PER_SEC; cout << "time (seconds)= " << (timer2 - timer1) << endl; #define cudaCheckError() { \ hipError_t e = hipGetLastError(); \ if (e != hipSuccess) {\ printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(e)); \ exit(0); \ } \ } #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) #define VarName(Variable) (#Variable) #define PrintVar(Variable) cout << (#Variable) << " = " << Variable << endl; #define defConst(F, type) type F##_h; File.reading<type>(F##_h, #F, 0.0); hipMemcpyToSymbol(F, &F ## _h, sizeof(type), 0, hipMemcpyHostToDevice); //getting Ek and Vmax void velocity(unsigned int N, double hx, double hy, double *vx, double *vy, double &Ek, double &Vmax) { double V = 0; Ek = 0.0; Vmax = 0.0; for (unsigned int C = 0; C < N; C++) { V = 
+vx[C] * vx[C] + vy[C] * vy[C]; Ek += V; if (sqrt(V) > Vmax) Vmax = sqrt(V); } Ek = Ek / 2.0 * hx * hy; } double maxval(double* f, unsigned int n) { double max = abs(f[0]); for (unsigned int i = 0; i < n; i++) { if (abs(f[i])>max) { max = abs(f[i]); } } return max; } double MINval(double* f, unsigned int n) { double min = abs(f[0]); for (unsigned int i = 0; i < n; i++) { if (f[i]<min) { min = f[i]; } } return min; } double MAXval(double* f, unsigned int n) { double max = (f[0]); for (unsigned int i = 0; i < n; i++) { if ((f[i])>max) { max = (f[i]); } } return max; } double sum(double* f, unsigned int n) { double sum = 0; for (unsigned int i = 0; i < n; i++) { sum += f[i]; } return sum; } //volumetric flow rate void VFR(double *vx, int *t, unsigned int size, double hy, double &Q_in, double &Q_out, double *C, double &C_average, double &Cv) { Q_in = 0; Q_out = 0; C_average = 0; Cv = 0; for (unsigned int i = 0; i < size; i++) { if (t[i] == 9) { Q_in += vx[i]; } if (t[i] == 10) { Q_out += vx[i]; C_average += C[i]; Cv += vx[i] * C[i]; } } Q_in = Q_in*hy; Q_out = Q_out*hy; C_average = C_average*hy / (1.0 - hy); Cv = Cv*hy; } void C_statistics(unsigned int size, double hx, double hy, int *t, double *C, double &C_av, double &C_plus, double &C_minus) { C_av = 0; C_plus = 0; C_minus = 0; unsigned int n = 0, n2 = 0, n_plus = 0, n2_plus = 0, n_minus = 0, n2_minus = 0; for (unsigned int l = 0; l < size; l++) { if (t[l] == 0) { C_av += C[l]; n++; if (C[l] > 0) { C_plus += C[l]; n_plus++; } if (C[l] < 0) { C_minus += C[l]; n_minus++; } } else { C_av += C[l] / 2; n2++; if (C[l] > 0) { C_plus += C[l] / 2; n2_plus++; } if (C[l] < 0) { C_minus += C[l] / 2; n2_minus++; } } } if (n + n2 > 0) C_av /= (n + 0.5*n2); if (n_plus + n2_plus > 0) C_plus /= (n_plus + 0.5*n2_plus); if (n_minus + n2_minus > 0) C_minus /= (n_minus + 0.5*n2_minus); } void reading_parameters(unsigned int &ny_h, unsigned int &nx_h, double &each_t, unsigned int &each, unsigned int &Matrix_X, unsigned int &Matrix_Y, double &tau_h, double &A_h, double &Ca_h, double &Gr_h, double &Pe_h, double &Re_h, double &alpha_h, double &MM_h, double &tecplot, unsigned int &PHASE_h) { ifstream read; string str, substr; stringstream ss; read.open("inp.dat"); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; ny_h = atoi(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; nx_h = atoi(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; each_t = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; each = atoi(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Matrix_X = atoi(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Matrix_Y = atoi(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; tau_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; A_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Ca_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Gr_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Pe_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Re_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; alpha_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, 
str); ss << str; ss >> substr; MM_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; tecplot = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; PHASE_h = atoi(substr.c_str()); read.close(); } struct ReadingFile { private: ifstream read; string str, substr, buffer; string file_name; stringstream ss; istringstream iss; ostringstream oss; int stat, pos; public: ReadingFile(string name) { file_name = name; open_file(file_name); stat = 0; } ReadingFile() { stat = 0; } void open_file(string file_name) { read.open(file_name.c_str()); if (read.good()) { cout << endl << "the parameter file \"" << file_name << "\" has been read " << endl << endl; oss << read.rdbuf(); buffer = oss.str(); iss.str(buffer); } else { cout << "the parameter file has been not found, default parameters will be initialized " << endl; buffer = ""; iss.str(buffer); } } template <typename T> int reading(T &var, string parameter_name, T def_var, T min = 0, T max = 0) { int ret = 0; stat = 0; transform(parameter_name.begin(), parameter_name.end(), parameter_name.begin(), ::tolower); iss.clear(); iss.seekg(0); while (getline(iss, str)) { //substr.clear(); ss.str(""); ss.clear(); ss << str; ss >> substr; transform(substr.begin(), substr.end(), substr.begin(), ::tolower); if (substr == parameter_name) { ret = 1; pos = (int)ss.tellg(); while (ss >> substr) { if (substr == "=") { ss >> var; stat = 1; break; } } if (stat == 0) { ss.clear(); ss.seekg(pos); ss >> var; } break; } } if (iss.fail()) { var = def_var; } if (min != max && (min + max) != 0) { if (var > max || var < min) { cout << "Warning: \"" + parameter_name + "\" should not be within this range" << endl; var = def_var; } } return ret; //return 1 if read } void reading_string(string &var, string parameter_name, string def_var) { stat = 0; transform(parameter_name.begin(), parameter_name.end(), parameter_name.begin(), ::tolower); iss.clear(); iss.seekg(0); while (getline(iss, str)) { //substr.clear(); ss.str(""); ss.clear(); ss << str; ss >> substr; transform(substr.begin(), substr.end(), substr.begin(), ::tolower); if (substr == parameter_name) { pos = (int)ss.tellg(); while (ss >> substr) { if (substr == "=") { ss >> var; stat = 1; break; } } if (stat == 0) { ss.clear(); ss.seekg(pos); ss >> var; } break; } } if (iss.fail()) { var = def_var; } } }; __device__ double dx1(unsigned int l, double *f) { return 0.5*(f[n3[l]] - f[n1[l]]) / hx; } __device__ double dy1(unsigned int l, double *f) { return 0.5*(f[n2[l]] - f[n4[l]]) / hy; } __device__ double dx2(unsigned int l, double *f) { return (f[n3[l]] - 2.0*f[l] + f[n1[l]]) / hx / hx; } __device__ double dy2(unsigned int l, double *f) { return (f[n2[l]] - 2.0*f[l] + f[n4[l]]) / hy / hy; } __device__ double dx1_eq_0_forward(unsigned int l, double *f) { return (4.0*f[n3[l]] - f[n3[n3[l]]]) / 3.0; } __device__ double dx1_eq_0_back(unsigned int l, double *f) { return (4.0*f[n1[l]] - f[n1[n1[l]]]) / 3.0; } __device__ double dy1_eq_0_up(unsigned int l, double *f) { return (4.0*f[n2[l]] - f[n2[n2[l]]]) / 3.0; } __device__ double dy1_eq_0_down(unsigned int l, double *f) { return (4.0*f[n4[l]] - f[n4[n4[l]]]) / 3.0; } __device__ double dx1_forward(unsigned int l, double *f) { return -0.5*(3.0*f[l] - 4.0*f[n3[l]] + f[n3[n3[l]]]) / hx; } __device__ double dx1_back(unsigned int l, double *f) { return 0.5*(3.0*f[l] - 4.0*f[n1[l]] + f[n1[n1[l]]]) / hx; } __device__ double dy1_up(unsigned int l, double *f) { return -0.5*(3.0*f[l] - 4.0*f[n2[l]] + f[n2[n2[l]]]) 
/ hy; } __device__ double dy1_down(unsigned int l, double *f) { return 0.5*(3.0*f[l] - 4.0*f[n4[l]] + f[n4[n4[l]]]) / hy; } __device__ double dx2_forward(unsigned int l, double *f) { return (2.0 * f[l] - 5.0 * f[n3[l]] + 4.0 * f[n3[n3[l]]] - f[n3[n3[n3[l]]]]) / hx / hx; } __device__ double dx2_back(unsigned int l, double *f) { return (2.0 * f[l] - 5.0 * f[n1[l]] + 4.0 * f[n1[n1[l]]] - f[n1[n1[n1[l]]]]) / hx / hx; } __device__ double dy2_up(unsigned int l, double *f) { return (2.0 * f[l] - 5.0 * f[n2[l]] + 4.0 * f[n2[n2[l]]] - f[n2[n2[n2[l]]]]) / hy / hy; } __device__ double dy2_down(unsigned int l, double *f) { return (2.0 * f[l] - 5.0 * f[n4[l]] + 4.0 * f[n4[n4[l]]] - f[n4[n4[n4[l]]]]) / hy / hy; } __device__ double dx2_eq_0_forward(unsigned int l, double* f) { return (5.0 * f[n3[l]] - 4.0 * f[n3[n3[l]]] + f[n3[n3[n3[l]]]]) * 0.5; } __device__ double dx2_eq_0_back(unsigned int l, double* f) { return (5.0 * f[n1[l]] - 4.0 * f[n1[n1[l]]] + f[n1[n1[n1[l]]]]) * 0.5; } __device__ double dy2_eq_0_up(unsigned int l, double* f) { return (5.0 * f[n2[l]] - 4.0 * f[n2[n2[l]]] + f[n2[n2[n2[l]]]]) * 0.5; } __device__ double dy2_eq_0_down(unsigned int l, double* f) { return (5.0 * f[n4[l]] - 4.0 * f[n4[n4[l]]] + f[n4[n4[n4[l]]]]) * 0.5; } __device__ double dxy1(double *f, int l, int i, int j) { int ii = (J_back[l] - (J_back[l] / OFFSET)*OFFSET); int jj = (J_back[l] / OFFSET); if (i > 0 && i < nx && j > 0 && j < ny) { return (-f[l - 1 + offset] + f[l + 1 + offset] - f[l + 1 - offset] + f[l - 1 - offset]) / hx / hy / 4.0; } else { return 0; } } __device__ double extrapolate_back(unsigned int l, double *f) { return 2.0*f[n3[l]] - f[n3[n3[l]]]; } __device__ double extrapolate_forward(unsigned int l, double *f) { return 2.0*f[n1[l]] - f[n1[n1[l]]]; } __device__ double extrapolate_down(unsigned int l, double *f) { return 2.0*f[n2[l]] - f[n2[n2[l]]]; } __device__ double extrapolate_up(unsigned int l, double *f) { return 2.0*f[n4[l]] - f[n4[n4[l]]]; } __device__ double VgradF(unsigned int l, double *f, double *vx, double *vy) { double val = 0; double VR, VL, VU, VD; double FR, FL, FU, FD; FR = FL = FU = FD = 0; VR = (vx[n3[l]] + vx[l])*0.5; VL = (vx[l] + vx[n1[l]])*0.5; if (VR > 0) FR = f[l]; else if (VR < 0) FR = f[n3[l]]; if (VL > 0) FL = f[n1[l]]; else if (VL < 0) FL = f[l]; val += (VR*FR - VL*FL) / hx; VU = (vy[n2[l]] + vy[l])*0.5; VD = (vy[l] + vy[n4[l]])*0.5; if (VU > 0) FU = f[l]; else if (VU < 0) FU = f[n2[l]]; if (VD > 0) FD = f[n4[l]]; else if (VD < 0) FD = f[l]; val += (VU*FU - VD*FD) / hy; return val; } __device__ double VgradF_forward(unsigned int l, double *f, double *vx, double *vy) { double val = 0; double VR, VL, VU, VD; double FR, FL, FU, FD; FR = FL = FU = FD = 0; VR = (vx[n3[l]] + vx[l])*0.5; VL = (vx[l] + vx[n1[l]])*0.5; if (VR > 0) FR = f[l]; else if (VR < 0) FR = f[n3[l]]; if (VL > 0) FL = f[n1[l]]; else if (VL < 0) FL = f[l]; val += (VR*FR - VL*FL) / hx; VU = (vy[n2[l]] + vy[l])*0.5; VD = (vy[l] + vy[n4[l]])*0.5; if (VU > 0) FU = f[l]; else if (VU < 0) FU = f[n2[l]]; if (VD > 0) FD = f[n4[l]]; else if (VD < 0) FD = f[l]; val += (VU*FU - VD*FD) / hy; return val; } /* #define dx1(l, f) 0.5*(f[n3[l]] - f[n1[l]]) / hx #define dy1(l, f) 0.5*(f[n2[l]] - f[n4[l]]) / hy #define dx2(l, f) (f[n3[l]] - 2.0*f[l] + f[n1[l]]) / hx / hx #define dy2(l, f) (f[n2[l]] - 2.0*f[l] + f[n4[l]]) / hy / hy #define dx1_eq_0_forward(l, f) (4.0*f[n3[l]] - f[n3[n3[l]]]) / 3.0 #define dx1_eq_0_back(l, f) (4.0*f[n1[l]] - f[n1[n1[l]]]) / 3.0 #define dy1_eq_0_up(l, f) (4.0*f[n2[l]] - f[n2[n2[l]]]) / 3.0 #define 
dy1_eq_0_down(l, f) (4.0*f[n4[l]] - f[n4[n4[l]]]) / 3.0 #define dx1_forward(l, f) -0.5*(3.0*f[l] - 4.0*f[n3[l]] + f[n3[n3[l]]]) / hx #define dx1_back(l, f) 0.5*(3.0*f[l] - 4.0*f[n1[l]] + f[n1[n1[l]]]) / hx #define dy1_up(l, f) -0.5*(3.0*f[l] - 4.0*f[n2[l]] + f[n2[n2[l]]]) / hy #define dy1_down(l, f) 0.5*(3.0*f[l] - 4.0*f[n4[l]] + f[n4[n4[l]]]) / hy #define dx2_forward(l, f) (2.0 * f[l] - 5.0 * f[n3[l]] + 4.0 * f[n3[n3[l]]] - f[n3[n3[n3[l]]]]) / hx / hx #define dx2_back(l, f) (2.0 * f[l] - 5.0 * f[n1[l]] + 4.0 * f[n1[n1[l]]] - f[n1[n1[n1[l]]]]) / hx / hx #define dy2_up(l, f) (2.0 * f[l] - 5.0 * f[n2[l]] + 4.0 * f[n2[n2[l]]] - f[n2[n2[n2[l]]]]) / hy / hy #define dy2_down(l, f) (2.0 * f[l] - 5.0 * f[n4[l]] + 4.0 * f[n4[n4[l]]] - f[n4[n4[n4[l]]]]) / hy / hy */ //integer global inxed i __device__ unsigned int iG(unsigned int l) { return (J_back[l] - (J_back[l] / OFFSET)*OFFSET); } //integer global index j __device__ unsigned int jG(unsigned int l) { return (J_back[l] / OFFSET); } __device__ double r_gamma(unsigned int l) { return (J_back[l] - (J_back[l] / OFFSET)*OFFSET) * hx*cosA + //cosA*x (J_back[l] / OFFSET) * hy*sinA; //sinA*y } __device__ double x_gamma(unsigned int l) { return (J_back[l] - (J_back[l] / OFFSET)*OFFSET) * hx*cosA; //cosA*x } __device__ double y_gamma(unsigned int l) { return (J_back[l] / OFFSET) * hy*sinA; //sinA*y } __global__ void chemical_potential(double *mu, double *C) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner mu[l] = -MM*Gr* r_gamma(l) //nu takoe // da norm + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - Ca*(dx2(l, C) + dy2(l, C)); break; case 1: //left rigid mu[l] = dx1_eq_0_forward(l, mu); break; case 2: //upper rigid mu[l] = dy1_eq_0_down(l, mu); /* int down1 = n4[l]; int down2 = n4[n4[l]]; double m1 = -MM*Gr* r_gamma(down1) +2.0 * A * C[down1] + 4.0 * pow(C[down1], 3) - Ca*(dx2(down1, C) + dy2(down1, C)); double m2 = -MM*Gr* r_gamma(down2) +2.0 * A * C[down2] + 4.0 * pow(C[down2], 3) - Ca*(dx2(down2, C) + dy2(down2, C)); mu[l] = (4.0*m1 - m2) / 3.0; */ break; case 3: //right rigid mu[l] = dx1_eq_0_back(l, mu); break; case 4: //lower rigid mu[l] = dy1_eq_0_up(l, mu); /* int up1 = n2[l]; int up2 = n2[n2[l]]; double m1_ = -MM*Gr* r_gamma(up1) +2.0 * A * C[up1] + 4.0 * pow(C[up1], 3) - Ca*(dx2(up1, C) + dy2(up1, C)); double m2_ = -MM*Gr* r_gamma(up2) +2.0 * A * C[up2] + 4.0 * pow(C[up2], 3) - Ca*(dx2(up2, C) + dy2(up2, C)); mu[l] = (4.0*m1_ - m2_) / 3.0; */ break; case 5: //left upper rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_down(l, mu)); break; case 6: //right upper rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_down(l, mu)); break; case 7: //right lower rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_up(l, mu)); break; case 8: //left lower rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_up(l, mu)); break; case 9: //inlet (from left) mu[l] = -Ca*dx2_forward(l, C) - Ca*dy2(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - MM*Gr* r_gamma(l); //dx1_eq_0_forward(l, mu); break; case 10://outlet (to right) mu[l] = -Ca*dx2_back(l, C) - Ca*dy2(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - MM*Gr* r_gamma(l); //dx1_eq_0_back(l, mu); break; default: break; } } } __global__ void chemical_potential_border(double *mu, double *C) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner break; case 1: //left rigid mu[l] = dx1_eq_0_forward(l, mu); break; case 2: //upper rigid mu[l] = dy1_eq_0_down(l, mu);// -2.0 / 
3.0*hy*(-Gr*r_gamma(l)*C[l]); /* int down1 = n4[l]; int down2 = n4[n4[l]]; double m1 = -MM*Gr* r_gamma(down1) +2.0 * A * C[down1] + 4.0 * pow(C[down1], 3) - Ca*(dx2(down1, C) + dy2(down1, C)); double m2 = -MM*Gr* r_gamma(down2) +2.0 * A * C[down2] + 4.0 * pow(C[down2], 3) - Ca*(dx2(down2, C) + dy2(down2, C)); mu[l] = (4.0*m1 - m2) / 3.0; */ break; case 3: //right rigid mu[l] = dx1_eq_0_back(l, mu); break; case 4: //lower rigid mu[l] = dy1_eq_0_up(l, mu);// +2.0 / 3.0*hy*(-Gr*r_gamma(l)*C[l]);; /* int up1 = n2[l]; int up2 = n2[n2[l]]; double m1_ = -MM*Gr* r_gamma(up1) +2.0 * A * C[up1] + 4.0 * pow(C[up1], 3) - Ca*(dx2(up1, C) + dy2(up1, C)); double m2_ = -MM*Gr* r_gamma(up2) +2.0 * A * C[up2] + 4.0 * pow(C[up2], 3) - Ca*(dx2(up2, C) + dy2(up2, C)); mu[l] = (4.0*m1_ - m2_) / 3.0; */ break; case 5: //left upper rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_down(l, mu)); break; case 6: //right upper rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_down(l, mu)); break; case 7: //right lower rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_up(l, mu)); break; case 8: //left lower rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_up(l, mu)); break; case 9: //inlet (from left) //mu[l] = dx1_eq_0_forward(l, mu); //mu[l] = dx2_eq_0_forward(l, mu); break; case 10://outlet (to right) //mu[l] = dx1_eq_0_back(l, mu); //mu[l] = dx2_eq_0_back(l, mu); break; default: break; } } } __global__ void chemical_potential_inside(double *mu, double *C) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner mu[l] = -MM*Gr* r_gamma(l) //nu takoe // da norm + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - Ca*(dx2(l, C) + dy2(l, C)); break; case 1: //left rigid break; case 2: //upper rigid break; case 3: //right rigid break; case 4: //lower rigid break; case 5: //left upper rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_down(l, mu)); break; case 6: //right upper rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_down(l, mu)); break; case 7: //right lower rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_up(l, mu)); break; case 8: //left lower rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_up(l, mu)); break; case 9: //inlet (from left) mu[l] = -Ca*dx2_forward(l, C) - Ca*dy2(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - MM*Gr* r_gamma(l); break; case 10://outlet (to right) mu[l] = -Ca*dx2_back(l, C) - Ca*dy2(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - MM*Gr* r_gamma(l); break; default: break; } } } __global__ void chemical_potential_Gr(double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { mu[l] = -MM*Gr* r_gamma(l); } } __global__ void quasi_velocity(double *ux, double *uy, double *vx, double *vy, double *C0, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner //ux_d ux[l] = vx[l] //+ Gr*C0[l] * x_gamma(l) + tau * ( -vx[l] * dx1(l, vx) - vy[l] * dy1(l, vx) + (dx2(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); //uy_d uy[l] = vy[l] //+ Gr*C0[l] * y_gamma(l) + tau * ( -vx[l] * dx1(l, vy) - vy[l] * dy1(l, vy) + (dx2(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 1: //left rigid ux[l] = tau / Re * dx2_forward(l, vx); break; case 2: //upper rigid uy[l] = tau / Re * dy2_down(l, vy); break; case 3: //right rigid ux[l] = tau / Re * dx2_back(l, vx); break; case 4: //lower rigid uy[l] = tau / Re * dy2_up(l, vy); break; case 5: //left upper rigid corner ux[l] = vx[l] + tau * ( 
+(dx2_forward(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 6: //right upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 7: //right lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 8: //left lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 9: //inlet (from left) ux[l] = vx[l] + tau*( -vx[l] * dx1_forward(l, vx) - vy[l] * dy1(l, vx) + (dx2_forward(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_forward(l, mu) / MM ); uy[l] = tau * ( -vx[l] * dx1_forward(l, vy) - vy[l] * dy1(l, vy) + (dx2_forward(l, vy) + dy2(l, vy)) / Re // ! , ! - C0[l] * dy1(l, mu) / MM ); break; case 10: //outlet (to right) ux[l] = vx[l] + tau*( -vx[l] * dx1_back(l, vx) - vy[l] * dy1(l, vx) + (dx2_back(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_back(l, mu) / MM //! ); uy[l] = tau * ( -vx[l] * dx1_back(l, vy) - vy[l] * dy1(l, vy) + (dx2_back(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM //! ); break; default: break; } } } __global__ void quasi_velocity_pulsation(double *ux, double *uy, double *vx, double *vy, double *C0, double *mu, double time) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner //ux_d ux[l] = vx[l] //+ Gr*C0[l] * x_gamma(l) + tau * ( -vx[l] * dx1(l, vx) - vy[l] * dy1(l, vx) + (dx2(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); //uy_d uy[l] = vy[l] //+ Gr*C0[l] * y_gamma(l) + tau * ( -vx[l] * dx1(l, vy) - vy[l] * dy1(l, vy) + (dx2(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 1: //left rigid ux[l] = tau / Re * dx2_forward(l, vx); break; case 2: //upper rigid uy[l] = tau / Re * dy2_down(l, vy); break; case 3: //right rigid ux[l] = tau / Re * dx2_back(l, vx); break; case 4: //lower rigid uy[l] = tau / Re * dy2_up(l, vy); break; case 5: //left upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 6: //right upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 7: //right lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 8: //left lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 9: //inlet (from left) ux[l] = vx[l] + tau*( -vx[l] * dx1_forward(l, vx) - vy[l] * dy1(l, vx) + (dx2_forward(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_forward(l, mu) / MM ); uy[l] = tau * ( -vx[l] * dx1_forward(l, vy) - vy[l] * dy1(l, vy) + (dx2_forward(l, vy) + dy2(l, 
vy)) / Re // ! , ! - C0[l] * dy1(l, mu) / MM ); break; case 10: //outlet (to right) ux[l] = vx[l] + tau*( -vx[l] * dx1_back(l, vx) - vy[l] * dy1(l, vx) + (dx2_back(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_back(l, mu) / MM //! ); uy[l] = tau * ( -vx[l] * dx1_back(l, vy) - vy[l] * dy1(l, vy) + (dx2_back(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM //! ); break; default: break; } ux[l] += tau*Amp*sin(Omega*time)*vibr_X; uy[l] += tau*Amp*sin(Omega*time)*vibr_Y; } } __global__ void quasi_velocity_pulsation_with_Phi(double *ux, double *uy, double *vx, double *vy, double *C0, double *mu, double time, double *Phi, double *WX, double *WY) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { double W0_W = WX[l] * vibr_X + WY[l] * vibr_Y; switch (t[l]) { case 0: //inner //ux_d ux[l] = vx[l] //+ Gr*C0[l] * x_gamma(l) + tau * ( -vx[l] * dx1(l, vx) - vy[l] * dy1(l, vx) + (dx2(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1(l, mu) / MM - VV*(W0_W)*dx1(l, C0) ); //uy_d uy[l] = vy[l] //+ Gr*C0[l] * y_gamma(l) + tau * ( -vx[l] * dx1(l, vy) - vy[l] * dy1(l, vy) + (dx2(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM - VV*(W0_W)*dy1(l, C0) ); break; case 1: //left rigid ux[l] = tau / Re * dx2_forward(l, vx); break; case 2: //upper rigid uy[l] = tau / Re * dy2_down(l, vy); break; case 3: //right rigid ux[l] = tau / Re * dx2_back(l, vx); break; case 4: //lower rigid uy[l] = tau / Re * dy2_up(l, vy); break; case 5: //left upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 6: //right upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 7: //right lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 8: //left lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 9: //inlet (from left) ux[l] = vx[l] + tau*( -vx[l] * dx1_forward(l, vx) - vy[l] * dy1(l, vx) + (dx2_forward(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_forward(l, mu) / MM - VV*(W0_W)*dx1_forward(l, C0) ); uy[l] = tau * ( -vx[l] * dx1_forward(l, vy) - vy[l] * dy1(l, vy) + (dx2_forward(l, vy) + dy2(l, vy)) / Re // ! , ! - C0[l] * dy1(l, mu) / MM - VV*(W0_W)*dy1(l, C0) ); break; case 10: //outlet (to right) ux[l] = vx[l] + tau*( -vx[l] * dx1_back(l, vx) - vy[l] * dy1(l, vx) + (dx2_back(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_back(l, mu) / MM //! - VV*(W0_W)*dx1_back(l, C0) ); uy[l] = tau * ( -vx[l] * dx1_back(l, vy) - vy[l] * dy1(l, vy) + (dx2_back(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM //! 
- VV*(W0_W)*dy1(l, C0) ); break; default: break; } } } __global__ void quasi_velocity_no_phase_field(double *ux, double *uy, double *vx, double *vy, double *C0, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner //ux_d ux[l] = vx[l] //+ Gr*C0[l] * x_gamma(l) + tau * ( -vx[l] * dx1(l, vx) - vy[l] * dy1(l, vx) + (dx2(l, vx) + dy2(l, vx)) / Re + Gr*C0[l] * cosA ); //uy_d uy[l] = vy[l] //+ Gr*C0[l] * y_gamma(l) + tau * ( -vx[l] * dx1(l, vy) - vy[l] * dy1(l, vy) + (dx2(l, vy) + dy2(l, vy)) / Re + Gr*C0[l] * sinA ); break; case 1: //left rigid ux[l] = tau / Re * dx2_forward(l, vx) + tau*Gr*C0[l] * cosA; break; case 2: //upper rigid uy[l] = tau / Re * dy2_down(l, vy) + tau*Gr*C0[l] * sinA; break; case 3: //right rigid ux[l] = tau / Re * dx2_back(l, vx) + tau*Gr*C0[l] * cosA; break; case 4: //lower rigid uy[l] = tau / Re * dy2_up(l, vy) + tau*Gr*C0[l] * sinA; break; case 5: //left upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 6: //right upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 7: //right lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 8: //left lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 9: //inlet (from left) ux[l] = vx[l] + tau*( -vx[l] * dx1_forward(l, vx) - vy[l] * dy1(l, vx) + (dx2_forward(l, vx) + dy2(l, vx)) / Re + Gr*C0[l] * cosA ); uy[l] = tau * ( -vx[l] * dx1_forward(l, vy) - vy[l] * dy1(l, vy) + (dx2_forward(l, vy) + dy2(l, vy)) / Re // ! , ! 
+ Gr*C0[l] * sinA ); break; case 10: //outlet (to right) ux[l] = vx[l] + tau*( -vx[l] * dx1_back(l, vx) - vy[l] * dy1(l, vx) + (dx2_back(l, vx) + dy2(l, vx)) / Re + Gr*C0[l] * cosA ); uy[l] = tau * ( -vx[l] * dx1_back(l, vy) - vy[l] * dy1(l, vy) + (dx2_back(l, vy) + dy2(l, vy)) / Re + Gr*C0[l] * sinA ); break; default: break; } } } __global__ void concentration(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = dx1_eq_0_forward(l, C0); break; case 2: //upper rigid C[l] = dy1_eq_0_down(l, C0); break; case 3: //right rigid C[l] = dx1_eq_0_back(l, C0); break; case 4: //lower rigid C[l] = dy1_eq_0_up(l, C0); break; case 5: //left upper rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_down(l, C0)); break; case 6: //right upper rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_down(l, C0)); break; case 7: //right lower rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_up(l, C0)); break; case 8: //left lower rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_up(l, C0)); break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: break; } } } __global__ void concentration_surface_energy_wetting(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; double Ca_test = sqrt(Ca) * 5; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = dx1_eq_0_forward(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test; break; case 2: //upper rigid C[l] = dy1_eq_0_down(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test; break; case 3: //right rigid C[l] = dx1_eq_0_back(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test; break; case 4: //lower rigid C[l] = dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test; break; case 5: //left upper rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_down(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 6: //right upper rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_down(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 7: //right lower rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 8: //left lower rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: break; } } } __global__ void concentration_geometrical_wetting(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; double Ca_test = sqrt(Ca) * 5; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * 
dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = dx1_eq_0_forward(l, C0) - cosTh * 2.0 / 3.0*hx * sqrt(pow(dx1_forward(l, C0), 2) + pow(dy1(l, C0), 2)); break; case 2: //upper rigid C[l] = dy1_eq_0_down(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test; break; case 3: //right rigid C[l] = dx1_eq_0_back(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test; break; case 4: //lower rigid C[l] = dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test; break; case 5: //left upper rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_down(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 6: //right upper rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_down(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 7: //right lower rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 8: //left lower rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: break; } } } __global__ void concentration_wetting(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid if (C0[n3[l]] < C0[l]) C[l] = C0[n3[l]]; break; case 2: //upper rigid if (C0[n4[l]] < C0[l]) C[l] = C0[n4[l]]; break; case 3: //right rigid if (C0[n1[l]] < C0[l]) C[l] = C0[n1[l]]; break; case 4: //lower rigid if (C0[n2[l]] < C0[l]) C[l] = C0[n2[l]]; break; case 5: //left upper rigid corner if (C0[n3[n4[l]]] < C0[l]) C[l] = C0[n3[n4[l]]]; break; case 6: //right upper rigid corner if (C0[n1[n4[l]]] < C0[l]) C[l] = C0[n1[n4[l]]]; break; case 7: //right lower rigid corner if (C0[n2[n1[l]]] < C0[l]) C[l] = C0[n2[n1[l]]]; break; case 8: //left lower rigid corner if (C0[n2[n3[l]]] < C0[l]) C[l] = C0[n2[n3[l]]]; break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: break; } } } __global__ void concentration_no_wetting(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = 0.5; break; case 2: //upper rigid C[l] = 0.5; break; case 3: //right rigid C[l] = 0.5; break; case 4: //lower rigid C[l] = 0.5; break; case 5: //left upper rigid corner C[l] = 0.5; break; case 6: //right upper rigid corner C[l] = 0.5; break; case 7: //right lower rigid corner C[l] = 0.5; break; case 8: //left lower rigid corner C[l] = 0.5; break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: break; } } } __global__ void concentration_no_input_C(double *C, double *C0, double *vx, 
double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = dx1_eq_0_forward(l, C0); break; case 2: //upper rigid C[l] = dy1_eq_0_down(l, C0); break; case 3: //right rigid C[l] = dx1_eq_0_back(l, C0); break; case 4: //lower rigid C[l] = dy1_eq_0_up(l, C0); break; case 5: //left upper rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_down(l, C0)); break; case 6: //right upper rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_down(l, C0)); break; case 7: //right lower rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_up(l, C0)); break; case 8: //left lower rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_up(l, C0)); break; case 9: //inlet (from left) C[l] = 0.5; dx1_eq_0_forward(l, C0); break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: break; } } } __global__ void velocity_correction(double *vx, double *vy, double *ux, double *uy, double *p) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner vx[l] = ux[l] - tau * dx1(l, p); vy[l] = uy[l] - tau * dy1(l, p); break; case 1: //left rigid vx[l] = 0.0; vy[l] = 0.0; break; case 2: //upper rigid vx[l] = 0.0; vy[l] = 0.0; break; case 3: //right rigid vx[l] = 0.0; vy[l] = 0.0; break; case 4: //lower rigid vx[l] = 0.0; vy[l] = 0.0; break; case 5: //left upper rigid corner vx[l] = 0.0; vy[l] = 0.0; break; case 6: //right upper rigid corner vx[l] = 0.0; vy[l] = 0.0; break; case 7: //right lower rigid corner vx[l] = 0.0; vy[l] = 0.0; break; case 8: //left lower rigid corner vx[l] = 0.0; vy[l] = 0.0; break; case 9: //inlet (from left) vx[l] = ux[l] - tau * dx1_forward(l, p); vy[l] = uy[l] - tau * dy1(l, p); //double vx1 = ux[n3[l]] - tau * dx1(n3[l], p); //double vx2 = ux[n3[n3[l]]] - tau * dx1(n3[n3[l]], p); //double vy1 = uy[n3[l]] - tau * dy1(n3[l], p); //double vy2 = uy[n3[n3[l]]] - tau * dy1(n3[n3[l]], p); //vx[l] = 2.0*vx1 - vx2; //vy[l] = 2.0*vy1 - vy2; break; case 10: //outlet (to right) vx[l] = ux[l] - tau * dx1_back(l, p); vy[l] = uy[l] - tau * dy1(l, p); //vx[l] = ux[n1[l]] - tau * dx1(n1[l], p); //vy[l] = uy[n1[l]] - tau * dy1(n1[l], p); break; default: break; } } } //for solely pressure __global__ void Poisson(double *p, double *p0, double *ux, double *uy, double *mu, double *C) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner p[l] = p0[l] + tau_p*( -(dx1(l, ux) + dy1(l, uy)) / tau + dx2(l, p0) + dy2(l, p0) ); break; case 1: //left rigid p[l] = dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0; break; case 2: //upper rigid p[l] = dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0; break; case 3: //right rigid p[l] = dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0; break; case 4: //lower rigid p[l] = dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0; break; case 5: //left upper rigid corner p[l] = 0.5* (dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; case 6: //right upper rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; case 7: //right lower rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 8: //left lower rigid corner 
p[l] = 0.5* (dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 9: //inlet (from left) p[l] = 8.0 / Re*Lx*dP + PHASE*((0.5*Ca*pow(dx1_forward(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); break; case 10://outlet (to right) p[l] = 0 + PHASE*((0.5*Ca*pow(dx1_back(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); break; default: break; } } } //for pressure with Phi __global__ void Poisson_pulsation_Phi(double *p, double *p0, double *ux, double *uy, double *mu, double *C, double *Phi, double *WX, double *WY) { //vibration = 1 unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner p[l] = p0[l] + tau_p*( -(dx1(l, ux) + dy1(l, uy)) / tau + dx2(l, p0) + dy2(l, p0) ); break; case 1: //left rigid p[l] = dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0; break; case 2: //upper rigid p[l] = dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0; break; case 3: //right rigid p[l] = dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0; break; case 4: //lower rigid p[l] = dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0; break; case 5: //left upper rigid corner p[l] = 0.5* (dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; case 6: //right upper rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; case 7: //right lower rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 8: //left lower rigid corner p[l] = 0.5* (dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 9: //inlet (from left) p[l] = 8.0 / Re*Lx*dP + PHASE*((0.5*Ca*pow(dx1_forward(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); p[l] += (WX[l] * WX[l] + WY[l] * WY[l])*0.5*VV; break; case 10://outlet (to right) p[l] = 0 + PHASE*((0.5*Ca*pow(dx1_back(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); p[l] += (WX[l] * WX[l] + WY[l] * WY[l])*0.5*VV; break; default: break; } } } __global__ void Poisson_pulsation(double *p, double *p0, double *ux, double *uy, double *mu, double *C, double time) { //vibration = 3 unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner p[l] = p0[l] + tau_p*( -(dx1(l, ux) + dy1(l, uy)) / tau + dx2(l, p0) + dy2(l, p0) ); break; case 1: //left rigid p[l] = dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0; break; case 2: //upper rigid p[l] = dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0; break; case 3: //right rigid p[l] = dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0; break; case 4: //lower rigid p[l] = dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0; break; case 5: //left upper rigid corner p[l] = 0.5* (dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; case 6: //right upper rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; case 7: //right lower rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 8: //left lower rigid corner p[l] = 0.5* 
(dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 9: //inlet (from left) p[l] = 8.0 / Re*Lx*dP*(1.0 + Amp*sin(Omega*time)) + PHASE*((0.5*Ca*pow(dx1_forward(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); break; case 10://outlet (to right) p[l] = 0 + PHASE*((0.5*Ca*pow(dx1_back(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); break; default: break; } } } __global__ void Poisson_Phi(double *Phi, double *Phi0, double *C, double *WX, double *WY) { //vibration = 1 unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { //if (l == 1) Phi0[l] = 0; switch (t[l]) { case 0: //inner Phi[l] = Phi0[l] + tau_p*( -(dx1(l, C)*vibr_X + dy1(l, C)*vibr_Y) + dx2(l, Phi0) + dy2(l, Phi0) ); break; case 1: //left rigid Phi[l] = dx1_eq_0_forward(l, Phi0) - C[l] * vibr_X * 2.0 * hx / 3.0; break; case 2: //upper rigid Phi[l] = dy1_eq_0_down(l, Phi0) + C[l] * vibr_Y * 2.0 * hy / 3.0; break; case 3: //right rigid Phi[l] = dx1_eq_0_back(l, Phi0) + C[l] * vibr_X * 2.0 * hx / 3.0; break; case 4: //lower rigid Phi[l] = dy1_eq_0_up(l, Phi0) - C[l] * vibr_Y * 2.0 * hy / 3.0; break; case 5: //left upper rigid corner Phi[l] = 0.5* (dx1_eq_0_forward(l, Phi0) - C[l] * vibr_X * 2.0 * hx / 3.0 + dy1_eq_0_down(l, Phi0) + C[l] * vibr_Y * 2.0 * hy / 3.0); break; case 6: //right upper rigid corner Phi[l] = 0.5* (dx1_eq_0_back(l, Phi0) + C[l] * vibr_X * 2.0 * hx / 3.0 + dy1_eq_0_down(l, Phi0) + C[l] * vibr_Y * 2.0 * hy / 3.0); break; case 7: //right lower rigid corner Phi[l] = 0.5* (dx1_eq_0_back(l, Phi0) + C[l] * vibr_X * 2.0 * hx / 3.0 + dy1_eq_0_up(l, Phi0) - C[l] * vibr_Y * 2.0 * hy / 3.0); break; case 8: //left lower rigid corner Phi[l] = 0.5* (dx1_eq_0_forward(l, Phi0) - C[l] * vibr_X * 2.0 * hx / 3.0 + dy1_eq_0_up(l, Phi0) - C[l] * vibr_Y * 2.0 * hy / 3.0); break; case 9: //inlet (from left) if (PHI_border_left == 0) { Phi[l] = PHI_value_left; } else if (PHI_border_left == 1) { Phi[l] = dx1_eq_0_forward(l, Phi0) - (PHI_value_left) * 2.0 * hx / 3.0; } else if (PHI_border_left == 2) { Phi[l] = dx2_eq_0_forward(l, Phi0) - (PHI_value_left)* hx * hx / 2.0; } else if (PHI_border_left == 3) { Phi[l] = dx1_eq_0_forward(l, Phi0) - (C[l] * vibr_X) * 2.0 * hx / 3.0; } else if (PHI_border_left == 4) { Phi[l] = C[l]*jG(l)*hy; } break; case 10://outlet (to right) if (PHI_border_right == 0) { Phi[l] = PHI_value_right; } else if (PHI_border_right == 1) { Phi[l] = dx1_eq_0_back(l, Phi0) + (PHI_value_right) * 2.0 * hx / 3.0; } else if (PHI_border_right == 2) { Phi[l] = dx2_eq_0_back(l, Phi0) - (PHI_value_right)* hx * hx / 2.0; } else if (PHI_border_right == 3) { Phi[l] = dx1_eq_0_back(l, Phi0) + (C[l] * vibr_X) * 2.0 * hx / 3.0; } else if (PHI_border_right == 4) { Phi[l] = C[l] * jG(l)*hy; } break; default: break; } if (l == 1) Phi_reference = Phi[l]; } } __global__ void Phi_normalization(double *Phi) { //vibration = 1 unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { Phi[l] = Phi[l] - Phi_reference; } } __global__ void WW_from_Phi(double *WX, double *WY, double *Phi, double *C) { { //vibration = 1 unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner WX[l] = -vibr_X*C[l] + dx1(l, Phi); WY[l] = -vibr_Y*C[l] + dy1(l, Phi); break; case 1: //left rigid WX[l] = 0; WY[l] = 0; // -vibr_Y*C[n3[l]] + dy1(n3[l], Phi);; //well, think of it if dC/dn != 0 break; case 2: //upper rigid WY[l] = 0; if (t[n4[l]] == 9) WX[l] = -vibr_X*C[l] + 
dx1_forward(l, Phi); else if (t[n4[l]] == 10) WX[l] = -vibr_X*C[l] + dx1_back(l, Phi); else WX[l] = -vibr_X*C[l] + dx1(l, Phi); break; case 3: //right rigid WX[l] = 0; WY[l] = 0; // -vibr_Y*C[n1[l]] + dy1(n1[l], Phi); break; case 4: //lower rigid WY[l] = 0; if (t[n2[l]] == 9) WX[l] = -vibr_X*C[l] + dx1_forward(l, Phi); else if (t[n2[l]] == 10) WX[l] = -vibr_X*C[l] + dx1_back(l, Phi); else WX[l] = -vibr_X*C[l] + dx1(l, Phi); break; case 5: //left upper rigid corner WX[l] = 0; WY[l] = 0; break; case 6: //right upper rigid corner WX[l] = 0; WY[l] = 0; break; case 7: //right lower rigid corner WX[l] = 0; WY[l] = 0; break; case 8: //left lower rigid corner WX[l] = 0; WY[l] = 0; break; case 9: //inlet (from left) WX[l] = -vibr_X * C[l] + dx1_forward(l, Phi); WY[l] = -vibr_Y * C[l] + dy1(l, Phi); break; /* if (W_BORDER == 0) { //dPhi/dn = 0 WX[l] = -vibr_X * C[l]; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } else if (W_BORDER == 1) { //W = 0 WX[l] = 0; WY[l] = 0; } else if (W_BORDER == 2) { WX[l] = 0; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } else if (W_BORDER == 3) { //dW/dn = 0 WX[l] = (4.0*(-vibr_X*C[n3[l]] + dx1(n3[l], Phi)) - (-vibr_X*C[n3[n3[l]]] + dx1(n3[n3[l]], Phi))) / 3.0; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } */ case 10://outlet (to right) WX[l] = -vibr_X * C[l] + dx1_back(l, Phi); WY[l] = -vibr_Y * C[l] + dy1(l, Phi); break; /* if (W_BORDER == 0) { //dPhi/dn = 0 WX[l] = -vibr_X*C[l]; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } else if (W_BORDER == 1) { //W = 0 WX[l] = 0; WY[l] = 0; } else if (W_BORDER == 2) { WX[l] = 0; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } else if (W_BORDER == 3) { //dW/dn = 0 WX[l] = (4.0*(-vibr_X*C[n1[l]] + dx1(n1[l], Phi)) - (-vibr_X*C[n1[n1[l]]] + dx1(n1[n1[l]], Phi))) / 3.0; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } */ default: break; } } } } __global__ void reduction00(double *data, unsigned int n, double* reduced) { extern __shared__ double shared[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x; shared[tid] = (i < n) ? 
abs(data[i]) : 0; if (i + blockDim.x < n) shared[tid] += abs(data[i + blockDim.x]); __syncthreads(); // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s>32; s >>= 1) { if (tid < s) { shared[tid] += shared[tid + s]; } __syncthreads(); } if (tid < 32) { // Fetch final intermediate sum from 2nd warp if (blockDim.x >= 64) shared[tid] += shared[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize / 2; offset > 0; offset /= 2) { shared[tid] += __shfl_down(shared[tid], offset); } } if (tid == 0) { reduced[blockIdx.x] = shared[0]; } } __global__ void reduction0(double *data, unsigned int n, double* reduced) { extern __shared__ double shared[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { shared[tid] = abs(data[i]); } else { shared[tid] = 0.0; } __syncthreads(); // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s>0; s >>= 1) { if (tid < s) { shared[tid] += shared[tid + s]; } __syncthreads(); } if (tid == 0) { reduced[blockIdx.x] = shared[0]; } } __global__ void reduction(double *data, unsigned int n, double* reduced) { extern __shared__ double shared[]; unsigned int tid = threadIdx.x; //unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { shared[tid] = abs(data[i]); //if (i + blockDim.x < n) shared[tid] += abs(data[i + blockDim.x]); } else { shared[tid] = 0.0; } __syncthreads(); if (blockDim.x >= 1024) { if (tid < 512) { shared[tid] += shared[tid + 512]; } __syncthreads(); } if (blockDim.x >= 512) { if (tid < 256) { shared[tid] += shared[tid + 256]; } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { shared[tid] += shared[tid + 128]; } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { shared[tid] += shared[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockDim.x >= 64) shared[tid] += shared[tid + 32]; if (blockDim.x >= 32) shared[tid] += shared[tid + 16]; if (blockDim.x >= 16) shared[tid] += shared[tid + 8]; if (blockDim.x >= 8) shared[tid] += shared[tid + 4]; if (blockDim.x >= 4) shared[tid] += shared[tid + 2]; if (blockDim.x >= 2) shared[tid] += shared[tid + 1]; } if (tid == 0) { reduced[blockIdx.x] = shared[0]; //if (blockDim.x==1) *last = shared[0]; } } __global__ void quasi_velocity_upstream(double *ux, double *uy, double *vx, double *vy, double *C0, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner //ux_d ux[l] = vx[l] //+ Gr*C0[l] * x_gamma(l) + tau * ( -VgradF(l, vx, vx, vy) //-vx[l] * dx1(l, vx) - vy[l] * dy1(l, vx) + (dx2(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); //uy_d uy[l] = vy[l] //+ Gr*C0[l] * y_gamma(l) + tau * ( -VgradF(l, vy, vx, vy) //-vx[l] * dx1(l, vy) - vy[l] * dy1(l, vy) + (dx2(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 1: //left rigid ux[l] = tau / Re * dx2_forward(l, vx); break; case 2: //upper rigid uy[l] = tau / Re * dy2_down(l, vy); break; case 3: //right rigid ux[l] = tau / Re * dx2_back(l, vx); break; case 4: //lower rigid uy[l] = tau / Re * dy2_up(l, vy); break; case 5: //left upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 6: //right upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + 
dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 7: //right lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 8: //left lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 9: //inlet (from left) ux[l] = vx[l] + tau*( -vx[l] * dx1_forward(l, vx) - vy[l] * dy1(l, vx) + (dx2_forward(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_forward(l, mu) / MM ); uy[l] = tau * ( -vx[l] * dx1_forward(l, vy) - vy[l] * dy1(l, vy) + (dx2_forward(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 10: //outlet (to right) ux[l] = vx[l] + tau*( -vx[l] * dx1_back(l, vx) - vy[l] * dy1(l, vx) + (dx2_back(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_back(l, mu) / MM //! ); uy[l] = tau * ( -vx[l] * dx1_back(l, vy) - vy[l] * dy1(l, vy) + (dx2_back(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM //! ); break; default: break; } } } __global__ void concentration_upstream(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -VgradF(l, C0, vx, vy) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = dx1_eq_0_forward(l, C0); break; case 2: //upper rigid C[l] = dy1_eq_0_down(l, C0); break; case 3: //right rigid C[l] = dx1_eq_0_back(l, C0); break; case 4: //lower rigid C[l] = dy1_eq_0_up(l, C0); break; case 5: //left upper rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_down(l, C0)); break; case 6: //right upper rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_down(l, C0)); break; case 7: //right lower rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_up(l, C0)); break; case 8: //left lower rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_up(l, C0)); break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) //C[l] = dx1_eq_0_back(l, C0); C[l] = extrapolate_forward(l, C0); break; default: break; } } } __global__ void chemical_potential_upstream(double *mu, double *C) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner mu[l] = -Gr* r_gamma(l) //nu takoe + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - Ca*(dx2(l, C) + dy2(l, C)); break; case 1: //left rigid mu[l] = dx1_eq_0_forward(l, mu); break; case 2: //upper rigid mu[l] = dy1_eq_0_down(l, mu); break; case 3: //right rigid mu[l] = dx1_eq_0_back(l, mu); break; case 4: //lower rigid mu[l] = dy1_eq_0_up(l, mu); break; case 5: //left upper rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_down(l, mu)); break; case 6: //right upper rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_down(l, mu)); break; case 7: //right lower rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_up(l, mu)); break; case 8: //left lower rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_up(l, mu)); break; case 9: //inlet (from left) mu[l] = -Ca*dx2_forward(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - Gr* r_gamma(l); //dx1_eq_0_forward(l, mu); break; case 10://outlet (to right) //mu[l] = -Ca*dx2_back(l, C) - Ca*dy2(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - Gr* r_gamma(l); //dx1_eq_0_back(l, mu); mu[l] = extrapolate_forward(l, mu); break; default: break; } } } 
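// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original solver): each reduction
// kernel above only produces one partial sum of |data| per block, so a
// host-side wrapper is still needed to obtain a single scalar (e.g. for the
// psiav / eps convergence checks).  A minimal way to finish the job,
// assuming d_partial holds at least (n_elems + threads - 1) / threads
// elements, is one kernel pass followed by a short CPU loop over the
// per-block results.  The function name and the buffer layout below are
// assumptions made for this sketch only.
double sum_abs_on_device(double *d_data, unsigned int n_elems, double *d_partial)
{
    const unsigned int threads = 256;
    unsigned int blocks = (n_elems + threads - 1) / threads;
    // one partial sum of |d_data| per block, written to d_partial[blockIdx.x]
    reduction0<<<blocks, threads, threads * sizeof(double)>>>(d_data, n_elems, d_partial);
    double *h_partial = (double*)malloc(blocks * sizeof(double));
    hipMemcpy(h_partial, d_partial, blocks * sizeof(double), hipMemcpyDeviceToHost);
    double s = 0.0;
    for (unsigned int b = 0; b < blocks; b++) s += h_partial[b]; // add per-block partial sums on the host
    free(h_partial);
    return s;
}
// --------------------------------------------------------------------------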
__global__ void swap_one(double* f_old, double* f_new) { unsigned int l = blockIdx.x*blockDim.x + threadIdx.x; if (l < n) f_old[l] = f_new[l]; } __global__ void swap_3(double* f1_old, double* f1_new, double* f2_old, double* f2_new, double* f3_old, double* f3_new) { unsigned int l = blockIdx.x*blockDim.x + threadIdx.x; if (l < n) { f1_old[l] = f1_new[l]; f2_old[l] = f2_new[l]; f3_old[l] = f3_new[l]; } } struct cross { int nx[5], ny[5]; int offset[5]; int size[5], total_size; int idx, idy, id; int block[5]; __host__ __device__ void set_geometry(unsigned int length, unsigned int height) { nx[0] = height; ny[0] = height; nx[1] = length - 1; ny[1] = height; nx[2] = height; ny[2] = length - 1; nx[3] = length - 1; ny[3] = height; nx[4] = height; ny[4] = length - 1; total_size = 0; for (int i = 0; i < 5; i++) { size[i] = (nx[i] + 1)*(ny[i] + 1); offset[i] = nx[i] + 1; total_size += size[i]; } }; __host__ __device__ void set_geometry_narrow_tubes(unsigned int L, /*length of horizontal tube*/ unsigned int H, /*length(height) of vertical tube*/ unsigned int D /*diameter(width) of tube*/) { nx[0] = D; ny[0] = D; nx[1] = L - 1; ny[1] = D; nx[2] = D; ny[2] = H - 1; nx[3] = L - 1; ny[3] = D; nx[4] = D; ny[4] = H - 1; total_size = 0; for (int i = 0; i < 5; i++) { size[i] = (nx[i] + 1)*(ny[i] + 1); offset[i] = nx[i] + 1; total_size += size[i]; } } __host__ __device__ void delete_block(int i) { total_size -= size[i]; nx[i] = -1; ny[i] = -1; offset[i] = 0; size[i] = 0; }; __host__ __device__ void set_block(int add) { block[0] = 0 + add; block[1] = block[0] + size[0]; block[2] = block[1] + size[1]; block[3] = block[2] + size[2]; block[4] = block[3] + size[3]; } //~cross() {} }; struct multi_cross { cross *Mcr; int *l, *t, *I, *J, *J_back; int Mx, My, Msize, Moffset, OFFSET; unsigned int iter = 0; unsigned int TOTAL_SIZE = 0; int *n1, *n2, *n3, *n4; unsigned int nxg, nyg, ng; unsigned int nx, ny, offset; double *C0, *C, *p, *p0, *ux, *uy, *vx, *vy, *mu; double LX, LY; //integer global inxed i unsigned int iG(unsigned int l) { return (J_back[l] - (J_back[l] / OFFSET)*OFFSET); } //integer global index j unsigned int jG(unsigned int l) { return (J_back[l] / OFFSET); } void set_global_size(int input_nx, int input_ny, int input_Mx, int input_My) { Mx = input_Mx - 1; My = input_My - 1; Msize = input_Mx*input_My; Moffset = input_Mx; //Mcr.resize(Msize, cr); Mcr = new cross[Msize]; for (int i = 0; i < Msize; i++) { Mcr[i].set_geometry(input_nx, input_ny); } for (int i = 0; i <= Mx; i++) { for (int j = 0; j <= My; j++) { Mcr[i + Moffset*j].id = i + Moffset*j; Mcr[i + Moffset*j].idx = i; Mcr[i + Moffset*j].idy = j; if (j == 0) Mcr[i + Moffset*j].delete_block(4); if (j == My) Mcr[i + Moffset*j].delete_block(2); } } for (int i = 0; i < Msize; i++) { Mcr[i].set_block(TOTAL_SIZE); TOTAL_SIZE += Mcr[i].total_size; } } void set_global_size_narrow_tubes(int input_L, int input_H, int input_D, int input_Mx, int input_My) { Mx = input_Mx - 1; My = input_My - 1; Msize = input_Mx*input_My; Moffset = input_Mx; //Mcr.resize(Msize, cr); Mcr = new cross[Msize]; for (int i = 0; i < Msize; i++) { Mcr[i].set_geometry_narrow_tubes(input_L, input_H, input_D); } for (int i = 0; i <= Mx; i++) { for (int j = 0; j <= My; j++) { Mcr[i + Moffset*j].id = i + Moffset*j; Mcr[i + Moffset*j].idx = i; Mcr[i + Moffset*j].idy = j; if (j == 0) Mcr[i + Moffset*j].delete_block(4); if (j == My) Mcr[i + Moffset*j].delete_block(2); } } for (int i = 0; i < Msize; i++) { Mcr[i].set_block(TOTAL_SIZE); TOTAL_SIZE += Mcr[i].total_size; } } void set_type() { if 
(Msize == 0) { printf("hop hey la la ley, stop it, bro, ya doin it wron' \n"); } l = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { l[i] = 0; t[i] = 0; } unsigned int k; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { l[iter] = iter; if (q == 0) { if (i == 0 && j == Mcr[k].ny[q]) t[iter] = 5; if (i == Mcr[k].nx[q] && j == Mcr[k].ny[q]) t[iter] = 6; if (i == Mcr[k].nx[q] && j == 0) t[iter] = 7; if (i == 0 && j == 0) t[iter] = 8; if (i == 0 && Mcr[k].size[1] == 0) t[iter] = 1; if (i == Mcr[k].nx[q] && Mcr[k].size[3] == 0) t[iter] = 3; if (j == 0 && Mcr[k].size[4] == 0) t[iter] = 4; if (j == Mcr[k].ny[q] && Mcr[k].size[2] == 0) t[iter] = 2; } if (q == 2) { if (i == 0) t[iter] = 1; if (i == Mcr[k].nx[q]) t[iter] = 3; } if (q == 4) { if (i == 0) t[iter] = 1; if (i == Mcr[k].nx[q]) t[iter] = 3; } if (im == 0 && i == 0 && q == 1) t[iter] = 9; if (im == Mx && i == Mcr[k].nx[q] && q == 3) t[iter] = 10; if (q == 1) { if (j == Mcr[k].ny[q]) t[iter] = 2; if (j == 0) t[iter] = 4; } if (q == 3) { if (j == Mcr[k].ny[q]) t[iter] = 2; if (j == 0) t[iter] = 4; } iter++; } } } } } } void set_type_B() { int l, L; //int l1, l2, l3, l4; //inner for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (n1[L] != -1 && n2[L] != -1 && n3[L] != -1 && n4[L] != -1) t[L] = 0; } } } //rigid walls for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; //l1 = i - 1 + OFFSET*j; l2 = i + OFFSET*j + OFFSET; l3 = i + 1 + OFFSET*j; l4 = i + OFFSET*j - OFFSET; if (I[l] == 1) { if (I[l] == 1) { if (n1[L] == -1 && n2[L] != -1 && n3[L] != -1 && n4[L] != -1) t[L] = 1; if (n1[L] != -1 && n2[L] == -1 && n3[L] != -1 && n4[L] != -1) t[L] = 2; if (n1[L] != -1 && n2[L] != -1 && n3[L] == -1 && n4[L] != -1) t[L] = 3; if (n1[L] != -1 && n2[L] != -1 && n3[L] != -1 && n4[L] == -1) t[L] = 4; } } } } //corners for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; //cout << i << " " << j << " " << l << " " << endl; pause if (I[l] == 1) { if (n2[n1[L]] == -1 && n1[L] != -1 && n2[L] != -1) t[L] = 5; if (n2[n3[L]] == -1 && n3[L] != -1 && n2[L] != -1) t[L] = 6; if (n3[n4[L]] == -1 && n3[L] != -1 && n4[L] != -1) t[L] = 7; if (n1[n4[L]] == -1 && n1[L] != -1 && n4[L] != -1) t[L] = 8; } } } //inlet, outlet for (unsigned int i = 0; i <= nxg; i = i + nxg) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (i == 0) { t[L] = 9; if (t[n3[L]] == 2) t[L] = 2; if (t[n3[L]] == 4) t[L] = 4; } if (i == nxg) { t[L] = 10; if (t[n1[L]] == 2) t[L] = 2; if (t[n1[L]] == 4) t[L] = 4; } } } } /* //near border for (int i = 0; i <= nxg; i = i + nxg) { for (int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (t[n1[L]] == 1) t[L] = 11; if (t[n2[L]] == 2) t[L] = 12; if (t[n3[L]] == 3) t[L] = 13; if (t[n4[L]] == 4) t[L] = 14; if (t[n1[L]] == 1 && t[n2[L]] == 2) t[L] = 15; if (t[n2[L]] == 2 && t[n3[L]] == 3) t[L] = 16; if (t[n3[L]] == 3 && t[n4[L]] == 4) t[L] = 17; if (t[n4[L]] == 4 && t[n1[L]] == 1) t[L] = 18; } } */ } void set_neighbor() { if (Msize == 0 || iter == 0) { printf("hop hey la la ley, stop it, bro, ya doin it wron' \n"); } n1 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n2 = 
(int*)malloc(TOTAL_SIZE * sizeof(int)); n3 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n4 = (int*)malloc(TOTAL_SIZE * sizeof(int)); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { n1[i] = -1; n2[i] = -1; n3[i] = -1; n4[i] = -1; } unsigned int k, it = 0; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { //cout << i << " " << j << " " << q << " " << it<< endl; //joint central pore and tubes if (q == 0) { if (i == 0 && (t[it] == 0 || t[it] == 2 || t[it] == 4 || t[it] == 5 || t[it] == 8)) { //n1[it] = it - (i + Mcr[k].offset[0]) + Mcr[k].size[0] + Mcr[k].nx[1] + Mcr[k].offset[1] * j; int in = Mcr[k].block[1] + Mcr[k].nx[1] + Mcr[k].offset[1] * j; n1[it] = in; n3[in] = it; } if (i == Mcr[k].nx[0] && (t[it] == 0 || t[it] == 2 || t[it] == 4 || t[it] == 6 || t[it] == 7)) { int in = Mcr[k].block[3] + Mcr[k].offset[3] * j; n3[it] = in; n1[in] = it; } if (j == 0 && (t[it] == 0 || t[it] == 1 || t[it] == 3 || t[it] == 7 || t[it] == 8)) { int in = Mcr[k].block[4] + i + Mcr[k].offset[4] * Mcr[k].ny[4]; n4[it] = in; n2[in] = it; } if (j == Mcr[k].ny[0] && (t[it] == 0 || t[it] == 1 || t[it] == 3 || t[it] == 5 || t[it] == 6)) { int in = Mcr[k].block[2] + i; n2[it] = in; n4[in] = it; } } //inner nodes if (i < Mcr[k].nx[q]) n3[it] = it + 1; if (i > 0) n1[it] = it - 1; if (j < Mcr[k].ny[q]) n2[it] = it + Mcr[k].offset[q]; if (j > 0) n4[it] = it - Mcr[k].offset[q]; //borders and inlet/outlet if (i == 0 && (t[it] == 1 || t[it] == 9)) { n1[it] = it + 2; n3[it] = it + 1; } if (i == Mcr[k].nx[q] && (t[it] == 3 || t[it] == 10)) { n1[it] = it - 1; n3[it] = it - 2; } if (t[it] == 4) { n2[it] = it + Mcr[k].offset[q]; n4[it] = it + 2 * Mcr[k].offset[q]; } if (t[it] == 2) { n2[it] = it - 2 * Mcr[k].offset[q]; n4[it] = it - Mcr[k].offset[q]; } //join crosses if (q == 3) { if (i == Mcr[k].nx[3] && (t[it] == 0 || t[it] == 2 || t[it] == 4) && im < Mx) { int in = Mcr[k + 1].block[1] + Mcr[k + 1].offset[1] * j; n3[it] = in; n1[in] = it; } } if (q == 2) { if (j == Mcr[k].ny[2] && (t[it] == 0 || t[it] == 1 || t[it] == 3) && jm < My) { int in = Mcr[k + Moffset].block[4] + i; n4[in] = it; n2[it] = in; //printf("n4 = %i n2 = %i\n", n4[in], n2[it]); } } //if (n2[it] == -1) printf("q=%i t=%i i=%i j=%i nx=%i ny=%i \n", q, t[it], i, j, Mcr[k].nx[q], Mcr[k].ny[q]); it++; } } } } } } void set_neighbor_B() { int l, L, l1, l2, l3, l4; for (unsigned int j = 0; j <= nyg; j++) { for (unsigned int i = 0; i <= nxg; i++) { l = i + OFFSET*j; L = J[l]; l1 = i - 1 + OFFSET*j; l2 = i + OFFSET*j + OFFSET; l3 = i + 1 + OFFSET*j; l4 = i + OFFSET*j - OFFSET; if (I[l] == 1) { if (i > 0) if (I[l1] == 1) n1[L] = J[l1]; if (i < nxg) if (I[l3] == 1) n3[L] = J[l3]; if (j < nyg) if (I[l2] == 1) n2[L] = J[l2]; if (j > 0) if (I[l4] == 1) n4[L] = J[l4]; } else { } } } } void set_global_id() { nxg = 0, nyg = 0; for (int im = 0; im <= Mx; im++) nxg += Mcr[im].nx[0] + 1 + Mcr[im].nx[1] + 1 + Mcr[im].nx[3] + 1; for (int jm = 0; jm <= My; jm++) nyg += Mcr[jm*Moffset].ny[0] + 1 + Mcr[jm*Moffset].ny[2] + 1 + Mcr[jm*Moffset].ny[4] + 1; if (nxg != 0) nxg--; if (nyg != 0) nyg--; I = new int[(nxg + 1)*(nyg + 1)]; J = new int[(nxg + 1)*(nyg + 1)]; J_back = new int[TOTAL_SIZE]; OFFSET = nxg + 1; for (unsigned int i = 0; i < (nxg + 1)*(nyg + 1); i++) { I[i] = 0; J[i] = -1; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { J_back[i] = -1; } int *shift_x, *shift_y; shift_x = new int[Mx 
+ 1]; shift_y = new int[My + 1]; shift_x[0] = 0; shift_y[0] = 0; for (int im = 1; im <= Mx; im++) shift_x[im] = (Mcr[im - 1].nx[0] + 1 + Mcr[im - 1].nx[1] + 1 + Mcr[im - 1].nx[3] + 1 + shift_x[im - 1]); for (int jm = 1; jm <= My; jm++) shift_y[jm] = (Mcr[(jm - 1)*Moffset].ny[0] + 1 + Mcr[(jm - 1)*Moffset].ny[2] + 1 + Mcr[(jm - 1)*Moffset].ny[4] + 1 + shift_y[jm - 1]); if (Msize == 0) { printf("set_global_id , hop hey la la ley, stop it, bro, ya doin it wron' \n"); } unsigned int k, it = 0, in, ii, jj; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { if (q == 1) { ii = i + shift_x[im]; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 0) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 3) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1 + Mcr[k].nx[0] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 2) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1) + (Mcr[k].ny[0] + 1); } if (q == 4) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm]; } in = ii + OFFSET*jj; I[in] = 1; J[in] = it; J_back[it] = in; it++; } } } } } } void set_global_id_B() { nxg = 0, nyg = 0; for (int im = 0; im <= Mx; im++) nxg += Mcr[im].nx[0] + 1 + Mcr[im].nx[1] + 1 + Mcr[im].nx[3] + 1; for (int jm = 0; jm <= My; jm++) nyg += Mcr[jm*Moffset].ny[0] + 1 + Mcr[jm*Moffset].ny[2] + 1 + Mcr[jm*Moffset].ny[4] + 1; if (nxg != 0) nxg--; if (nyg != 0) nyg--; I = new int[(nxg + 1)*(nyg + 1)]; J = new int[(nxg + 1)*(nyg + 1)]; J_back = new int[TOTAL_SIZE]; n1 = new int[TOTAL_SIZE]; n2 = new int[TOTAL_SIZE]; n3 = new int[TOTAL_SIZE]; n4 = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; OFFSET = nxg + 1; for (unsigned int i = 0; i < (nxg + 1)*(nyg + 1); i++) { I[i] = 0; J[i] = -1; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { J_back[i] = -1; n1[i] = -1; n2[i] = -1; n3[i] = -1; n4[i] = -1; t[i] = -1; } int *shift_x, *shift_y; shift_x = new int[Mx + 1]; shift_y = new int[My + 1]; shift_x[0] = 0; shift_y[0] = 0; for (int im = 1; im <= Mx; im++) shift_x[im] = (Mcr[im - 1].nx[0] + 1 + Mcr[im - 1].nx[1] + 1 + Mcr[im - 1].nx[3] + 1 + shift_x[im - 1]); for (int jm = 1; jm <= My; jm++) shift_y[jm] = (Mcr[(jm - 1)*Moffset].ny[0] + 1 + Mcr[(jm - 1)*Moffset].ny[2] + 1 + Mcr[(jm - 1)*Moffset].ny[4] + 1 + shift_y[jm - 1]); if (Msize == 0) { printf("set_global_id , hop hey la la ley, stop it, bro, ya doin it wron' \n"); } unsigned int k, it = 0, in, ii, jj; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { if (q == 1) { ii = i + shift_x[im]; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 0) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 3) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1 + Mcr[k].nx[0] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 2) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1) + (Mcr[k].ny[0] + 1); } if (q == 4) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm]; } in = ii + OFFSET*jj; I[in] = 1; J[in] = it; J_back[it] = in; it++; } } } } } } void set_global_size_box(int input_nx, int input_ny) { nx = input_nx; nxg = nx; ny 
= input_ny; nyg = ny; offset = nx + 1; OFFSET = offset; TOTAL_SIZE = (input_nx + 1) * (input_ny + 1); } void set_global_id_box() { I = new int[(nx + 1)*(ny + 1)]; J = new int[(nx + 1)*(ny + 1)]; J_back = new int[TOTAL_SIZE]; OFFSET = nx + 1; for (unsigned int i = 0; i < (nx + 1)*(ny + 1); i++) { I[i] = 0; J[i] = -1; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { J_back[i] = -1; } unsigned int k, it = 0; // in, ii, jj; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; I[k] = 1; J[k] = k; J_back[k] = k; it++; } } } void set_type_box() { l = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { l[i] = 0; t[i] = 0; } unsigned int k; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (i == 0) t[k] = 1; if (i == nx) t[k] = 3; if (j == 0) t[k] = 4; if (j == ny) t[k] = 2; iter++; } } } void set_neighbor_box() { n1 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n2 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n3 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n4 = (int*)malloc(TOTAL_SIZE * sizeof(int)); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { n1[i] = -1; n2[i] = -1; n3[i] = -1; n4[i] = -1; } unsigned int k, it = 0; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (t[k] == 0) { n1[k] = k - 1; n2[k] = k + offset; n3[k] = k + 1; n4[k] = k - offset; } if (t[k] == 2) { n1[k] = k - 1; n3[k] = k + 1; n4[k] = k - offset; } if (t[k] == 4) { n1[k] = k - 1; n2[k] = k + offset; n3[k] = k + 1; } if (t[k] == 9 || t[k] == 1) { n3[k] = k + 1; n4[k] = k - offset; n2[k] = k + offset; } if (t[k] == 10 || t[k] == 3) { n1[k] = k - 1; n4[k] = k - offset; n2[k] = k + offset; } if (i == nxg) { n3[k] = -1; } if (i == 0) { n1[k] = -1; } if (j == nyg) { n2[k] = -1; } if (j == 0) { n4[k] = -1; } it++; } } } void set_type_tube() { l = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { l[i] = 0; t[i] = 0; } unsigned int k; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (i == 0) t[k] = 9; if (i == nx) t[k] = 10; if (j == 0) t[k] = 4; if (j == ny) t[k] = 2; iter++; } } } void write_field(double *f, string file_name, double time, int step) { #ifdef __linux__ ofstream to_file(("fields/" + file_name + ".dat").c_str()); #endif #ifdef _WIN32 ofstream to_file(("fields\\" + file_name + ".dat").c_str()); #endif int l, L; to_file << time << endl; for (unsigned int j = 0; j <= nyg; j = j + step) { for (unsigned int i = 0; i <= nxg; i = i + step) { l = i + OFFSET*j; L = J[l]; //if (J[l] == J[l]) to_file << i << " " << j << " " << f[L] << endl; if (I[l] == 1) { //to_file << i << " " << j << " " << f[L] << " " << t[L] << " " << L << " " << n1[L] << " " << n2[L] << " " << n3[L] << " " << n4[L] << endl; to_file << i << " " << j << " " << f[L] << endl; } else { to_file << "skip" << endl; //to_file << i << " " << j << " " << NAN << endl; //to_file << i << " " << j << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << endl; } } } to_file.close(); } void write_linear_profile(string file_name, string head, double time, int step, double hx, double **f, int N_fields, int j_ = -1) { #ifdef __linux__ ofstream to_file(("horizontal_profile/" + file_name + ".dat").c_str()); #endif #ifdef _WIN32 ofstream to_file(("horizontal_profile\\" + file_name + ".dat").c_str()); #endif int l, L; to_file << head << " t=" << time << endl; //for 
(unsigned int j = 0; j <= nyg; j = j + step) { unsigned int j = nyg / 2; if (j_ > -1) j = j_; for (unsigned int i = 0; i <= nxg; i = i + step) { l = i + OFFSET*j; L = J[l]; //if (J[l] == J[l]) to_file << i << " " << j << " " << f[L] << endl; if (I[l] == 1) { to_file << i << " " << hx*i; for (int k = 0; k < N_fields; k++) { to_file << " " << f[k][L]; } to_file << endl; } else { to_file << "skip" << endl; //to_file << i << " " << j << " " << NAN << endl; //to_file << i << " " << j << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << endl; } } //} to_file.close(); } void write_section_profile(string file_name, string head, double time, int step, double hy, double **f, int N_fields, unsigned int i) { #ifdef __linux__ ofstream to_file(("vertical_profile/" + file_name + ".dat").c_str()); #endif #ifdef _WIN32 ofstream to_file(("vertical_profile\\" + file_name + ".dat").c_str()); #endif int l, L; to_file << head << " t=" << time << endl; for (unsigned int j = 0; j <= nyg; j = j + step) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { to_file << j << " " << hy*j; for (int k = 0; k < N_fields; k++) { to_file << " " << f[k][L]; } to_file << endl; } else { to_file << "skip" << endl; } } to_file.close(); } void write_field_tecplot(double blank, double hx, double hy, string file_name, double time, int step, int iter, double **f, unsigned int N_fields, string head) { ofstream to_file; if (iter == 1) to_file.open((file_name + ".dat").c_str()); else to_file.open((file_name + ".dat").c_str(), ofstream::app); //make time to be string type stringstream ss; ss << time; string str_time = ss.str(); //count the number of x and y elements unsigned int II = 0, JJ = 0; for (unsigned int j = 0; j <= nyg; j = j + step) JJ++; for (unsigned int i = 0; i <= nxg; i = i + step) II++; //to_file << "VARIABLES=\"x\",\"y\",\"C\",\"mu\",\"vx\",\"vy\",\"p\"" << endl; to_file << head << endl; to_file << "ZONE T=\"" + str_time + "\", " << "I=" << II << ", J=" << JJ << endl; int l, L; //to_file << time << endl; for (unsigned int j = 0; j <= nyg; j = j + step) { for (unsigned int i = 0; i <= nxg; i = i + step) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { //to_file << hx*i << " " << hy*j << " " << C[L] << " " << mu[L] << " " << vx[L] << " " << vy[L] << " " << p[L] << endl; to_file << hx*i << " " << hy*j; for (int k = 0; k < N_fields; k++) { to_file << " " << f[k][L]; } to_file << endl; } else { to_file << hx*i << " " << hy*j; for (int k = 0; k < N_fields; k++) { to_file << " " << blank; } to_file << endl; } } } //to_file.close(); } //left to be normal void left_normal_in(int first, int last) { unsigned int k, it = 0; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { //if (Mcr[k].idx == 0 && Mcr[k].idy >= in_y) if (Mcr[k].idx == 0 && (Mcr[k].idy < first || Mcr[k].idy > last)) if (t[it] == 9) t[it] = 1; it++; } } } } } } void left_normal_out(int first, int last) { unsigned int k, it = 0; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { //if (Mcr[k].idx == Mx && Mcr[k].idy <= My - out_y) if (Mcr[k].idx == Mx && (Mcr[k].idy < first || Mcr[k].idy > last)) if (t[it] == 10) t[it] = 3; it++; } } } } } } void 
save(double *vx, double *vy, double *p, double *C, double *mu, unsigned int i_time, unsigned int i_write, double timeq, double kk, unsigned int extended = 0, double* vib = NULL) { ofstream to_file("recovery.dat"); ofstream to_file2("recovery2.dat"); to_file << i_time << " " << i_write << " " << timeq << " " << kk << endl; to_file2 << i_time << " " << i_write << " " << timeq << " " << kk << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { to_file << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { to_file2 << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; } to_file.close(); to_file2.close(); } void save(double** f, unsigned int n, unsigned int i_time, unsigned int i_write, double timeq, double kk) { ofstream to_file("recovery.dat"); ofstream to_file2("recovery2.dat"); to_file << i_time << " " << i_write << " " << timeq << " " << kk << endl; to_file2 << i_time << " " << i_write << " " << timeq << " " << kk << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { for (unsigned int k = 0; k < n; k++) { to_file << f[k][i] << " "; } to_file << endl; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { for (unsigned int k = 0; k < n; k++) { to_file2 << f[k][i] << " "; } to_file2 << endl; } to_file.close(); to_file2.close(); } void recover(double *vx, double *vy, double *p, double *C, double *mu, unsigned int &i_time, unsigned int &i_write, double &timeq, unsigned int &kk, unsigned int extended = 0, double *vib = NULL) { ifstream from_file("recovery.dat"); string str; string substr; stringstream ss; getline(from_file, str); ss << str; ss >> substr; i_time = atoi(substr.c_str()); ss >> substr; i_write = atoi(substr.c_str()); ss >> substr; timeq = atof(substr.c_str()); ss >> substr; kk = atoi(substr.c_str()); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; ss >> substr; vx[i] = atof(substr.c_str()); ss >> substr; vy[i] = atof(substr.c_str()); ss >> substr; p[i] = atof(substr.c_str()); ss >> substr; C[i] = atof(substr.c_str()); if (extended) ss >> substr; vib[i] = atof(substr.c_str()); } from_file.close(); } void recover(double** f, unsigned int n, unsigned int& i_time, unsigned int& i_write, double& timeq, unsigned int& kk) { ifstream from_file("recovery.dat"); string str; string substr; stringstream ss; getline(from_file, str); ss << str; ss >> substr; i_time = atoi(substr.c_str()); ss >> substr; i_write = atoi(substr.c_str()); ss >> substr; timeq = atof(substr.c_str()); ss >> substr; kk = atoi(substr.c_str()); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; for (unsigned int k = 0; k < n; k++) { ss >> substr; f[k][i] = atof(substr.c_str()); } } from_file.close(); } void read_concentration(double *C, string file_name, int column, int skip_lines = 1, int invert_C = 1) { ifstream from_file(file_name); string str; string substr; stringstream ss; for (int k = 0; k < skip_lines; k++) { getline(from_file, str); } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; for (int k = 0; k < column; k++) { ss >> substr; } C[i] = atof(substr.c_str()); if (invert_C == 1) C[i] = C[i] * (-1); } from_file.close(); } void read_grid_geometry() { ifstream from_file("GRID.dat"); if (from_file.good()) { cout << endl << "GRID.dat has been read" << endl << endl; } string str; string substr; stringstream ss; getline(from_file, str); ss << str; 
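// GRID.dat layout (as parsed below):
//   line 1 : nx  ny  OFFSET  TOTAL_SIZE
//   line 2 : header, skipped
//   then, for every global node (i, j), row-major with j outermost:
//     I  i  j  J  [ t  J_back  n1  n2  n3  n4 ]   -- bracketed part present only when I == 1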
ss >> substr; nx = nxg = atoi(substr.c_str()); ss >> substr; ny = nyg = atoi(substr.c_str()); ss >> substr; offset = OFFSET = atoi(substr.c_str()); ss >> substr; TOTAL_SIZE = atoi(substr.c_str()); getline(from_file, str); //head n1 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n2 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n3 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n4 = (int*)malloc(TOTAL_SIZE * sizeof(int)); J_back = (int*)malloc(TOTAL_SIZE * sizeof(int)); t = (int*)malloc(TOTAL_SIZE * sizeof(int)); I = new int[(nxg + 1) * (nyg + 1)]; J = new int[(nxg + 1) * (nyg + 1)]; int L, l; for (int j = 0; j <= nyg; j++) { for (int i = 0; i <= nxg; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; //read a line L = i + OFFSET * j; ss >> substr; I[L] = atoi(substr.c_str()); ss >> substr; // i ss >> substr; // j ss >> substr; J[L] = atoi(substr.c_str()); if (I[L] == 1) { l = J[L]; ss >> substr; t[l] = atoi(substr.c_str()); ss >> substr; J_back[l] = atoi(substr.c_str()); ss >> substr; n1[l] = atoi(substr.c_str()); ss >> substr; n2[l] = atoi(substr.c_str()); ss >> substr; n3[l] = atoi(substr.c_str()); ss >> substr; n4[l] = atoi(substr.c_str()); } } } from_file.close(); } void linear_pressure(double *p, double hx, double hy, double cosA, double sinA, double Lx, double Ly, double coefficient = 1) { for (unsigned int l = 0; l < TOTAL_SIZE; l++) { p[l] = coefficient*((Lx - hx*iG(l))*cosA - (Ly - hy*jG(l))*sinA); } } //C0(qx,qy)=0.5d0*dtanh((qx*hx-0.5d0)/delta) void fill_gradually(double *C, double hx, double hy, double delta, double shift) { unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); C[l] = 0.5*tanh((i*hx - shift) / delta); } } void fill_with_sphere(double *C, double hx, double hy, double x0, double y0, double R0, double C_outer, double C_inner) { unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); double x = i*hx, y = j*hy; if (sqrt(pow(x - x0, 2) + pow(y - y0, 2)) < R0) { C[l] = C_inner; } else { C[l] = C_outer; } } } void fill_horizontal_way(double *C, double hx, double hy, double eq_C, double y0, double amp, double k, double delta) { unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); double x = i*hx, y = j*hy; C[l] = eq_C*(tanh((y - y0 - amp*cos(k*x)) / delta)); } } void fast_test_writing(double *f) { ofstream to_file("test_field.dat"); to_file << "i, j, f" << endl; int l, L; for (unsigned int j = 0; j <= nyg; j = j++) { for (unsigned int i = 0; i <= nxg; i = i++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { to_file << i << " " << j << " " << f[L] << endl; } else { to_file << "skip" << endl; } } } to_file.close(); } double isoline(double hx, double hy, double *C, signed char *mark, double *fx, double *fy, double val) { //integer, intent(in) ::nx, ny // real * 8, intent(in) ::Lx, C(0:nx, 0 : ny), val //real * 8, intent(inout) ::fx(0:nx - 1, 0 : ny), fy(0:nx, 0 : ny - 1) //integer, intent(inout) ::mark(0:nx, 0 : ny) //integer qx, qy, i, j //real * 8 hx, hy, len //real(8), parameter::nan = transfer(-2251799813685248_int64, 1._real64) double len = 0; double nan = NAN; unsigned int i, j, ii, jj; unsigned int lr, lu, lru; //l right, up and right-up for (unsigned int l = 0; l < TOTAL_SIZE; l++) { fx[l] = nan; fy[l] = nan; } // int l = 0; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (C[l] < val) mark[l] = -1; else if (C[l] > val) mark[l] = +1; else mark[l] = 0; } for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); if (t[l] == 2 || t[l] == 3 || t[l] == 10 
|| t[l] == 6 || t[l] == 7) continue; if (t[n2[l]] == 10 || t[n4[l]] == 10) continue; if (n3[l] == -1 || n2[l] == -1) continue; ii = iG(n3[l]); jj = jG(n2[l]); //if (ii > nxg || jj > nyg) continue; //cout << "l " << l << endl; lr = n3[l]; lu = n2[l]; lru = n3[n2[l]]; if (abs(mark[l] + mark[lr] + mark[lu] + mark[lru]) == 4) continue; else { //case a //************ //************ //************ //************ // if (mark[l] == 0 && mark[lr] == 0) { fy[l] = hy*j; fy[lr] = hy*j; len = len + hx; continue; } //case b //| *********** //| *********** //| *********** //| *********** //| *********** if (mark[l] == 0 && mark[lu] == 0) { fx[l] = hx*i; fx[lu] = hx*i; len = len + hy; continue; } //case 1 //************ //************ // //************ //************ if (mark[l] * mark[lu] <= 0 && mark[lr] * mark[lru] <= 0 && mark[l] * mark[lu] + mark[lr] * mark[lru] != 0) { fy[l] = (val - C[l])*hy / (C[lu] - C[l]) + hy*j; //left fy[lr] = (val - C[lr])*hy / (C[lru] - C[lr]) + hy*j; //right len = len + sqrt(hx*hx + pow(fy[lr] - fy[l], 2)); continue; } //case 2 //***** | ****** //***** | ****** //***** | ****** //***** | ****** //***** | ****** if (mark[l] * mark[lr] <= 0 && mark[lu] * mark[lru] <= 0 && mark[l] * mark[lr] + mark[lu] * mark[lru] != 0) { fx[l] = (val - C[l])*hx / (C[lr] - C[l]) + hx*i; //down fx[lu] = (val - C[lu])*hx / (C[lru] - C[lu]) + hx*i; //up len = len + sqrt(pow(fx[lu] - fx[l], 2) + hy*hy); continue; } //case 3 //***** | ****** //***** | ****** //****** //************ //************ if (mark[l] * mark[lu] <= 0 && mark[lu] * mark[lru] <= 0 && mark[l] * mark[lu] + mark[lu] * mark[lru] != 0) { fx[lu] = (val - C[lu])*hx / (C[lru] - C[lu]) + hx*i; //up fy[l] = (val - C[l])*hy / (C[lu] - C[l]) + hy*j; //left len = len + sqrt(pow(fx[lu] - hx*i, 2) + pow(fy[l] - hy*(jj), 2)); continue; } //case 4 //***** | ****** //***** | ****** //***** //************ //************ if (mark[lr] * mark[lru] <= 0 && mark[lu] * mark[lru] <= 0 && mark[lr] * mark[lru] + mark[lu] * mark[lru] != 0) { fx[lu] = (val - C[lu])*hx / (C[lru] - C[lu]) + hx*i; //up fy[lr] = (val - C[lr])*hy / (C[lru] - C[lr]) + hy*j; //right len = len + sqrt(pow(fx[lu] - hx*(ii), 2) + pow(fy[lr] - hy*(jj), 2)); continue; } //case 5 //************ //************ //****** //***** | ****** //***** | ****** if (mark[l] * mark[lr] <= 0 && mark[lr] * mark[lru] <= 0 && mark[l] * mark[lr] + mark[lr] * mark[lru] != 0) { fy[lr] = (val - C[lr])*hy / (C[lru] - C[lr]) + hy*j; //right fx[l] = (val - C[l])*hx / (C[lr] - C[l]) + hx*i; //down len = len + sqrt(pow(fx[l] - hx*(ii), 2) + pow(fy[lr] - hy*j, 2)); continue; } //case 6 //************ //************ //****** //***** | ****** //***** | ****** if (mark[l] * mark[lr] <= 0 && mark[l] * mark[lu] <= 0 && mark[l] * mark[lr] + mark[l] * mark[lu] != 0) { fy[l] = (val - C[l])*hy / (C[lu] - C[l]) + hy*j; //left fx[l] = (val - C[l])*hx / (C[lr] - C[l]) + hx*i; //down len = len + sqrt(pow(fx[l] - hx*i, 2) + pow(fy[l] - hy*j, 2)); continue; } }//end of main if } return len; } double volume(double hx, double hy, double *C, double lim) { double vol = 0; //unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (t[l] == 2 || t[l] == 3 || t[l] == 10 || t[l] == 6 || t[l] == 7) continue; if (abs(C[l]) < lim) vol += hx*hy; } return vol; } double change_sign_at_X(double hx, double hy, double *F, unsigned int j) { int l1, l2, L1, L2; double x = 0; double F_ = 0; //unsigned int j = nyg / 2; l1 = 0 + OFFSET*j; L1 = J[l1]; for (unsigned int i = 1; i <= nxg; i++) { l2 = i + OFFSET*j; L2 = J[l2]; if (I[l2] == 
1) { if (F[L2] * F[L1] <= 0) { x = hx*(F_ - F[L1]) / (F[L2] - F[L1]) + ((i - 1)*hx); return x; } L1 = L2; } else { cout << "not a good sign you see it" << endl; exit(0); } } return x; } double pressure_jump(double hx, double hy, double *p, double x_, double border_width) { int l, L; unsigned int j = nyg / 2; double P1 = 0, P2 = 0; int i1, i2, n1 = 0, n2 = 0; i1 = (int)((x_ - border_width) / hx); i2 = (int)((x_ + border_width) / hx); for (unsigned int i = 0; i <= nxg; i++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (i < i1) { P1 += p[L]; n1++; } if (i > i2) { P2 += p[L]; n2++; } } else { cout << "not a good sign you see it" << endl; exit(0); } } P1 = P1 / n1; P2 = P2 / n2; double ret = abs(P2 - P1); if (!std::isfinite(ret)) return 0.0; else return ret; } double flow_rate(double hx, double hy, double *vx, double Ly, unsigned int i) { int l, L; double Q = 0.0; for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { Q += vx[L]; } else { cout << "not a good sign you see it" << endl; exit(0); } } Q = Q * hy / Ly; return Q; } double tension(double hx, double hy, double *C) { double ten = 0; //unsigned int lr, lu, lru; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (t[l] == 0) { ten += 0.25 / hx / hx*pow(C[n3[l]] - C[n1[l]], 2) + 0.25 / hy / hy*pow(C[n2[l]] - C[n4[l]], 2); } } return ten*hx*hy; } void X_averaged_in_each_phase(double hx, double hy, double *C, double *X, double &X1av, double &X2av, double &Xav, double level = 0.0) { Xav = 0; X1av = 0; /*plus*/ X2av = 0; /*minus*/ unsigned int n = 0, n2 = 0, n_plus = 0, n2_plus = 0, n_minus = 0, n2_minus = 0; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (t[l] == 0) { Xav += X[l]; n++; if (C[l] > level) { X1av += X[l]; n_plus++; } if (C[l] < -level) { X2av += X[l]; n_minus++; } } else { Xav += X[l] / 2; n2++; if (C[l] > level) { X1av += X[l] / 2; n2_plus++; } if (C[l] < -level) { X2av += X[l] / 2; n2_minus++; } } } if (n + n2 > 0) Xav /= (n + 0.5*n2); if (n_plus + n2_plus > 0) X1av /= (n_plus + 0.5*n2_plus); if (n_minus + n2_minus > 0) X2av /= (n_minus + 0.5*n2_minus); } #define DX(F) 0.5 / hx * (F[n3[l]] - F[n1[l]]) #define DY(F) 0.5 / hy * (F[n2[l]] - F[n4[l]]) #define DX2(F) 1.0 / (hx * hx) * (F[n3[l]] + F[n1[l]] - 2.0 * F[l]) #define DY2(F) 1.0 / (hy * hy) * (F[n2[l]] + F[n4[l]] - 2.0 * F[l]) //#define DXY(F) (-F[l - 1 + OFFSET] + F[l + 1 + OFFSET] - F[l + 1 - OFFSET] + f[l - 1 - OFFSET]) / hx / hy / 4.0; #define DXY(F) (-F[n1[n2[l]]] + F[n3[n2[l]]] - F[n3[n4[l]]] + F[n1[n4[l]]]) / hx / hy / 4.0 void curvature_direct(double *C, double hx, double hy, double *curv, double add = 0.0) { double dCx, dCy, abs_dC; unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); if (t[l] == 0) { dCx = DX(C); dCy = DY(C); abs_dC = sqrt(dCx*dCx + dCy*dCy); double abs_dC3 = abs_dC*abs_dC*abs_dC + add; curv[l] = (dCx*dCx*DY2(C) + dCy*dCy*DX2(C) - 2.0*dCx*dCy*DXY(C)) / abs_dC3; //if (abs_dC < 1e-6) curv[l] = 0; } else { curv[l] = 0.0; } } } void curvature_direct2(double *C, double hx, double hy, double *curv) { double dCx, dCy, abs_dC; unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); if (t[l] == 0) { dCx = DX(C); dCy = DY(C); abs_dC = sqrt(dCx*dCx + dCy*dCy); double abs_dC3 = abs_dC*abs_dC*abs_dC; curv[l] = (dCx*dCx*DY2(C) + dCy*dCy*DX2(C) - 2.0*dCx*dCy*DXY(C)) / abs_dC3; if (abs_dC < 1e-6) curv[l] = 0; } else { curv[l] = 0.0; } } } void curvature_2_steps(double *C, double *nx, double *ny, double hx, double hy, double *curv) { //1 for (unsigned int l 
= 0; l < TOTAL_SIZE; l++) { if (t[l] == 0) { double dCx = DX(C); double dCy = DY(C); double abs_dC = sqrt(dCx*dCx + dCy*dCy) + 0.001; nx[l] = dCx / abs_dC; ny[l] = dCy / abs_dC; } else { nx[l] = 0.0; ny[l] = 0.0; } } //2 for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (t[l] == 0) { curv[l] = DX(nx) + DY(ny); } else { curv[l] = 0.0; } } } void check() { int l, L; ofstream write("geomSettingCheck.txt"); write << "i, j, 1, L, t[L], n1[L], n2[L], n3[L], n4[L]" << endl; for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[i + OFFSET*j] == 1) write << i << " " << j << " " << 1 << " " << L << " " << t[L] << " " << n1[L] << " " << n2[L] << " " << n3[L] << " " << n4[L] << endl; else write << i << " " << j << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << endl; } } write.close(); } unsigned int checkExit(double *C) { for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (t[l] == 10) { if (C[l] < 0) return 1; } } return 0; } }; struct multi_line { int *l, *t, *I, *J, *J_back; unsigned int line_N; // number of lines of the whole porous media unsigned int tube_N; // number of tubes per line unsigned int iter = 0; unsigned int TOTAL_SIZE = 0; unsigned int OFFSET; int *n1, *n2, *n3, *n4; unsigned int nxg, nyg; double *C0, *C, *p, *p0, *ux, *uy, *vx, *vy, *mu; unsigned int x; //length of porous block unsigned int y; //width of porous block unsigned int z; //width of capillary tube unsigned int line_x; //length of line unsigned int line_y; //width of line double hx, hy, Lx, Ly; unsigned int *gi, *gj; unsigned int shiftX = 0, shiftY = 0; vector <unsigned int> li, lj; void generate_levels(unsigned int x_in, unsigned int y_in, unsigned int z_in, unsigned int N_in, unsigned int tube_in) { x = x_in; y = y_in; z = z_in; line_N = N_in; tube_N = tube_in; line_x = 2 * (x + z); line_y = tube_N*(z + y) - y; hy = 1.0 / z; hx = hy; Lx = line_x*hx; Ly = line_y*hy; nyg = line_y; nxg = line_N*line_x + x; OFFSET = nxg + 1; J = new int[(nxg + 1)*(nyg + 1)]; I = new int[(nxg + 1)*(nyg + 1)]; for (unsigned int i = 0; i < (nxg + 1)*(nyg + 1); i++) { J[i] = -1; I[i] = 0; } // zero-level while (shiftY < line_y) { for (unsigned int j = 0; j <= z; j++) { for (unsigned int i = 0; i <= x - 1; i++) { //gi[i] = 1; gj[j] = 1; li.push_back(i + shiftX); lj.push_back(j + shiftY); iter++; } } shiftY += y + z; } cout << iter << endl; // main levels for (unsigned int C = 1; C <= line_N; C++) { //column shiftX += x; for (unsigned int i = 0; i <= z; i++) { for (unsigned int j = 0; j <= line_y; j++) { li.push_back(i + shiftX); lj.push_back(j); iter++; } } //blocks shiftX += z; shiftY = (y + z) / 2; while (shiftY < line_y) { for (unsigned int i = 1; i <= x - 1; i++) { for (unsigned int j = 0; j <= z; j++) { li.push_back(i + shiftX); lj.push_back(j + shiftY); iter++; } } shiftY += y + z; } //column shiftX += x; for (unsigned int i = 0; i <= z; i++) { for (unsigned int j = 0; j <= line_y; j++) { li.push_back(i + shiftX); lj.push_back(j); iter++; } } //blocks shiftX += z; shiftY = 0; while (shiftY < line_y) { for (unsigned int j = 0; j <= z; j++) { for (unsigned int i = 1; i <= x - 1; i++) { //gi[i] = 1; gj[j] = 1; li.push_back(i + shiftX); lj.push_back(j + shiftY); iter++; } if (C == line_N) { li.push_back(x + shiftX); lj.push_back(j + shiftY); iter++; } } shiftY += y + z; } } TOTAL_SIZE = iter; J_back = new int[TOTAL_SIZE]; n1 = new int[TOTAL_SIZE]; n2 = new int[TOTAL_SIZE]; n3 = new int[TOTAL_SIZE]; n4 = new int[TOTAL_SIZE]; t = new 
int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { n1[i] = -1; n2[i] = -1; n3[i] = -1; n4[i] = -1; t[i] = -1; } for (unsigned int i = 0; i < iter; i++) { J_back[i] = li[i] + OFFSET*lj[i]; J[J_back[i]] = i; I[J_back[i]] = 1; } } void set_neighbor() { int l, L, l1, l2, l3, l4; for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; l1 = i - 1 + OFFSET*j; l2 = i + OFFSET*j + OFFSET; l3 = i + 1 + OFFSET*j; l4 = i + OFFSET*j - OFFSET; if (I[l] == 1) { if (i > 0) if (I[l1] == 1) n1[L] = J[l1]; if (i < nxg) if (I[l3] == 1) n3[L] = J[l3]; if (j < nyg) if (I[l2] == 1) n2[L] = J[l2]; if (j > 0) if (I[l4] == 1) n4[L] = J[l4]; } else { } } } } void set_type() { int l, L; //int l1, l2, l3, l4; //inner for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (n1[L] != -1 && n2[L] != -1 && n3[L] != -1 && n4[L] != -1) t[L] = 0; } } } //rigid walls for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; // l1 = i - 1 + OFFSET*j; l2 = i + OFFSET*j + OFFSET; l3 = i + 1 + OFFSET*j; l4 = i + OFFSET*j - OFFSET; if (I[l] == 1) { if (I[l] == 1) { if (n1[L] == -1 && n2[L] != -1 && n3[L] != -1 && n4[L] != -1) t[L] = 1; if (n1[L] != -1 && n2[L] == -1 && n3[L] != -1 && n4[L] != -1) t[L] = 2; if (n1[L] != -1 && n2[L] != -1 && n3[L] == -1 && n4[L] != -1) t[L] = 3; if (n1[L] != -1 && n2[L] != -1 && n3[L] != -1 && n4[L] == -1) t[L] = 4; } } } } //corners for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (n2[n1[L]] == -1 && n1[L] != -1 && n2[L] != -1) t[L] = 5; if (n2[n3[L]] == -1 && n3[L] != -1 && n2[L] != -1) t[L] = 6; if (n3[n4[L]] == -1 && n3[L] != -1 && n4[L] != -1) t[L] = 7; if (n1[n4[L]] == -1 && n1[L] != -1 && n4[L] != -1) t[L] = 8; } } } //inlet, outlet for (unsigned int i = 0; i <= nxg; i = i + nxg) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (i == 0) { t[L] = 9; if (t[n3[L]] == 2) t[L] = 2; if (t[n3[L]] == 4) t[L] = 4; } if (i == nxg) { t[L] = 10; if (t[n1[L]] == 2) t[L] = 2; if (t[n1[L]] == 4) t[L] = 4; } } } } } void check() { int l, L; ofstream write("out.txt"); for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[i + OFFSET*j] == 1) write << i << " " << j << " " << 1 << " " << L << " " << t[L] << " " << n1[L] << " " << n2[L] << " " << n3[L] << " " << n4[L] << endl; else write << i << " " << j << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << endl; } } write.close(); } void write_field(double *f, string file_name, double time, int step) { #ifdef __linux__ ofstream to_file(("fields/" + file_name + ".dat").c_str()); #endif #ifdef _WIN32 ofstream to_file(("fields\\" + file_name + ".dat").c_str()); #endif int l, L; to_file << time << endl; for (unsigned int j = 0; j <= nyg; j = j + step) { for (unsigned int i = 0; i <= nxg; i = i + step) { l = i + OFFSET*j; L = J[l]; //if (J[l] == J[l]) to_file << i << " " << j << " " << f[L] << endl; if (I[l] == 1) { //to_file << i << " " << j << " " << f[L] << " " << t[L] << " " << L << " " << n1[L] << " " << n2[L] << " " << n3[L] << " " << n4[L] << endl; to_file << i << " " << j << " " << f[L] << endl; } else { to_file << "skip" << endl; //to_file << i << " " << j << " " << NAN << endl; //to_file << i << " " << j << " " << 0 << " " << 0 << " " 
<< 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << endl; } } } to_file.close(); } void save(double *vx, double *vy, double *p, double *C, double *mu, unsigned int i_time, unsigned int i_write) { ofstream to_file("recovery.dat"); ofstream to_file2("recovery2.dat"); to_file << i_time << " " << i_write << endl; to_file2 << i_time << " " << i_write << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) to_file << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) to_file2 << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; to_file.close(); to_file2.close(); } void recover(double *vx, double *vy, double *p, double *C, double *mu, unsigned int &i_time, unsigned int &i_write) { ifstream from_file("recovery.dat"); string str; string substr; stringstream ss; getline(from_file, str); ss << str; ss >> substr; i_time = atoi(substr.c_str()); ss >> substr; i_write = atoi(substr.c_str()); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; ss >> substr; vx[i] = atof(substr.c_str()); ss >> substr; vy[i] = atof(substr.c_str()); ss >> substr; p[i] = atof(substr.c_str()); ss >> substr; C[i] = atof(substr.c_str()); } from_file.close(); } }; struct box { cross *Mcr; int *l, *t, *I, *J, *J_back; int Mx, My, Msize, Moffset, OFFSET; unsigned int iter = 0; unsigned int TOTAL_SIZE = 0; int *n1, *n2, *n3, *n4; unsigned int nx, ny, offset; unsigned int nxg, nyg; double *C0, *C, *p, *p0, *ux, *uy, *vx, *vy, *mu; double LX, LY; void set_global_size(int input_nx, int input_ny) { nx = input_nx; nxg = nx; ny = input_ny; nyg = ny; offset = nx + 1; OFFSET = offset; TOTAL_SIZE = (input_nx + 1) * (input_ny + 1); } void set_type() { l = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { l[i] = 0; t[i] = 0; } unsigned int k; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (i == 0) t[k] = 9; if (i == nx) t[k] = 10; if (j == 0) t[k] = 4; if (j == ny) t[k] = 2; iter++; } } } void set_neighbor() { n1 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n2 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n3 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n4 = (int*)malloc(TOTAL_SIZE * sizeof(int)); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { n1[i] = -1; n2[i] = -1; n3[i] = -1; n4[i] = -1; } unsigned int k, it = 0; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (t[k] == 0) { n1[k] = k - 1; n2[k] = k + offset; n3[k] = k + 1; n4[k] = k - offset; } if (t[k] == 2) n4[k] = k - offset; if (t[k] == 4) n2[k] = k + offset; if (t[k] == 9) { n3[k] = k + 1; n4[k] = k - offset; n2[k] = k + offset; } if (t[k] == 10) { n1[k] = k - 1; n4[k] = k - offset; n2[k] = k + offset; } it++; } } } void set_global_id() { I = new int[(nx + 1)*(ny + 1)]; J = new int[(nx + 1)*(ny + 1)]; J_back = new int[TOTAL_SIZE]; OFFSET = nx + 1; for (unsigned int i = 0; i < (nx + 1)*(ny + 1); i++) { I[i] = 0; J[i] = -1; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { J_back[i] = -1; } unsigned int k, it = 0; // , in, ii, jj; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; I[k] = 1; J[k] = k; J_back[k] = k; it++; } } } void write_field(double *f, string file_name, double time, int step) { #ifdef __linux__ ofstream to_file(("fields/" + file_name + ".dat").c_str()); #endif #ifdef _WIN32 ofstream to_file(("fields\\" + file_name + 
".dat").c_str()); #endif unsigned int l, L; to_file << time << endl; for (unsigned int j = 0; j <= nyg; j = j + step) { for (unsigned int i = 0; i <= nxg; i = i + step) { l = i + OFFSET*j; L = J[l]; //if (J[l] == J[l]) to_file << i << " " << j << " " << f[L] << endl; if (I[l] == 1) { //to_file << i << " " << j << " " << f[L] << " " << t[L] << " " << L << " " << n1[L] << " " << n2[L] << " " << n3[L] << " " << n4[L] << " " << //(J_back[L] - (J_back[L] / OFFSET)*OFFSET) << " " << (J_back[L] / OFFSET) << endl; to_file << i << " " << j << " " << f[L] << endl; } else { to_file << "skip" << endl; //to_file << i << " " << j << " " << NAN << endl; //to_file << i << " " << j << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << endl; } } } to_file.close(); } void save(double *vx, double *vy, double *p, double *C, double *mu, unsigned int i_time, unsigned int i_write) { ofstream to_file("recovery.dat"); ofstream to_file2("recovery2.dat"); to_file << i_time << " " << i_write << endl; to_file2 << i_time << " " << i_write << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) to_file << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) to_file2 << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; to_file.close(); to_file2.close(); } void recover(double *vx, double *vy, double *p, double *C, double *mu, unsigned int &i_time, unsigned int &i_write) { ifstream from_file("recovery.dat"); string str; string substr; stringstream ss; getline(from_file, str); ss << str; ss >> substr; i_time = atoi(substr.c_str()); ss >> substr; i_write = atoi(substr.c_str()); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; ss >> substr; vx[i] = atof(substr.c_str()); ss >> substr; vy[i] = atof(substr.c_str()); ss >> substr; p[i] = atof(substr.c_str()); ss >> substr; C[i] = atof(substr.c_str()); } from_file.close(); } }; struct box_inherited :multi_cross { unsigned int nx, ny, offset; void set_global_size(int input_nx, int input_ny) { nx = input_nx; nxg = nx; ny = input_ny; nyg = ny; offset = nx + 1; OFFSET = offset; TOTAL_SIZE = (input_nx + 1) * (input_ny + 1); } void set_type() { l = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { l[i] = 0; t[i] = 0; } unsigned int k; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (i == 0) t[k] = 9; if (i == nx) t[k] = 10; if (j == 0) t[k] = 4; if (j == ny) t[k] = 2; iter++; } } } void set_neighbor() { n1 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n2 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n3 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n4 = (int*)malloc(TOTAL_SIZE * sizeof(int)); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { n1[i] = -1; n2[i] = -1; n3[i] = -1; n4[i] = -1; } unsigned int k, it = 0; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (t[k] == 0) { n1[k] = k - 1; n2[k] = k + offset; n3[k] = k + 1; n4[k] = k - offset; } if (t[k] == 2) n4[k] = k - offset; if (t[k] == 4) n2[k] = k + offset; if (t[k] == 9) { n3[k] = k + 1; n4[k] = k - offset; n2[k] = k + offset; } if (t[k] == 10) { n1[k] = k - 1; n4[k] = k - offset; n2[k] = k + offset; } it++; } } } void set_global_id() { I = new int[(nx + 1)*(ny + 1)]; J = new int[(nx + 1)*(ny + 1)]; J_back = new int[TOTAL_SIZE]; OFFSET = nx + 1; for (unsigned int i = 0; i < (nx + 1)*(ny + 1); i++) { I[i] = 0; 
J[i] = -1; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { J_back[i] = -1; } unsigned int k, it = 0; // in, ii, jj; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; I[k] = 1; J[k] = k; J_back[k] = k; it++; } } } }; __global__ void stupid_alloc(unsigned int TS) { n1 = new int[TS]; n2 = new int[TS]; n3 = new int[TS]; n4 = new int[TS]; t = new int[TS]; J_back = new int[TS]; } __global__ void stupid_swap(int *nn1, int *nn2, int *nn3, int *nn4, int *tt, int* JJ, unsigned int TS) { //unsigned int l = blockIdx.x*blockDim.x + threadIdx.x; for (unsigned int l = 0; l < TS; l++) { //printf("%i \n", l); n1[l] = nn1[l]; n2[l] = nn2[l]; n3[l] = nn3[l]; n4[l] = nn4[l]; t[l] = tt[l]; J_back[l] = JJ[l]; } } //this "stupid" step is designed to keep some objects in the global GPU scope //it is supposed to simplify some other parts of the code void stupid_step(int *nn1, int *nn2, int *nn3, int *nn4, int *tt, int *JJ, unsigned int TS) { /* , GPU*/ unsigned int TSB = TS * sizeof(int); int *n1_temp, *n2_temp, *n3_temp, *n4_temp, *t_temp, *J_temp; hipMalloc((void**)&n1_temp, TSB); hipMalloc((void**)&n2_temp, TSB); hipMalloc((void**)&n3_temp, TSB); hipMalloc((void**)&n4_temp, TSB); hipMalloc((void**)&t_temp, TSB); hipMalloc((void**)&J_temp, TSB); hipMemcpy(n1_temp, nn1, TSB, hipMemcpyHostToDevice); hipMemcpy(n2_temp, nn2, TSB, hipMemcpyHostToDevice); hipMemcpy(n3_temp, nn3, TSB, hipMemcpyHostToDevice); hipMemcpy(n4_temp, nn4, TSB, hipMemcpyHostToDevice); hipMemcpy(t_temp, tt, TSB, hipMemcpyHostToDevice); hipMemcpy(J_temp, JJ, TSB, hipMemcpyHostToDevice); stupid_alloc << <1, 1 >> > (TS); stupid_swap << <1, 1 >> > (n1_temp, n2_temp, n3_temp, n4_temp, t_temp, J_temp, TS); //so hipFree(n1_temp); hipFree(n2_temp); hipFree(n3_temp); hipFree(n4_temp); hipFree(t_temp); hipFree(J_temp); } //pressure transformation to real pressure void true_pressure(double *p, double *p_true, double *C, double *mu, int *t, int *n1, int *n2, int *n3, int *n4, int *J_back, double tau, unsigned int size, double hx, double hy, double Ca, double A, double Gr, double M, int OFFSET, double sinA, double cosA, unsigned int PHASE, double VV_h, double vibrX, double vibrY, double *Phi, double *Wx, double *Wy) { /* , , , , , , */ // double WX = 0.0, WY = 0.0; double dxC = 0, dyC = 0; int left, right, up, down, left2, right2, up2, down2; for (unsigned int l = 0; l < size; l++) { if (PHASE == 0) { p_true[l] = p[l]; continue; } left = n1[l]; right = n3[l]; up = n2[l]; down = n4[l]; if (left == -1) left = right; if (right == -1) right = left; if (up == -1) up = down; if (down == -1) down = up; left2 = n1[left]; right2 = n3[right]; up2 = n2[up]; down2 = n4[down]; p_true[l] = p[l] + (+mu[l] * C[l] - A*pow(C[l], 2) - pow(C[l], 4)) / M + C[l] * Gr*((J_back[l] - (J_back[l] / OFFSET)*OFFSET) * hx*cosA + (J_back[l] / OFFSET) * hy*sinA); if (VV_h > 0) { //WX = -C[l] * vibrX + 0.5*(C[right] - C[left]) / hx; //WY = -C[l] * vibrY + 0.5*(C[up] - C[down]) / hy; p_true[l] += -VV_h*0.5*(Wx[l] * Wx[l] + Wy[l] * Wy[l]); } switch (t[l]) { case 0: //inner p_true[l] += -0.5*Ca / M*( pow((0.5*(C[right] - C[left]) / hx), 2) + pow((0.5*(C[up] - C[down]) / hy), 2)); break; case 1: //left rigid p_true[l] += -0.5*Ca / M*( pow((-0.5*(3.0*C[l] - 4.0*C[right] + C[right2]) / hx), 2) + pow((0.5*(C[up] - C[down]) / hy), 2)); break; case 2: //upper rigid p_true[l] += -0.5*Ca / M*( pow((0.5*(C[right] - C[left]) / hx), 2) + pow((0.5*(3.0*C[l] - 4.0*C[down] + C[down2]) / hy), 2)); break; case 3: //right rigid p_true[l] += -0.5*Ca / M*( 
pow((0.5*(3.0*C[l] - 4.0*C[left] + C[left2]) / hx), 2) + pow((0.5*(C[up] - C[down]) / hy), 2)); break; case 4: //lower rigid p_true[l] += -0.5*Ca / M*( pow((0.5*(C[right] - C[left]) / hx), 2) + pow((-0.5*(3.0*C[l] - 4.0*C[up] + C[up2]) / hy), 2)); break; case 5: //left upper rigid corner p_true[l] += -0.5*Ca / M*( pow((-0.5*(3.0*C[l] - 4.0*C[right] + C[right2]) / hx), 2) + pow((0.5*(3.0*C[l] - 4.0*C[down] + C[down2]) / hy), 2)); break; case 6: //right upper rigid corner p_true[l] += -0.5*Ca / M*( pow((0.5*(3.0*C[l] - 4.0*C[left] + C[left2]) / hx), 2) + pow((0.5*(3.0*C[l] - 4.0*C[down] + C[down2]) / hy), 2)); break; case 7: //right lower rigid corner p_true[l] += -0.5*Ca / M*( pow((0.5*(3.0*C[l] - 4.0*C[left] + C[left2]) / hx), 2) + pow((-0.5*(3.0*C[l] - 4.0*C[up] + C[up2]) / hy), 2)); break; case 8: //left lower rigid corner p_true[l] += -0.5*Ca / M*( pow((-0.5*(3.0*C[l] - 4.0*C[right] + C[right2]) / hx), 2) + pow((-0.5*(3.0*C[l] - 4.0*C[up] + C[up2]) / hy), 2)); break; case 9: //inlet (from left) dxC = -0.5*(3.0*C[l] - 4.0*C[right] + C[right2]) / hx; dyC = 0.5*(C[up] - C[down]) / hy; p_true[l] += -0.5*Ca / M*(pow(dxC, 2) + pow(dyC, 2)); break; case 10://outlet (to right) dxC = 0.5*(3.0*C[l] - 4.0*C[left] + C[left2]) / hx; dyC = 0.5*(C[up] - C[down]) / hy; p_true[l] += -0.5*Ca / M*(pow(dxC, 2)+ pow(dyC, 2)); break; default: break; } } } void signalHandler(int signum) { cout << "Interrupt signal (" << signum << ") received.\n"; cout << "state: " << state << endl; // cleanup and close up stuff here // terminate program exit(signum); } void create_folder(string name) { #ifdef __linux__ string str = "mkdir -p " + name + "/"; system(str.c_str()); #endif #ifdef _WIN32 CreateDirectoryA(name.c_str(), NULL); #endif } int main(int argc, char **argv) { state = 0; signal(SIGINT, signalHandler); cout << "Version: " << ThisSoftwareVersion << endl; cout << "Compilation time: " << __DATE__ << " " << __TIME__ << endl; cout << "command line: " << endl; for (int i = 0; i < argc; i++) cout << i << ": " << argv[i] << endl; int devID = 0, deviceCount = 0; hipGetDeviceCount(&deviceCount); if (deviceCount == 0) cout << "there is no detected GPU" << endl; double heap_GB = 1.0; double timer1, timer2; double pi = 3.1415926535897932384626433832795; double eps0 = 1e-5; double *C0, *C, *p, *p0, *ux, *uy, *vx, *vy, *mu, *zero, *Phi, *Phi0, *WX, *WY; //_d - device (GPU) double *C_h, *p_h, *vx_h, *vy_h, *mu_h, *p_true_h, *zero_h, *Phi_h, *WX_h, *WY_h; //_h - host (CPU) double *curv1, *curv2, *nx_dC, *ny_dC; double *psiav_array, *psiav_array_Phi; // temporal variables //psiav0_h, eps_h *psiav_d, *psiav_array_h, *psiav_h; double hx_h, hy_h, Lx_h, Ly_h, tau_h, tau_p_h, psiav, psiav0, eps, A_h, Ca_h, Gr_h, Pe_h, Re_h, MM_h, dP_h, Gs_h; //parameters double alpha_h, sinA_h, cosA_h, theta_h, sinTh_h, cosTh_h; double Ek, Ek_old, Vmax, Q_in, Q_out, C_average, Cv; unsigned int nx_h, ny_h, Matrix_X, Matrix_Y, iter = 0, offset_h, Phi_kk, kk, k = 0, tt, write_i = 0, each = 1, stop = 0; //parameters double time_fields, time_recovery, time_display; double timeq = 0.0, C_av, C_plus, C_minus; double tecplot, limit_timeq; bool copied = false; unsigned int linear_pressure, fill_gradually, wetting, read_C, stop_at_exit; unsigned int sphere_distribution, curv_calc, vibration, simple_geometry = 0; double fill_gradually_x; unsigned int reset_timeq, invert_initial_C, reset_velocity, reset_pressure; unsigned int PHASE_h, DIFFUSION_h; string geometry; //1 is 'yes' / true, 0 is 'no' / false //unsigned int clean_fields; unsigned int picture_switch = 
1; //write fields to a file? unsigned int read_switch = 1; //read to continue or not? //reading_parameters(ny_h, nx_h, each_t, each, Matrix_X, Matrix_Y, tau_h, A_h, Ca_h, Gr_h, Pe_h, Re_h, alpha_h, MM_h, tecplot, PHASE_h); create_folder("fields"); string file_name = "inp.dat"; if (argc == 2) file_name = argv[1]; ReadingFile File(file_name); #define constUint(VAR) \ unsigned int VAR##_h; File.reading<unsigned int>(VAR##_h, #VAR, 0); \ hipMemcpyToSymbol(VAR, &VAR##_h, sizeof(unsigned int), 0, hipMemcpyHostToDevice); #define constDouble(VAR) \ double VAR##_h; File.reading<double>(VAR##_h, #VAR, 0.0); \ hipMemcpyToSymbol(VAR, &VAR##_h, sizeof(double), 0, hipMemcpyHostToDevice); File.reading<unsigned int>(ny_h, "ny", 200); File.reading<unsigned int>(nx_h, "nx", 200); File.reading<double>(time_fields, "time_fields", 0.5); File.reading<double>(time_recovery, "time_recovery", 0.5); File.reading<double>(time_display, "time_display", 0.1); File.reading<unsigned int>(each, "each_xy", 10); File.reading<unsigned int>(Matrix_X, "Matrix_X", 3); File.reading<unsigned int>(Matrix_Y, "Matrix_Y", 3); File.reading<double>(tau_h, "tau", 5.0e-5); File.reading<double>(A_h, "A", -0.5); if (File.reading<double>(Ca_h, "Ca", 4e-4) == 0) File.reading<double>(Ca_h, "Cn", 4e-4); File.reading<double>(Gr_h, "Gr", 0.0); if (File.reading<double>(Pe_h, "Pe", 1e+4) == 0) File.reading<double>(Pe_h, "Sc", 1e+4); File.reading<double>(Re_h, "Re", 1.0); File.reading<double>(alpha_h, "alpha", 0.0); File.reading<double>(theta_h, "theta", 90.0); File.reading<double>(MM_h, "MM", 1.0); File.reading<double>(Gs_h, "Gs", 0.0); File.reading<double>(dP_h, "dP", 1.0); File.reading<double>(tecplot, "tecplot", 10000); File.reading<unsigned int>(PHASE_h, "Phase_field", 1, 0, 1); File.reading<unsigned int>(read_switch, "read_recovery", 1, 0, 1); File.reading<unsigned int>(picture_switch, "picture_switch", 1, 0, 1); File.reading<double>(limit_timeq, "time_limit", 5000.0); File.reading<unsigned int>(linear_pressure, "linear_pressure", 0, 0, 1); File.reading<unsigned int>(fill_gradually, "fill_gradually", 0, 0, 1); File.reading<double>(fill_gradually_x, "fill_gradually_x", 0.5); File.reading<unsigned int>(DIFFUSION_h, "pure_diffusion", 0, 0, 1); File.reading<unsigned int>(wetting, "wetting", 0, 0, 4); File.reading_string(geometry, "geometry", "matrix"); File.reading<unsigned int>(reset_timeq, "reset_time", 0, 0, 1); File.reading<unsigned int>(invert_initial_C, "invert_C", 0, 0, 1); File.reading<unsigned int>(reset_velocity, "reset_velocity", 0, 0, 1); File.reading<unsigned int>(reset_pressure, "reset_pressure", 0, 0, 1); File.reading<double>(heap_GB, "heap_GB", 1.0); File.reading<int>(devID, "GPU_id", 0, 0, deviceCount - 1); File.reading<unsigned int>(read_C, "read_concentration", 0, 0, 1); File.reading<unsigned int>(stop_at_exit, "stop_at_exit", 0, 0, 1); File.reading<unsigned int>(sphere_distribution, "sphere", 0, 0, 1); File.reading<unsigned int>(curv_calc, "curv_calc", 0, 0, 1); unsigned int horizontal_profile; File.reading<unsigned int>(horizontal_profile, "horizontal_profile", 0, 0, 1); if (horizontal_profile) create_folder("horizontal_profile"); unsigned int vertical_profile; File.reading<unsigned int>(vertical_profile, "vertical_profile", 0, 0, 1); if (vertical_profile) create_folder("vertical_profile"); File.reading<double>(Lx_h, "Lx", 0.0); File.reading<double>(Ly_h, "Ly", 0.0); File.reading<unsigned int>(vibration, "vibration", 0, 0, 3); double Amp_h; File.reading<double>(Amp_h, "Amp", 0.0); double Omega_h; File.reading<double>(Omega_h, 
"Omega", 0.0); double vibr_X_h; File.reading<double>(vibr_X_h, "vibr_X", 0.0, 0.0, 1.0); double vibr_Y_h; File.reading<double>(vibr_Y_h, "vibr_Y", 0.0, 0.0, 1.0); double VV_h; File.reading<double>(VV_h, "VV", 0.0); if (vibration == 0) { VV_h = 0; } unsigned int integrals_add1; File.reading<unsigned int>(integrals_add1, "integrals_add1", 0); string filling; File.reading_string(filling, "filling", "no"); //unsigned int W_BORDER_h; File.reading<unsigned int>(W_BORDER_h, "W_BORDER", 0); //hipMemcpyToSymbol(W_BORDER, &W_BORDER_h, sizeof(unsigned int), 0, hipMemcpyHostToDevice); //unsigned int PHI_BORDER_LEFT_h; File.reading<unsigned int>(PHI_BORDER_LEFT_h, "PHI_BORDER_LEFT", 0); //hipMemcpyToSymbol(PHI_BORDER_LEFT, &PHI_BORDER_LEFT_h, sizeof(unsigned int), 0, hipMemcpyHostToDevice); //unsigned int PHI_BORDER_RIGHT_h; File.reading<unsigned int>(PHI_BORDER_RIGHT_h, "PHI_BORDER_RIGHT", 0); //hipMemcpyToSymbol(PHI_BORDER_RIGHT, &PHI_BORDER_RIGHT_h, sizeof(unsigned int), 0, hipMemcpyHostToDevice); //File.reading<unsigned int>(clean_fields, "clean_fields", 1, 0, 1); constUint(W_BORDER); constUint(PHI_border_left); constUint(PHI_border_right); constDouble(PHI_value_left); constDouble(PHI_value_right); //GPU setting hipSetDevice(devID); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, devID); printf("\nDevice %d: \"%s\"\n", devID, deviceProp.name); //allocate heap size size_t limit = (size_t)(1024 * 1024 * 1024 * heap_GB); hipDeviceSetLimit(hipLimitMallocHeapSize, limit); hipDeviceGetLimit(&limit, hipLimitMallocHeapSize); //if (clean_fields == 1) { //system("exec rm -r /fields/*"); //cout << "fields cleaned" << endl; //} //the main class for geometry multi_cross Geom; hy_h = 1.0 / ny_h; hx_h = hy_h; tt = (unsigned int)round(1.0 / tau_h); cosA_h = cos(alpha_h*pi / 180); sinA_h = sin(alpha_h*pi / 180); tau_p_h = 0.20*hx_h*hx_h; Ek = 0; Ek_old = 0; kk = 1000000; //Poisson iteration limit Phi_kk = 1000000; //geometry { if (geometry == "matrix") { Geom.set_global_size(nx_h, ny_h, Matrix_X, Matrix_Y); //Geom.set_global_size_narrow_tubes(2*nx_h, nx_h, ny_h/2, Matrix_X, Matrix_Y); Geom.set_type(); //Geom.left_normal_in((Matrix_Y - 1) / 2, (Matrix_Y - 1) / 2); //Geom.left_normal_out((Matrix_Y - 1) / 2, (Matrix_Y - 1) / 2); Geom.set_neighbor(); Geom.set_global_id(); cout << "Matrix_X = " << Matrix_X << ", Matrix_Y = " << Matrix_Y << endl; Geom.check(); } else if (geometry == "matrix2") { Geom.set_global_size(nx_h, ny_h, Matrix_X, Matrix_Y); Geom.set_global_id_B(); Geom.set_neighbor_B(); Geom.set_type_B(); Geom.check(); cout << "Matrix_X = " << Matrix_X << ", Matrix_Y = " << Matrix_Y << endl; } else if (geometry == "box") { if (Ly_h != 0) hy_h = Ly_h / ny_h; if (Lx_h != 0) hx_h = Lx_h / nx_h; Geom.set_global_size_box(nx_h, ny_h); Geom.set_type_box(); Geom.set_neighbor_box(); Geom.set_global_id_box(); Geom.check(); simple_geometry = 1; } else if (geometry == "tube") { Geom.set_global_size_box(nx_h, ny_h); Geom.set_type_tube(); Geom.set_neighbor_box(); Geom.set_global_id_box(); Geom.check(); simple_geometry = 1; } else if (geometry == "grid") { Geom.read_grid_geometry(); File.reading<double>(hy_h, "h_step", 0.001); hx_h = hy_h; tau_p_h = 0.20 * hx_h * hx_h; nx_h = Geom.nx; ny_h = Geom.ny; offset_h = Geom.offset; //sure you need all this? Geom.check(); } else { cout << "what are you trying to do?" 
<< endl; return 0; } } /* box_inherited Geom; Geom.set_global_size(nx_h, ny_h); Geom.set_type(); Geom.set_neighbor(); Geom.set_global_id(); cout << "SIZE = " << " " << Geom.TOTAL_SIZE << endl; */ //alternative geometry /* multi_line Geom; Geom.generate_levels(30, 30, 30, 3, 5); cout << "approximate memory amount = " << 100 * Geom.TOTAL_SIZE / 1024 / 1024 << " MB" << endl << endl << endl; Geom.set_neighbor(); Geom.set_type(); pause */ //int sss = 0; for (int i = 0; i < Geom.TOTAL_SIZE; i++) if (Geom.t[i] == 9) sss++; cout << "S=" << sss << endl; //here we copy the arrays responsible for the geometry to GPU stupid_step(Geom.n1, Geom.n2, Geom.n3, Geom.n4, Geom.t, Geom.J_back, Geom.TOTAL_SIZE); cudaCheckError() //total Length and Width of the porous matrix Lx_h = hx_h * (Geom.nxg); Ly_h = hy_h * (Geom.nyg); hipDeviceSynchronize(); //size setting //you may just skip it, that is weird offset_h = nx_h + 1; unsigned int size_l = Geom.TOTAL_SIZE; //Number of all nodes/elements if (size_l <= 1024 || size_l >= 1024 * 1024 * 1024) { cout << "data is too small or too large" << endl; return 0; } std::cout << "size_l=" << size_l << endl; size_t size_b /*size (in) bytes*/ = size_l * sizeof(double); //sizeof(double) = 8 bytes size_t thread_x_d /*the dimension of x in a block*/ = 1024; //size_t threads_per_block = thread_x_d; dim3 gridD((unsigned int)ceil((size_l + 0.0) / thread_x_d)); dim3 blockD((unsigned int)thread_x_d); std::cout << "gridD.x=" << gridD.x << endl; std::cout << "blockD.x=" << blockD.x << endl; //setting for the reduction procedure //that is even weirder, skip it, don't hesitate unsigned long long int *Gp, *Np; unsigned int s = 0; unsigned int GN = size_l; while (true) { s++; GN = (unsigned int)ceil(GN / (thread_x_d + 0.0)); if (GN == 1) break; } GN = size_l; std::cout << "the number of reduction = " << s << endl; Gp = new unsigned long long int[s]; Np = new unsigned long long int[s]; for (unsigned int i = 0; i < s; i++) Gp[i] = GN = (unsigned int)ceil(GN / (thread_x_d + 0.0)); Np[0] = size_l; for (unsigned int i = 1; i < s; i++) Np[i] = Gp[i - 1]; int last_reduce = (int)pow(2, ceil(log2(Np[s - 1] + 0.0))); std::cout << "last reduction = " << last_reduce << endl; (s != 1) ? 
std::cout << "sub array for the Poisson solver = " << Np[1] << endl : std::cout << "it shouldn't be here" << endl; double *arr[10]; double *arr2[10]; //allocating memory for arrays on CPU and initializing them { if (DIFFUSION_h == 1) { C_h = (double*)malloc(size_b); mu_h = (double*)malloc(size_b); p_true_h = (double*)malloc(size_b); zero_h = (double*)malloc(size_b); p_h = vx_h = vy_h = zero_h; for (unsigned int l = 0; l < size_l; l++) { C_h[l] = 0.5; mu_h[l] = 0; p_true_h[l] = 0.0; zero_h[l] = 0.0; } } else { C_h = (double*)malloc(size_b); mu_h = (double*)malloc(size_b); p_h = (double*)malloc(size_b); p_true_h = (double*)malloc(size_b); vx_h = (double*)malloc(size_b); vy_h = (double*)malloc(size_b); // psiav_h = (double*)malloc(sizeof(double)); // psiav_array_h = (double*)malloc(size_b / threads_per_block); for (unsigned int l = 0; l < size_l; l++) { C_h[l] = 0.5; mu_h[l] = 0; p_h[l] = 0.0; p_true_h[l] = 0.0; vx_h[l] = 0.0; vy_h[l] = 0.0; } } if (curv_calc == 1) { curv1 = (double*)malloc(size_b); curv2 = (double*)malloc(size_b); nx_dC = (double*)malloc(size_b); ny_dC = (double*)malloc(size_b); for (unsigned int l = 0; l < size_l; l++) { curv1[l] = 0.0; curv2[l] = 0.0; nx_dC[l] = 0.0; ny_dC[l] = 0.0; } } if (vibration == 1) { Phi_h = (double*)malloc(size_b); WX_h = (double*)malloc(size_b); WY_h = (double*)malloc(size_b); for (unsigned int l = 0; l < size_l; l++) { Phi_h[l] = 0.0; WX_h[l] = 0.0; WY_h[l] = 0.0; } } } if (linear_pressure == 1) { Geom.linear_pressure(p_h, hx_h, hy_h, cosA_h, sinA_h, Lx_h, Ly_h, 8.0 / Re_h); } if (filling == "shift") { double delta = sqrt(Ca_h / 0.5); Geom.fill_gradually(C_h, hx_h, hy_h, delta, fill_gradually_x); } if (filling == "sphere") { double sphere_x0, sphere_y0, sphere_R0; double C_outer, C_inner; File.reading<double>(sphere_x0, "sphere_x0", Lx_h * 0.5); File.reading<double>(sphere_y0, "sphere_y0", Ly_h * 0.5); File.reading<double>(sphere_R0, "sphere_R0", 0.1); File.reading<double>(C_outer, "sphere_C_outer", +sqrt(abs(-A_h) / 2.0)); File.reading<double>(C_inner, "sphere_C_inner", -sqrt(abs(-A_h) / 2.0)); Geom.fill_with_sphere(C_h, hx_h, hy_h, sphere_x0, sphere_y0, sphere_R0, C_outer, C_inner); } if (filling == "horizontal") { double delta = sqrt(Ca_h / 0.5); double horizontal_amp; File.reading<double>(horizontal_amp, "horizontal_amp", 0); Geom.fill_horizontal_way(C_h, hx_h, hy_h, 0.5, Ly_h*0.5, horizontal_amp, 2.0*Pi / Lx_h, delta); } //additional allocation on CPU for statistics if necessary // !? double *fx, *fy; signed char *mark; { fx = (double*)malloc(sizeof(double)*size_l); fy = (double*)malloc(sizeof(double)*size_l); mark = (signed char*)malloc(sizeof(signed char)*size_l); } //allocating memory for arrays on GPU { if (DIFFUSION_h == 1) { hipMalloc((void**)&C, size_b); hipMalloc((void**)&C0, size_b); hipMalloc((void**)&mu, size_b); hipMalloc((void**)&zero, size_b); p = p0 = ux = uy = vx = vy = zero; } else { hipMalloc((void**)&C, size_b); hipMalloc((void**)&C0, size_b); hipMalloc((void**)&p, size_b); hipMalloc((void**)&p0, size_b); hipMalloc((void**)&ux, size_b); hipMalloc((void**)&uy, size_b); hipMalloc((void**)&vx, size_b); hipMalloc((void**)&vy, size_b); hipMalloc((void**)&mu, size_b); (s != 1) ? hipMalloc((void**)&psiav_array, sizeof(double)*Np[1]) : hipMalloc((void**)&psiav_array, sizeof(double)); } if (vibration == 1) { hipMalloc((void**)&Phi, size_b); hipMalloc((void**)&Phi0, size_b); hipMalloc((void**)&WX, size_b); hipMalloc((void**)&WY, size_b); (s != 1) ? 
hipMalloc((void**)&psiav_array_Phi, sizeof(double)*Np[1]) : hipMalloc((void**)&psiav_array_Phi, sizeof(double)); } } //for Poisson procedure shortness arr[0] = p; for (unsigned int i = 1; i <= s; i++) arr[i] = psiav_array; if (vibration == 1) { arr2[0] = Phi; for (unsigned int i = 1; i <= s; i++) arr2[i] = psiav_array_Phi; } //ofstream is a class to write data in a file, ifstream is a class to read data from a file ofstream integrals; ofstream test_output; int test_output_switch; File.reading<int>(test_output_switch, "test_output", 0, 0, 1); if (test_output_switch) test_output.open("test_output.dat"); ofstream k_number; ifstream read; read.open("recovery.dat"); //checking whether a recovery file exists or not //if not we start at t = 0, otherwise we continue from the saved data bool file_exists = read.good(); if (read_switch == 0) file_exists = false; if (file_exists == true) { read_switch = 1; std::cout << endl << "CONTINUE" << endl; } else { read_switch = 0; iter = 0; std::cout << endl << "from the Start" << endl; if (read_C == 1) { std::cout << "initial concentration reading" << endl; Geom.read_concentration(C_h, "recovery.dat", 4, 1, 1); } } //continue if (read_switch == 1) { //Geom.recover(vx_h, vy_h, p_h, C_h, mu_h, iter, write_i, timeq, kk); double* var[10]; unsigned int n = 0; var[n] = vx_h; n++; var[n] = vy_h; n++; var[n] = p_h; n++; var[n] = C_h; n++; var[n] = mu_h; n++; if (vibration == 1) { var[n] = Phi_h; n++; } Geom.recover(var, n, iter, write_i, timeq, kk); if (reset_timeq == 0) { integrals.open("integrals.dat", std::ofstream::app); cout << "from time: " << timeq << " iter:" << iter << endl; } else if (reset_timeq == 1) { cout << "reset time" << endl; integrals.open("integrals.dat"); iter = 0; write_i = 0; timeq = 0; if (invert_initial_C == 1) for (unsigned int l = 0; l < size_l; l++) C_h[l] = C_h[l] * (-1); if (reset_velocity == 1) for (unsigned int l = 0; l < size_l; l++) { vx_h[l] = 0.0; vy_h[l] = 0.0; } if (reset_pressure == 1) for (unsigned int l = 0; l < size_l; l++) p_h[l] = 0.0; } } //from the start if (read_switch == 0) integrals.open("integrals.dat"); //copying values from host variables to device ones { hipMemcpy(C, C_h, size_b, hipMemcpyHostToDevice); hipMemcpy(C0, C_h, size_b, hipMemcpyHostToDevice); hipMemcpy(p0, p_h, size_b, hipMemcpyHostToDevice); hipMemcpy(p, p_h, size_b, hipMemcpyHostToDevice); hipMemcpy(ux, vx_h, size_b, hipMemcpyHostToDevice); hipMemcpy(uy, vy_h, size_b, hipMemcpyHostToDevice); hipMemcpy(vx, vx_h, size_b, hipMemcpyHostToDevice); hipMemcpy(vy, vy_h, size_b, hipMemcpyHostToDevice); hipMemcpy(mu, mu_h, size_b, hipMemcpyHostToDevice); if (vibration == 1) { hipMemcpy(Phi, Phi_h, size_b, hipMemcpyHostToDevice); hipMemcpy(Phi0, Phi_h, size_b, hipMemcpyHostToDevice); } } //copying some constant parameters to the fast constant memory { hipMemcpyToSymbol(hx, &hx_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(hy, &hy_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(Lx, &Lx_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(Ly, &Ly_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(nx, &nx_h, sizeof(unsigned int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(ny, &ny_h, sizeof(unsigned int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(n, &size_l, sizeof(unsigned int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(offset, &offset_h, sizeof(unsigned int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(A, &A_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(Ca, &Ca_h, sizeof(double), 
0, hipMemcpyHostToDevice); hipMemcpyToSymbol(Gr, &Gr_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(Gs, &Gs_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(Pe, &Pe_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(Re, &Re_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(MM, &MM_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(tau, &tau_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(tau_p, &tau_p_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(alpha, &alpha_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(sinA, &sinA_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(cosA, &cosA_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(theta, &theta_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(sinTh, &sinTh_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(cosTh, &cosTh_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(OFFSET, &Geom.OFFSET, sizeof(int), 0, hipMemcpyHostToDevice); // hipMemcpyToSymbol(Mx, &Geom.Mx, sizeof(int), 0, hipMemcpyHostToDevice); // hipMemcpyToSymbol(My, &Geom.My, sizeof(int), 0, hipMemcpyHostToDevice); // hipMemcpyToSymbol(Msize, &Geom.Msize, sizeof(int), 0, hipMemcpyHostToDevice); // hipMemcpyToSymbol(Moffset, &Geom.Moffset, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(TOTAL_SIZE, &Geom.TOTAL_SIZE, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(PHASE, &PHASE_h, sizeof(unsigned int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(dP, &dP_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(Amp, &Amp_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(Omega, &Omega_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(VV, &VV_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(vibr_X, &vibr_X_h, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(vibr_Y, &vibr_Y_h, sizeof(double), 0, hipMemcpyHostToDevice); } { cout << "approximate memory amount = " << 100 * Geom.TOTAL_SIZE / 1024 / 1024 << " MB" << endl; PrintVar(wetting) PrintVar(DIFFUSION_h) PrintVar(geometry) PrintVar(filling) } //just printing parameters from GPU to be confident they are passed correctly hello << <1, 1 >> > (); hipDeviceSynchronize(); //Geom.fast_test_writing(C_h); Geom.write_field(C_h, "0", timeq, each); //measure real time of calculating timer1 = clock() / CLOCKS_PER_SEC; //Geom.write_field(C_h, "test", 0, 1); //write the file with parameters //this step was written for making movies { #ifdef __linux__ ofstream to_file("fields/param.dat"); #endif #ifdef _WIN32 ofstream to_file("fields\\param.dat"); #endif #define space << " " << to_file << Geom.nxg / each space Geom.nyg / each space hx_h*each space hy_h*each space Lx_h space Ly_h space Gr_h space Ca_h space Pe_h space Re_h space A_h space MM_h space alpha_h << endl; to_file.close(); } true_pressure(p_h, p_true_h, C_h, mu_h, Geom.t, Geom.n1, Geom.n2, Geom.n3, Geom.n4, Geom.J_back, tau_h, Geom.TOTAL_SIZE, hx_h, hy_h, Ca_h, A_h, Gr_h, MM_h, Geom.OFFSET, sinA_h, cosA_h, PHASE_h, VV_h, vibr_X_h, vibr_Y_h, Phi_h, WX_h, WY_h); //pause // the main time loop of the whole calculation procedure while (true) { iter = iter + 1; timeq = timeq + tau_h; //Poisson equation for pulsation potential if (vibration == 1) { double eps_Phi = 1.0; double psiav0_Phi = -1.0; double psiav_Phi = 0.0; unsigned int k_Phi = 0; //while ((eps_Phi > eps0*psiav0_Phi && k_Phi < Phi_kk)) while ((eps_Phi > 
eps0*psiav0_Phi)) { psiav_Phi = 0.0; k_Phi++; Poisson_Phi << <gridD, blockD >> >(Phi, Phi0, C0, WX, WY); for (unsigned int i = 0; i < s; i++) reduction00 << < Gp[i], 1024, 1024 * sizeof(double) >> > (arr2[i], Np[i], arr2[i + 1]); swap_one << <gridD, blockD >> > (Phi0, Phi); hipMemcpy(&psiav_Phi, psiav_array_Phi, sizeof(double), hipMemcpyDeviceToHost); eps_Phi = abs(psiav_Phi - psiav0_Phi); psiav0_Phi = psiav_Phi; if (k_Phi % 1000 == 0) { cout << "Phi_iter=" << k_Phi << " " << eps_Phi << endl; } } Phi_kk = k_Phi; if (iter % (int)(tt *time_display) == 0 || iter == 1) { cout << "Phi_iter=" << Phi_kk << endl; } Phi_normalization << < gridD, blockD >> > (Phi); WW_from_Phi << < gridD, blockD >> > (WX, WY, Phi, C0); } if (DIFFUSION_h == 1) { //only diffusion if (PHASE_h == 1) { chemical_potential << <gridD, blockD >> > (mu, C); concentration << < gridD, blockD >> > (C, C0, vx, vy, mu); //concentration_surface_energy_wetting << < gridD, blockD >> > (C, C0, vx, vy, mu); } else if (PHASE_h == 0) { concentration << < gridD, blockD >> > (C, C0, vx, vy, C0); } swap_one << <gridD, blockD >> > (C0, C); } else { //flow //1st step, calculating of time evolutionary parts of velocity (quasi-velocity) and concentration and chemical potential { if (PHASE_h == 1) { chemical_potential << <gridD, blockD >> > (mu, C); //chemical_potential_inside << <gridD, blockD >> > (mu, C); //chemical_potential_border << <gridD, blockD >> > (mu, C); //quasi_velocity_upstream << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); switch (vibration) { case 0: quasi_velocity << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); break; case 1: quasi_velocity_pulsation_with_Phi << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu, timeq, Phi, WX, WY); break; case 2: quasi_velocity_pulsation << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu, timeq); break; case 3: quasi_velocity << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); break; default: break; } switch (wetting) { case 0: //as it is concentration << < gridD, blockD >> > (C, C0, vx, vy, mu); //concentration_upstream << < gridD, blockD >> > (C, C0, vx, vy, mu); break; case 1: //const initial concentration at walls, which is not washed out concentration_no_wetting << < gridD, blockD >> > (C, C0, vx, vy, mu); break; case 2: //ongoing concentration devours initial one concentration_wetting << < gridD, blockD >> > (C, C0, vx, vy, mu); break; case 3: //surface energy formulation by Jacqmin // not finished concentration_surface_energy_wetting << < gridD, blockD >> > (C, C0, vx, vy, mu); break; default: break; } } else if (PHASE_h == 0) { chemical_potential_Gr << <gridD, blockD >> > (mu); switch (vibration) { case 0: //as it is quasi_velocity << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); //quasi_velocity_no_phase_field << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); break; case 1: quasi_velocity_pulsation_with_Phi << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu, timeq, Phi, WX, WY); break; case 2: quasi_velocity_pulsation << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu, timeq); break; case 3: quasi_velocity << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); break; default: break; } //if (timeq < 1) concentration << < gridD, blockD >> > (C, C0, vx, vy, C0); //else concentration_no_input_C << < gridD, blockD >> > (C, C0, vx, vy, C0); } } //2nd step, Poisson equation for pressure { eps = 1.0; psiav0 = -1.0; psiav = 0.0; k = 0; //while (eps > eps0*psiav0 || k < 10) //while (eps > eps0*psiav0 ) while ((eps > eps0*psiav0 && k < kk)) { psiav = 0.0; k++; if (vibration == 1) Poisson_pulsation_Phi << 
<gridD, blockD >> > (p, p0, ux, uy, mu, C, Phi, WX, WY); else if (vibration == 3) Poisson_pulsation << <gridD, blockD >> >(p, p0, ux, uy, mu, C, timeq); else Poisson << <gridD, blockD >> > (p, p0, ux, uy, mu, C); for (unsigned int i = 0; i < s; i++) reduction00 << < Gp[i], 1024, 1024 * sizeof(double) >> > (arr[i], Np[i], arr[i + 1]); swap_one << <gridD, blockD >> > (p0, p); hipMemcpy(&psiav, psiav_array, sizeof(double), hipMemcpyDeviceToHost); eps = abs(psiav - psiav0); psiav0 = psiav; if (k % 1000 == 0) { cout << "p_iter=" << k << endl; } } } kk = k; if (iter % (int)(tt *time_display) == 0 || iter == 1) { cout << "p_iter=" << k << endl; } //3rd step, velocity correction and swapping field values velocity_correction << <gridD, blockD >> > (vx, vy, ux, uy, p); swap_3 << <gridD, blockD >> > (ux, vx, uy, vy, C0, C); } //4th step, printing results, writing data and whatever you want if (iter % (int)(tt *time_display) == 0 || iter == 1) { cout << setprecision(15) << endl; cout << fixed << endl; hipMemcpy(vx_h, vx, size_b, hipMemcpyDeviceToHost); hipMemcpy(vy_h, vy, size_b, hipMemcpyDeviceToHost); hipMemcpy(p_h, p, size_b, hipMemcpyDeviceToHost); hipMemcpy(C_h, C, size_b, hipMemcpyDeviceToHost); hipMemcpy(mu_h, mu, size_b, hipMemcpyDeviceToHost); if (vibration == 1) { WW_from_Phi << <gridD, blockD >> > (WX, WY, Phi, C); hipMemcpy(Phi_h, Phi, size_b, hipMemcpyDeviceToHost); hipMemcpy(WX_h, WX, size_b, hipMemcpyDeviceToHost); hipMemcpy(WY_h, WY, size_b, hipMemcpyDeviceToHost); } copied = true; true_pressure(p_h, p_true_h, C_h, mu_h, Geom.t, Geom.n1, Geom.n2, Geom.n3, Geom.n4, Geom.J_back, tau_h, Geom.TOTAL_SIZE, hx_h, hy_h, Ca_h, A_h, Gr_h, MM_h, Geom.OFFSET, sinA_h, cosA_h, PHASE_h, VV_h, vibr_X_h, vibr_Y_h, Phi_h, WX_h, WY_h); double len, ten, vol, width, p_plusAv, p_minusAv, p_Av, vx_plusAv, vx_minusAv, vx_Av; velocity(size_l, hx_h, hy_h, vx_h, vy_h, Ek, Vmax); VFR(vx_h, Geom.t, size_l, hy_h, Q_in, Q_out, C_h, C_average, Cv); C_statistics(Geom.TOTAL_SIZE, hx_h, hy_h, Geom.t, C_h, C_av, C_plus, C_minus); len = Geom.isoline(hx_h, hy_h, C_h, mark, fx, fy, 0.0); ten = Ca_h / len / MM_h * Geom.tension(hx_h, hy_h, C_h); if (!std::isfinite(ten)) ten = 0; //if (ten != ten) ten = 0; vol = Geom.volume(hx_h, hy_h, C_h, 0.2); width = vol / len; if (!std::isfinite(width)) width = 0; //if (abs(width) > 100000) width = 0; Geom.X_averaged_in_each_phase(hx_h, hy_h, C_h, p_true_h, p_plusAv, p_minusAv, p_Av, 0.05); Geom.X_averaged_in_each_phase(hx_h, hy_h, C_h, vx_h, vx_plusAv, vx_minusAv, vx_Av); //Display timer cout << "t= " << tau_h*iter << endl; cout << "Vmax= " << Vmax << endl; cout << "Ek= " << Ek << endl; cout << "dEk= " << (Ek - Ek_old) << endl; cout << "p_iter=" << k << endl; cout << "Q_in=" << Q_in << endl; cout << "Q_out=" << Q_out << endl; cout << "Vx_max=" << maxval(vx_h, size_l) << endl; cout << "C_max=" << maxval(C_h, size_l) << endl; cout << "p_max=" << maxval(p_h, size_l) << endl; cout << "C_av=" << C_av << endl; cout << "C_plus=" << C_plus << endl; cout << "C_minus=" << C_minus << endl; //Integrals if (iter == 1) { integrals << "t, Ek, Vmax, time(min), dEk, Q_in, Q_out, C_average, Q_per_cap, Q_per_width" << ", Cv_per_cap, Cv_per_width, C_av, C_plus, C_minus, L, ten, width" << ", p_plusAv, p_minusAv, vx_plusAv, vx_minusAv"; if (integrals_add1) { integrals << ", Xtip, Xwall, Qtip, Qwall, Cap_pres"; } integrals << endl; } integrals << setprecision(20) << fixed; integrals << timeq << " " << Ek << " " << Vmax << " " << (timer2 - timer1) / 60 << " " << abs(Ek - Ek_old) << " " << Q_in << " " << Q_out 
<< " " << C_average / Matrix_Y << " " << Q_out / Matrix_Y << " " << Q_out / Ly_h << " " << Cv / Matrix_Y << " " << Cv / Ly_h << " " << C_av << " " << C_plus << " " << C_minus << " " << len << " " << ten << " " << width << " " << p_plusAv << " " << p_minusAv << " " << vx_plusAv << " " << vx_minusAv; if (integrals_add1) { double x_tip = Geom.change_sign_at_X(hx_h, hy_h, C_h, Geom.nyg / 2); double x_wall = Geom.change_sign_at_X(hx_h, hy_h, C_h, 0); double cap_pres = Geom.pressure_jump(hx_h, hy_h, p_true_h, x_tip, 0.1); double Q_tip = Geom.flow_rate(hx_h, hy_h, vx_h, Ly_h, (unsigned int)(x_tip / hx_h)); double Q_wall = Geom.flow_rate(hx_h, hy_h, vx_h, Ly_h, (unsigned int)(x_wall / hx_h)); integrals << " " << x_tip << " " << x_wall << " " << Q_tip << " " << Q_wall << " " << cap_pres; cout << "x_tip=" << x_tip << endl; } integrals << endl; Ek_old = Ek; if (stop_at_exit == 1) { stop = Geom.checkExit(C_h); if (stop == 1) cout << "stop command is applied" << endl; } } //fields writing if (iter % (int(time_fields * tt)) == 0 || iter == 1 || stop == 1) { if (copied == false) { hipMemcpy(vx_h, vx, size_b, hipMemcpyDeviceToHost); hipMemcpy(vy_h, vy, size_b, hipMemcpyDeviceToHost); hipMemcpy(p_h, p, size_b, hipMemcpyDeviceToHost); hipMemcpy(C_h, C, size_b, hipMemcpyDeviceToHost); hipMemcpy(mu_h, mu, size_b, hipMemcpyDeviceToHost); if (vibration == 1) { WW_from_Phi << <gridD, blockD >> > (WX, WY, Phi, C); hipMemcpy(Phi_h, Phi, size_b, hipMemcpyDeviceToHost); hipMemcpy(WX_h, WX, size_b, hipMemcpyDeviceToHost); hipMemcpy(WY_h, WY, size_b, hipMemcpyDeviceToHost); } true_pressure(p_h, p_true_h, C_h, mu_h, Geom.t, Geom.n1, Geom.n2, Geom.n3, Geom.n4, Geom.J_back, tau_h, Geom.TOTAL_SIZE, hx_h, hy_h, Ca_h, A_h, Gr_h, MM_h, Geom.OFFSET, sinA_h, cosA_h, PHASE_h, VV_h, vibr_X_h, vibr_Y_h, Phi_h, WX_h, WY_h); copied = true; } write_i++; stringstream ss; string file_name; ss.str(""); ss.clear(); ss << write_i; file_name = ss.str(); Geom.write_field(C_h, file_name, timeq, each); Geom.write_field(p_true_h, "true_p_" + file_name, timeq, each); Geom.write_field(mu_h, "mu_" + file_name, timeq, each); if (curv_calc) { Geom.curvature_direct(C_h, hx_h, hy_h, curv1, 0.1); Geom.curvature_direct(C_h, hx_h, hy_h, curv2, 0.001); //Geom.curvature_2_steps(C_h, nx_dC, ny_dC, hx_h, hy_h, curv2); Geom.write_field(curv1, "curv_" + file_name, timeq, each); Geom.write_field(curv2, "curv2_" + file_name, timeq, each); } if (vibration == 1) { Geom.write_field(Phi_h, "Phi_" + file_name, timeq, each); Geom.write_field(WX_h, "WX_" + file_name, timeq, each); Geom.write_field(WY_h, "WY_" + file_name, timeq, each); if (test_output_switch) test_output << timeq << " " << MAXval(Phi_h, size_l) << " " << MINval(Phi_h, size_l) << " " << Phi_h[0 + Geom.OFFSET*Geom.nyg] - Phi_h[Geom.nxg + Geom.OFFSET*Geom.nyg] << endl; } if (horizontal_profile) { double *var[20]; string head = "i x C P_true P Mu vx vy"; var[0] = C_h; var[1] = p_true_h; var[2] = p_h; var[3] = mu_h; var[4] = vx_h; var[5] = vy_h; int n = 6; if (curv_calc) { head.append(" curv1 curv2"); var[n] = curv1; n++; var[n] = curv2; n++; } if (vibration == 1) { head.append(" Phi"); head.append(" WX"); head.append(" WY"); var[n] = Phi_h; n++; var[n] = WX_h; n++; var[n] = WY_h; n++; } Geom.write_linear_profile(file_name, head, timeq, 1, hx_h, var, n); Geom.write_linear_profile(file_name + "_bot", head, timeq, 1, hx_h, var, n, 0); Geom.write_linear_profile(file_name + "_top", head, timeq, 1, hx_h, var, n, Geom.nyg); } if (vertical_profile && integrals_add1) { double *var[20]; string head = "j y C P_true 
P Mu vx vy"; var[0] = C_h; var[1] = p_true_h; var[2] = p_h; var[3] = mu_h; var[4] = vx_h; var[5] = vy_h; int n = 6; double x_tip = Geom.change_sign_at_X(hx_h, hy_h, C_h, Geom.nyg / 2); double x_wall = Geom.change_sign_at_X(hx_h, hy_h, C_h, 0); if (vibration == 1) { head.append(" Phi"); head.append(" WX"); head.append(" WY"); var[n] = Phi_h; n++; var[n] = WX_h; n++; var[n] = WY_h; n++; } Geom.write_section_profile(file_name + "_tip", head, timeq, 1, hy_h, var, n, (unsigned int)(x_tip / hx_h)); Geom.write_section_profile(file_name + "_wall", head, timeq, 1, hy_h, var, n, (unsigned int)(x_wall / hx_h)); Geom.write_section_profile(file_name + "_end", head, timeq, 1, hy_h, var, n, Geom.nxg - 1); Geom.write_section_profile(file_name + "_start", head, timeq, 1, hy_h, var, n, 0); //Geom.write_section_profile(file_name + "_j10", head, timeq, 1, hy_h, var, n, 10); } //Geom.write_field(mu_h, "mu" + file_name, timeq, each); } //fields writting for tecplot if (tecplot != 0 && (iter % (int(time_fields * tt)) == 0 || iter == 1 || stop == 1)) { if (copied == false) { hipMemcpy(vx_h, vx, size_b, hipMemcpyDeviceToHost); hipMemcpy(vy_h, vy, size_b, hipMemcpyDeviceToHost); hipMemcpy(p_h, p, size_b, hipMemcpyDeviceToHost); hipMemcpy(C_h, C, size_b, hipMemcpyDeviceToHost); hipMemcpy(mu_h, mu, size_b, hipMemcpyDeviceToHost); if (vibration == 1) { WW_from_Phi << <gridD, blockD >> > (WX, WY, Phi, C); hipMemcpy(Phi_h, Phi, size_b, hipMemcpyDeviceToHost); hipMemcpy(WX_h, WX, size_b, hipMemcpyDeviceToHost); hipMemcpy(WY_h, WY, size_b, hipMemcpyDeviceToHost); } true_pressure(p_h, p_true_h, C_h, mu_h, Geom.t, Geom.n1, Geom.n2, Geom.n3, Geom.n4, Geom.J_back, tau_h, Geom.TOTAL_SIZE, hx_h, hy_h, Ca_h, A_h, Gr_h, MM_h, Geom.OFFSET, sinA_h, cosA_h, PHASE_h, VV_h, vibr_X_h, vibr_Y_h, Phi_h, WX_h, WY_h); copied = true; } double *var[10]; int n = 0; string head = "VARIABLES=\"x\",\"y\",\"C\",\"p\",\"mu\",\"vx\",\"vy\""; var[n] = C_h; n++; var[n] = p_true_h; n++; var[n] = mu_h; n++; var[n] = vx_h; n++; var[n] = vy_h; n++; if (vibration == 1) { head.append(",\"WX\",\"WY\",\"Phi\""); var[n] = WX_h; n++; var[n] = WY_h; n++; var[n] = Phi_h; n++; } Geom.write_field_tecplot(tecplot, hx_h, hy_h, "fields", timeq, each, iter, var, n, head); } //recovery fields writing if (iter % (int)(tt*time_recovery) == 0 || timeq > limit_timeq || stop == 1) { if (copied == false) { hipMemcpy(vx_h, vx, size_b, hipMemcpyDeviceToHost); hipMemcpy(vy_h, vy, size_b, hipMemcpyDeviceToHost); hipMemcpy(p_h, p, size_b, hipMemcpyDeviceToHost); hipMemcpy(C_h, C, size_b, hipMemcpyDeviceToHost); hipMemcpy(mu_h, mu, size_b, hipMemcpyDeviceToHost); if (vibration == 1) { WW_from_Phi << <gridD, blockD >> > (WX, WY, Phi, C); hipMemcpy(Phi_h, Phi, size_b, hipMemcpyDeviceToHost); hipMemcpy(WX_h, WX, size_b, hipMemcpyDeviceToHost); hipMemcpy(WY_h, WY, size_b, hipMemcpyDeviceToHost); } copied = true; } //Geom.save(vx_h, vy_h, p_h, C_h, mu_h, iter, write_i, timeq, kk); double * var[10]; unsigned int n = 0; var[n] = vx_h; n++; var[n] = vy_h; n++; var[n] = p_h; n++; var[n] = C_h; n++; var[n] = mu_h; n++; if (vibration == 1) { var[n] = Phi_h; n++; } Geom.save(var, n, iter, write_i, timeq, kk); } copied = false; // the end of 4th step if (timeq > limit_timeq || stop == 1) return 0; } //the end of the main time loop }
693be61c437d184d0bc09dc463bae522c9d615cc.cu
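// Editor's note (hedged): the file below is the CUDA counterpart stored in this
// record alongside the HIP listing above. When comparing the two, the mechanical
// difference visible in this record is the runtime-API prefix, e.g.
//   hipMemcpyToSymbol(VAR, &VAR##_h, sizeof(double), 0, hipMemcpyHostToDevice)
//     vs. cudaMemcpyToSymbol(F, &F##_h, sizeof(type), 0, cudaMemcpyHostToDevice)
// and hipMalloc / hipMemcpy vs. cudaMalloc / cudaMemcpy, while the kernel-launch
// syntax kernel<<<gridD, blockD>>>(...) appears in both sources, since hipcc
// accepts it directly. The two bodies are not guaranteed to be otherwise
// identical line for line.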
#define ThisSoftwareVersion "200722" #define CodeName "in the middle of nowhere" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <stdio.h> #include <string> #include <fstream> #include <iomanip> #include <sstream> #include <cstring> #include <cmath> #include <algorithm> #include <ctime> #include <cuda.h> #include <vector> #include <csignal> #include <time.h> #ifdef _WIN32 #include "windows.h" #endif using namespace std; using std::cout; int state; //__device__ double *C, *C0, *ux, *uy, *vx, *vy, *p, *p0, *mu; //__device__ multi_cross *Md; __constant__ double hx, hy, tau, Lx, Ly, tau_p; __constant__ double A, Ca, Gr, Pe, Re, Gs, MM, dP; __constant__ double Amp, Omega, vibr_X, vibr_Y, VV; __constant__ double alpha, sinA, cosA, theta, cosTh, sinTh; __constant__ unsigned int nx, ny, n, offset, border_type; __constant__ double eps0_d = 1e-5; __constant__ double pi = 3.1415926535897932384626433832795; __constant__ int Mx, My, Msize, Moffset, OFFSET; __constant__ unsigned int iter; __constant__ unsigned int TOTAL_SIZE; __constant__ unsigned int nxg, nyg; __constant__ unsigned int PHASE; __constant__ unsigned int PHI_border_left, PHI_border_right, W_BORDER; __constant__ double PHI_value_left, PHI_value_right; __device__ int* n1, *n2, *n3, *n4, *t, *J_back; __device__ double Phi_reference = 0.0; __global__ void hello() { printf("\n thread x:%i y:%i, information copied from device:\n", threadIdx.x, threadIdx.y); printf("A= %f Ca=%f \n", A, Ca); printf("Gr= %f Pe=%f \n", Gr, Pe); printf("Re= %f M=%f \n", Re, MM); printf("hx= %f hy=%f \n", hx, hy); printf("tau= %20.16f \n", tau); printf("tau_p= %20.16f \n", tau_p); printf("nx= %i ny=%i \n", nx, ny); printf("Lx= %f Ly=%f \n", Lx, Ly); printf("offset= %i \n", offset); printf("sinA= %f cosA=%f \n", sinA, cosA); printf("sinTh= %f cosTh=%f \n", sinTh, cosTh); printf("Total number of nodes = %i \n", TOTAL_SIZE); printf("P inject factor = %f \n", dP); printf("Amp= %f Omega=%f V=%f \n", Amp, Omega, VV); printf("vibr_X= %f vibr_Y=%f \n", vibr_X, vibr_Y); printf("Vibro border: W = %i, Phi_L = %i, Phi_R = %i, VALUE_L = %f, VALUE_R = %f \n", W_BORDER, PHI_border_left, PHI_border_right, PHI_value_left, PHI_value_right); if (PHASE == 1) printf("Phase field \n"); if (PHASE == 0) printf("Single phase flow \n"); printf("\n"); } #define Pi 3.1415926535897932384626433832795 #define pause system("pause"); #define timer timer2 = clock()/ CLOCKS_PER_SEC; cout << "time (seconds)= " << (timer2 - timer1) << endl; #define cudaCheckError() { \ cudaError_t e = cudaGetLastError(); \ if (e != cudaSuccess) {\ printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \ exit(0); \ } \ } #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) #define VarName(Variable) (#Variable) #define PrintVar(Variable) cout << (#Variable) << " = " << Variable << endl; #define defConst(F, type) type F##_h; File.reading<type>(F##_h, #F, 0.0); cudaMemcpyToSymbol(F, &F ## _h, sizeof(type), 0, cudaMemcpyHostToDevice); //getting Ek and Vmax void velocity(unsigned int N, double hx, double hy, double *vx, double *vy, double &Ek, double &Vmax) { double V = 0; Ek = 0.0; Vmax = 0.0; for (unsigned int C = 0; C < N; C++) { V = +vx[C] * vx[C] + vy[C] * vy[C]; Ek += V; if (sqrt(V) > Vmax) Vmax 
= sqrt(V); } Ek = Ek / 2.0 * hx * hy; } double maxval(double* f, unsigned int n) { double max = abs(f[0]); for (unsigned int i = 0; i < n; i++) { if (abs(f[i])>max) { max = abs(f[i]); } } return max; } double MINval(double* f, unsigned int n) { double min = abs(f[0]); for (unsigned int i = 0; i < n; i++) { if (f[i]<min) { min = f[i]; } } return min; } double MAXval(double* f, unsigned int n) { double max = (f[0]); for (unsigned int i = 0; i < n; i++) { if ((f[i])>max) { max = (f[i]); } } return max; } double sum(double* f, unsigned int n) { double sum = 0; for (unsigned int i = 0; i < n; i++) { sum += f[i]; } return sum; } //volumetric flow rate void VFR(double *vx, int *t, unsigned int size, double hy, double &Q_in, double &Q_out, double *C, double &C_average, double &Cv) { Q_in = 0; Q_out = 0; C_average = 0; Cv = 0; for (unsigned int i = 0; i < size; i++) { if (t[i] == 9) { Q_in += vx[i]; } if (t[i] == 10) { Q_out += vx[i]; C_average += C[i]; Cv += vx[i] * C[i]; } } Q_in = Q_in*hy; Q_out = Q_out*hy; C_average = C_average*hy / (1.0 - hy); Cv = Cv*hy; } void C_statistics(unsigned int size, double hx, double hy, int *t, double *C, double &C_av, double &C_plus, double &C_minus) { C_av = 0; C_plus = 0; C_minus = 0; unsigned int n = 0, n2 = 0, n_plus = 0, n2_plus = 0, n_minus = 0, n2_minus = 0; for (unsigned int l = 0; l < size; l++) { if (t[l] == 0) { C_av += C[l]; n++; if (C[l] > 0) { C_plus += C[l]; n_plus++; } if (C[l] < 0) { C_minus += C[l]; n_minus++; } } else { C_av += C[l] / 2; n2++; if (C[l] > 0) { C_plus += C[l] / 2; n2_plus++; } if (C[l] < 0) { C_minus += C[l] / 2; n2_minus++; } } } if (n + n2 > 0) C_av /= (n + 0.5*n2); if (n_plus + n2_plus > 0) C_plus /= (n_plus + 0.5*n2_plus); if (n_minus + n2_minus > 0) C_minus /= (n_minus + 0.5*n2_minus); } void reading_parameters(unsigned int &ny_h, unsigned int &nx_h, double &each_t, unsigned int &each, unsigned int &Matrix_X, unsigned int &Matrix_Y, double &tau_h, double &A_h, double &Ca_h, double &Gr_h, double &Pe_h, double &Re_h, double &alpha_h, double &MM_h, double &tecplot, unsigned int &PHASE_h) { ifstream read; string str, substr; stringstream ss; read.open("inp.dat"); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; ny_h = atoi(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; nx_h = atoi(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; each_t = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; each = atoi(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Matrix_X = atoi(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Matrix_Y = atoi(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; tau_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; A_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Ca_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Gr_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Pe_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; Re_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; alpha_h = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; MM_h = atof(substr.c_str()); 
ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; tecplot = atof(substr.c_str()); ss.str(""); ss.clear(); getline(read, str); ss << str; ss >> substr; PHASE_h = atoi(substr.c_str()); read.close(); } struct ReadingFile { private: ifstream read; string str, substr, buffer; string file_name; stringstream ss; istringstream iss; ostringstream oss; int stat, pos; public: ReadingFile(string name) { file_name = name; open_file(file_name); stat = 0; } ReadingFile() { stat = 0; } void open_file(string file_name) { read.open(file_name.c_str()); if (read.good()) { cout << endl << "the parameter file \"" << file_name << "\" has been read " << endl << endl; oss << read.rdbuf(); buffer = oss.str(); iss.str(buffer); } else { cout << "the parameter file has been not found, default parameters will be initialized " << endl; buffer = ""; iss.str(buffer); } } template <typename T> int reading(T &var, string parameter_name, T def_var, T min = 0, T max = 0) { int ret = 0; stat = 0; transform(parameter_name.begin(), parameter_name.end(), parameter_name.begin(), ::tolower); iss.clear(); iss.seekg(0); while (getline(iss, str)) { //substr.clear(); ss.str(""); ss.clear(); ss << str; ss >> substr; transform(substr.begin(), substr.end(), substr.begin(), ::tolower); if (substr == parameter_name) { ret = 1; pos = (int)ss.tellg(); while (ss >> substr) { if (substr == "=") { ss >> var; stat = 1; break; } } if (stat == 0) { ss.clear(); ss.seekg(pos); ss >> var; } break; } } if (iss.fail()) { var = def_var; } if (min != max && (min + max) != 0) { if (var > max || var < min) { cout << "Warning: \"" + parameter_name + "\" should not be within this range" << endl; var = def_var; } } return ret; //return 1 if read } void reading_string(string &var, string parameter_name, string def_var) { stat = 0; transform(parameter_name.begin(), parameter_name.end(), parameter_name.begin(), ::tolower); iss.clear(); iss.seekg(0); while (getline(iss, str)) { //substr.clear(); ss.str(""); ss.clear(); ss << str; ss >> substr; transform(substr.begin(), substr.end(), substr.begin(), ::tolower); if (substr == parameter_name) { pos = (int)ss.tellg(); while (ss >> substr) { if (substr == "=") { ss >> var; stat = 1; break; } } if (stat == 0) { ss.clear(); ss.seekg(pos); ss >> var; } break; } } if (iss.fail()) { var = def_var; } } }; __device__ double dx1(unsigned int l, double *f) { return 0.5*(f[n3[l]] - f[n1[l]]) / hx; } __device__ double dy1(unsigned int l, double *f) { return 0.5*(f[n2[l]] - f[n4[l]]) / hy; } __device__ double dx2(unsigned int l, double *f) { return (f[n3[l]] - 2.0*f[l] + f[n1[l]]) / hx / hx; } __device__ double dy2(unsigned int l, double *f) { return (f[n2[l]] - 2.0*f[l] + f[n4[l]]) / hy / hy; } __device__ double dx1_eq_0_forward(unsigned int l, double *f) { return (4.0*f[n3[l]] - f[n3[n3[l]]]) / 3.0; } __device__ double dx1_eq_0_back(unsigned int l, double *f) { return (4.0*f[n1[l]] - f[n1[n1[l]]]) / 3.0; } __device__ double dy1_eq_0_up(unsigned int l, double *f) { return (4.0*f[n2[l]] - f[n2[n2[l]]]) / 3.0; } __device__ double dy1_eq_0_down(unsigned int l, double *f) { return (4.0*f[n4[l]] - f[n4[n4[l]]]) / 3.0; } __device__ double dx1_forward(unsigned int l, double *f) { return -0.5*(3.0*f[l] - 4.0*f[n3[l]] + f[n3[n3[l]]]) / hx; } __device__ double dx1_back(unsigned int l, double *f) { return 0.5*(3.0*f[l] - 4.0*f[n1[l]] + f[n1[n1[l]]]) / hx; } __device__ double dy1_up(unsigned int l, double *f) { return -0.5*(3.0*f[l] - 4.0*f[n2[l]] + f[n2[n2[l]]]) / hy; } __device__ double dy1_down(unsigned int l, double 
*f) { return 0.5*(3.0*f[l] - 4.0*f[n4[l]] + f[n4[n4[l]]]) / hy; } __device__ double dx2_forward(unsigned int l, double *f) { return (2.0 * f[l] - 5.0 * f[n3[l]] + 4.0 * f[n3[n3[l]]] - f[n3[n3[n3[l]]]]) / hx / hx; } __device__ double dx2_back(unsigned int l, double *f) { return (2.0 * f[l] - 5.0 * f[n1[l]] + 4.0 * f[n1[n1[l]]] - f[n1[n1[n1[l]]]]) / hx / hx; } __device__ double dy2_up(unsigned int l, double *f) { return (2.0 * f[l] - 5.0 * f[n2[l]] + 4.0 * f[n2[n2[l]]] - f[n2[n2[n2[l]]]]) / hy / hy; } __device__ double dy2_down(unsigned int l, double *f) { return (2.0 * f[l] - 5.0 * f[n4[l]] + 4.0 * f[n4[n4[l]]] - f[n4[n4[n4[l]]]]) / hy / hy; } __device__ double dx2_eq_0_forward(unsigned int l, double* f) { return (5.0 * f[n3[l]] - 4.0 * f[n3[n3[l]]] + f[n3[n3[n3[l]]]]) * 0.5; } __device__ double dx2_eq_0_back(unsigned int l, double* f) { return (5.0 * f[n1[l]] - 4.0 * f[n1[n1[l]]] + f[n1[n1[n1[l]]]]) * 0.5; } __device__ double dy2_eq_0_up(unsigned int l, double* f) { return (5.0 * f[n2[l]] - 4.0 * f[n2[n2[l]]] + f[n2[n2[n2[l]]]]) * 0.5; } __device__ double dy2_eq_0_down(unsigned int l, double* f) { return (5.0 * f[n4[l]] - 4.0 * f[n4[n4[l]]] + f[n4[n4[n4[l]]]]) * 0.5; } __device__ double dxy1(double *f, int l, int i, int j) { int ii = (J_back[l] - (J_back[l] / OFFSET)*OFFSET); int jj = (J_back[l] / OFFSET); if (i > 0 && i < nx && j > 0 && j < ny) { return (-f[l - 1 + offset] + f[l + 1 + offset] - f[l + 1 - offset] + f[l - 1 - offset]) / hx / hy / 4.0; } else { return 0; } } __device__ double extrapolate_back(unsigned int l, double *f) { return 2.0*f[n3[l]] - f[n3[n3[l]]]; } __device__ double extrapolate_forward(unsigned int l, double *f) { return 2.0*f[n1[l]] - f[n1[n1[l]]]; } __device__ double extrapolate_down(unsigned int l, double *f) { return 2.0*f[n2[l]] - f[n2[n2[l]]]; } __device__ double extrapolate_up(unsigned int l, double *f) { return 2.0*f[n4[l]] - f[n4[n4[l]]]; } __device__ double VgradF(unsigned int l, double *f, double *vx, double *vy) { double val = 0; double VR, VL, VU, VD; double FR, FL, FU, FD; FR = FL = FU = FD = 0; VR = (vx[n3[l]] + vx[l])*0.5; VL = (vx[l] + vx[n1[l]])*0.5; if (VR > 0) FR = f[l]; else if (VR < 0) FR = f[n3[l]]; if (VL > 0) FL = f[n1[l]]; else if (VL < 0) FL = f[l]; val += (VR*FR - VL*FL) / hx; VU = (vy[n2[l]] + vy[l])*0.5; VD = (vy[l] + vy[n4[l]])*0.5; if (VU > 0) FU = f[l]; else if (VU < 0) FU = f[n2[l]]; if (VD > 0) FD = f[n4[l]]; else if (VD < 0) FD = f[l]; val += (VU*FU - VD*FD) / hy; return val; } __device__ double VgradF_forward(unsigned int l, double *f, double *vx, double *vy) { double val = 0; double VR, VL, VU, VD; double FR, FL, FU, FD; FR = FL = FU = FD = 0; VR = (vx[n3[l]] + vx[l])*0.5; VL = (vx[l] + vx[n1[l]])*0.5; if (VR > 0) FR = f[l]; else if (VR < 0) FR = f[n3[l]]; if (VL > 0) FL = f[n1[l]]; else if (VL < 0) FL = f[l]; val += (VR*FR - VL*FL) / hx; VU = (vy[n2[l]] + vy[l])*0.5; VD = (vy[l] + vy[n4[l]])*0.5; if (VU > 0) FU = f[l]; else if (VU < 0) FU = f[n2[l]]; if (VD > 0) FD = f[n4[l]]; else if (VD < 0) FD = f[l]; val += (VU*FU - VD*FD) / hy; return val; } /* #define dx1(l, f) 0.5*(f[n3[l]] - f[n1[l]]) / hx #define dy1(l, f) 0.5*(f[n2[l]] - f[n4[l]]) / hy #define dx2(l, f) (f[n3[l]] - 2.0*f[l] + f[n1[l]]) / hx / hx #define dy2(l, f) (f[n2[l]] - 2.0*f[l] + f[n4[l]]) / hy / hy #define dx1_eq_0_forward(l, f) (4.0*f[n3[l]] - f[n3[n3[l]]]) / 3.0 #define dx1_eq_0_back(l, f) (4.0*f[n1[l]] - f[n1[n1[l]]]) / 3.0 #define dy1_eq_0_up(l, f) (4.0*f[n2[l]] - f[n2[n2[l]]]) / 3.0 #define dy1_eq_0_down(l, f) (4.0*f[n4[l]] - f[n4[n4[l]]]) / 3.0 #define 
dx1_forward(l, f) -0.5*(3.0*f[l] - 4.0*f[n3[l]] + f[n3[n3[l]]]) / hx #define dx1_back(l, f) 0.5*(3.0*f[l] - 4.0*f[n1[l]] + f[n1[n1[l]]]) / hx #define dy1_up(l, f) -0.5*(3.0*f[l] - 4.0*f[n2[l]] + f[n2[n2[l]]]) / hy #define dy1_down(l, f) 0.5*(3.0*f[l] - 4.0*f[n4[l]] + f[n4[n4[l]]]) / hy #define dx2_forward(l, f) (2.0 * f[l] - 5.0 * f[n3[l]] + 4.0 * f[n3[n3[l]]] - f[n3[n3[n3[l]]]]) / hx / hx #define dx2_back(l, f) (2.0 * f[l] - 5.0 * f[n1[l]] + 4.0 * f[n1[n1[l]]] - f[n1[n1[n1[l]]]]) / hx / hx #define dy2_up(l, f) (2.0 * f[l] - 5.0 * f[n2[l]] + 4.0 * f[n2[n2[l]]] - f[n2[n2[n2[l]]]]) / hy / hy #define dy2_down(l, f) (2.0 * f[l] - 5.0 * f[n4[l]] + 4.0 * f[n4[n4[l]]] - f[n4[n4[n4[l]]]]) / hy / hy */ //integer global inxed i __device__ unsigned int iG(unsigned int l) { return (J_back[l] - (J_back[l] / OFFSET)*OFFSET); } //integer global index j __device__ unsigned int jG(unsigned int l) { return (J_back[l] / OFFSET); } __device__ double r_gamma(unsigned int l) { return (J_back[l] - (J_back[l] / OFFSET)*OFFSET) * hx*cosA + //cosA*x (J_back[l] / OFFSET) * hy*sinA; //sinA*y } __device__ double x_gamma(unsigned int l) { return (J_back[l] - (J_back[l] / OFFSET)*OFFSET) * hx*cosA; //cosA*x } __device__ double y_gamma(unsigned int l) { return (J_back[l] / OFFSET) * hy*sinA; //sinA*y } __global__ void chemical_potential(double *mu, double *C) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner mu[l] = -MM*Gr* r_gamma(l) //nu takoe // da norm + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - Ca*(dx2(l, C) + dy2(l, C)); break; case 1: //left rigid mu[l] = dx1_eq_0_forward(l, mu); break; case 2: //upper rigid mu[l] = dy1_eq_0_down(l, mu); /* int down1 = n4[l]; int down2 = n4[n4[l]]; double m1 = -MM*Gr* r_gamma(down1) +2.0 * A * C[down1] + 4.0 * pow(C[down1], 3) - Ca*(dx2(down1, C) + dy2(down1, C)); double m2 = -MM*Gr* r_gamma(down2) +2.0 * A * C[down2] + 4.0 * pow(C[down2], 3) - Ca*(dx2(down2, C) + dy2(down2, C)); mu[l] = (4.0*m1 - m2) / 3.0; */ break; case 3: //right rigid mu[l] = dx1_eq_0_back(l, mu); break; case 4: //lower rigid mu[l] = dy1_eq_0_up(l, mu); /* int up1 = n2[l]; int up2 = n2[n2[l]]; double m1_ = -MM*Gr* r_gamma(up1) +2.0 * A * C[up1] + 4.0 * pow(C[up1], 3) - Ca*(dx2(up1, C) + dy2(up1, C)); double m2_ = -MM*Gr* r_gamma(up2) +2.0 * A * C[up2] + 4.0 * pow(C[up2], 3) - Ca*(dx2(up2, C) + dy2(up2, C)); mu[l] = (4.0*m1_ - m2_) / 3.0; */ break; case 5: //left upper rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_down(l, mu)); break; case 6: //right upper rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_down(l, mu)); break; case 7: //right lower rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_up(l, mu)); break; case 8: //left lower rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_up(l, mu)); break; case 9: //inlet (from left) mu[l] = -Ca*dx2_forward(l, C) - Ca*dy2(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - MM*Gr* r_gamma(l); //dx1_eq_0_forward(l, mu); break; case 10://outlet (to right) mu[l] = -Ca*dx2_back(l, C) - Ca*dy2(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - MM*Gr* r_gamma(l); //dx1_eq_0_back(l, mu); break; default: break; } } } __global__ void chemical_potential_border(double *mu, double *C) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner break; case 1: //left rigid mu[l] = dx1_eq_0_forward(l, mu); break; case 2: //upper rigid mu[l] = dy1_eq_0_down(l, mu);// -2.0 / 3.0*hy*(-Gr*r_gamma(l)*C[l]); /* int down1 = n4[l]; int down2 = n4[n4[l]]; double m1 = 
-MM*Gr* r_gamma(down1) +2.0 * A * C[down1] + 4.0 * pow(C[down1], 3) - Ca*(dx2(down1, C) + dy2(down1, C)); double m2 = -MM*Gr* r_gamma(down2) +2.0 * A * C[down2] + 4.0 * pow(C[down2], 3) - Ca*(dx2(down2, C) + dy2(down2, C)); mu[l] = (4.0*m1 - m2) / 3.0; */ break; case 3: //right rigid mu[l] = dx1_eq_0_back(l, mu); break; case 4: //lower rigid mu[l] = dy1_eq_0_up(l, mu);// +2.0 / 3.0*hy*(-Gr*r_gamma(l)*C[l]);; /* int up1 = n2[l]; int up2 = n2[n2[l]]; double m1_ = -MM*Gr* r_gamma(up1) +2.0 * A * C[up1] + 4.0 * pow(C[up1], 3) - Ca*(dx2(up1, C) + dy2(up1, C)); double m2_ = -MM*Gr* r_gamma(up2) +2.0 * A * C[up2] + 4.0 * pow(C[up2], 3) - Ca*(dx2(up2, C) + dy2(up2, C)); mu[l] = (4.0*m1_ - m2_) / 3.0; */ break; case 5: //left upper rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_down(l, mu)); break; case 6: //right upper rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_down(l, mu)); break; case 7: //right lower rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_up(l, mu)); break; case 8: //left lower rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_up(l, mu)); break; case 9: //inlet (from left) //mu[l] = dx1_eq_0_forward(l, mu); //mu[l] = dx2_eq_0_forward(l, mu); break; case 10://outlet (to right) //mu[l] = dx1_eq_0_back(l, mu); //mu[l] = dx2_eq_0_back(l, mu); break; default: break; } } } __global__ void chemical_potential_inside(double *mu, double *C) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner mu[l] = -MM*Gr* r_gamma(l) //nu takoe // da norm + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - Ca*(dx2(l, C) + dy2(l, C)); break; case 1: //left rigid break; case 2: //upper rigid break; case 3: //right rigid break; case 4: //lower rigid break; case 5: //left upper rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_down(l, mu)); break; case 6: //right upper rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_down(l, mu)); break; case 7: //right lower rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_up(l, mu)); break; case 8: //left lower rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_up(l, mu)); break; case 9: //inlet (from left) mu[l] = -Ca*dx2_forward(l, C) - Ca*dy2(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - MM*Gr* r_gamma(l); break; case 10://outlet (to right) mu[l] = -Ca*dx2_back(l, C) - Ca*dy2(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - MM*Gr* r_gamma(l); break; default: break; } } } __global__ void chemical_potential_Gr(double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { mu[l] = -MM*Gr* r_gamma(l); } } __global__ void quasi_velocity(double *ux, double *uy, double *vx, double *vy, double *C0, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner //ux_d ux[l] = vx[l] //+ Gr*C0[l] * x_gamma(l) + tau * ( -vx[l] * dx1(l, vx) - vy[l] * dy1(l, vx) + (dx2(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); //uy_d uy[l] = vy[l] //+ Gr*C0[l] * y_gamma(l) + tau * ( -vx[l] * dx1(l, vy) - vy[l] * dy1(l, vy) + (dx2(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 1: //left rigid ux[l] = tau / Re * dx2_forward(l, vx); break; case 2: //upper rigid uy[l] = tau / Re * dy2_down(l, vy); break; case 3: //right rigid ux[l] = tau / Re * dx2_back(l, vx); break; case 4: //lower rigid uy[l] = tau / Re * dy2_up(l, vy); break; case 5: //left upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( 
+(dx2_forward(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 6: //right upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 7: //right lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 8: //left lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 9: //inlet (from left) ux[l] = vx[l] + tau*( -vx[l] * dx1_forward(l, vx) - vy[l] * dy1(l, vx) + (dx2_forward(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_forward(l, mu) / MM ); uy[l] = tau * ( -vx[l] * dx1_forward(l, vy) - vy[l] * dy1(l, vy) + (dx2_forward(l, vy) + dy2(l, vy)) / Re // !быть может, !тут нужно дополнить - C0[l] * dy1(l, mu) / MM ); break; case 10: //outlet (to right) ux[l] = vx[l] + tau*( -vx[l] * dx1_back(l, vx) - vy[l] * dy1(l, vx) + (dx2_back(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_back(l, mu) / MM //! ); uy[l] = tau * ( -vx[l] * dx1_back(l, vy) - vy[l] * dy1(l, vy) + (dx2_back(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM //! ); break; default: break; } } } __global__ void quasi_velocity_pulsation(double *ux, double *uy, double *vx, double *vy, double *C0, double *mu, double time) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner //ux_d ux[l] = vx[l] //+ Gr*C0[l] * x_gamma(l) + tau * ( -vx[l] * dx1(l, vx) - vy[l] * dy1(l, vx) + (dx2(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); //uy_d uy[l] = vy[l] //+ Gr*C0[l] * y_gamma(l) + tau * ( -vx[l] * dx1(l, vy) - vy[l] * dy1(l, vy) + (dx2(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 1: //left rigid ux[l] = tau / Re * dx2_forward(l, vx); break; case 2: //upper rigid uy[l] = tau / Re * dy2_down(l, vy); break; case 3: //right rigid ux[l] = tau / Re * dx2_back(l, vx); break; case 4: //lower rigid uy[l] = tau / Re * dy2_up(l, vy); break; case 5: //left upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 6: //right upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 7: //right lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 8: //left lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 9: //inlet (from left) ux[l] = vx[l] + tau*( -vx[l] * dx1_forward(l, vx) - vy[l] * dy1(l, vx) + (dx2_forward(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_forward(l, mu) / MM ); uy[l] = tau * ( -vx[l] * dx1_forward(l, vy) - vy[l] * dy1(l, vy) + (dx2_forward(l, vy) + dy2(l, vy)) / Re // !быть может, !тут нужно дополнить - C0[l] * dy1(l, mu) / MM 
); break; case 10: //outlet (to right) ux[l] = vx[l] + tau*( -vx[l] * dx1_back(l, vx) - vy[l] * dy1(l, vx) + (dx2_back(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_back(l, mu) / MM //! ); uy[l] = tau * ( -vx[l] * dx1_back(l, vy) - vy[l] * dy1(l, vy) + (dx2_back(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM //! ); break; default: break; } ux[l] += tau*Amp*sin(Omega*time)*vibr_X; uy[l] += tau*Amp*sin(Omega*time)*vibr_Y; } } __global__ void quasi_velocity_pulsation_with_Phi(double *ux, double *uy, double *vx, double *vy, double *C0, double *mu, double time, double *Phi, double *WX, double *WY) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { double W0_W = WX[l] * vibr_X + WY[l] * vibr_Y; switch (t[l]) { case 0: //inner //ux_d ux[l] = vx[l] //+ Gr*C0[l] * x_gamma(l) + tau * ( -vx[l] * dx1(l, vx) - vy[l] * dy1(l, vx) + (dx2(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1(l, mu) / MM - VV*(W0_W)*dx1(l, C0) ); //uy_d uy[l] = vy[l] //+ Gr*C0[l] * y_gamma(l) + tau * ( -vx[l] * dx1(l, vy) - vy[l] * dy1(l, vy) + (dx2(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM - VV*(W0_W)*dy1(l, C0) ); break; case 1: //left rigid ux[l] = tau / Re * dx2_forward(l, vx); break; case 2: //upper rigid uy[l] = tau / Re * dy2_down(l, vy); break; case 3: //right rigid ux[l] = tau / Re * dx2_back(l, vx); break; case 4: //lower rigid uy[l] = tau / Re * dy2_up(l, vy); break; case 5: //left upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 6: //right upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 7: //right lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 8: //left lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 9: //inlet (from left) ux[l] = vx[l] + tau*( -vx[l] * dx1_forward(l, vx) - vy[l] * dy1(l, vx) + (dx2_forward(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_forward(l, mu) / MM - VV*(W0_W)*dx1_forward(l, C0) ); uy[l] = tau * ( -vx[l] * dx1_forward(l, vy) - vy[l] * dy1(l, vy) + (dx2_forward(l, vy) + dy2(l, vy)) / Re // !быть может, !тут нужно дополнить - C0[l] * dy1(l, mu) / MM - VV*(W0_W)*dy1(l, C0) ); break; case 10: //outlet (to right) ux[l] = vx[l] + tau*( -vx[l] * dx1_back(l, vx) - vy[l] * dy1(l, vx) + (dx2_back(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_back(l, mu) / MM //! - VV*(W0_W)*dx1_back(l, C0) ); uy[l] = tau * ( -vx[l] * dx1_back(l, vy) - vy[l] * dy1(l, vy) + (dx2_back(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM //! 
- VV*(W0_W)*dy1(l, C0) ); break; default: break; } } } __global__ void quasi_velocity_no_phase_field(double *ux, double *uy, double *vx, double *vy, double *C0, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner //ux_d ux[l] = vx[l] //+ Gr*C0[l] * x_gamma(l) + tau * ( -vx[l] * dx1(l, vx) - vy[l] * dy1(l, vx) + (dx2(l, vx) + dy2(l, vx)) / Re + Gr*C0[l] * cosA ); //uy_d uy[l] = vy[l] //+ Gr*C0[l] * y_gamma(l) + tau * ( -vx[l] * dx1(l, vy) - vy[l] * dy1(l, vy) + (dx2(l, vy) + dy2(l, vy)) / Re + Gr*C0[l] * sinA ); break; case 1: //left rigid ux[l] = tau / Re * dx2_forward(l, vx) + tau*Gr*C0[l] * cosA; break; case 2: //upper rigid uy[l] = tau / Re * dy2_down(l, vy) + tau*Gr*C0[l] * sinA; break; case 3: //right rigid ux[l] = tau / Re * dx2_back(l, vx) + tau*Gr*C0[l] * cosA; break; case 4: //lower rigid uy[l] = tau / Re * dy2_up(l, vy) + tau*Gr*C0[l] * sinA; break; case 5: //left upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 6: //right upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 7: //right lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 8: //left lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 9: //inlet (from left) ux[l] = vx[l] + tau*( -vx[l] * dx1_forward(l, vx) - vy[l] * dy1(l, vx) + (dx2_forward(l, vx) + dy2(l, vx)) / Re + Gr*C0[l] * cosA ); uy[l] = tau * ( -vx[l] * dx1_forward(l, vy) - vy[l] * dy1(l, vy) + (dx2_forward(l, vy) + dy2(l, vy)) / Re // !быть может, !тут нужно дополнить + Gr*C0[l] * sinA ); break; case 10: //outlet (to right) ux[l] = vx[l] + tau*( -vx[l] * dx1_back(l, vx) - vy[l] * dy1(l, vx) + (dx2_back(l, vx) + dy2(l, vx)) / Re + Gr*C0[l] * cosA ); uy[l] = tau * ( -vx[l] * dx1_back(l, vy) - vy[l] * dy1(l, vy) + (dx2_back(l, vy) + dy2(l, vy)) / Re + Gr*C0[l] * sinA ); break; default: break; } } } __global__ void concentration(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = dx1_eq_0_forward(l, C0); break; case 2: //upper rigid C[l] = dy1_eq_0_down(l, C0); break; case 3: //right rigid C[l] = dx1_eq_0_back(l, C0); break; case 4: //lower rigid C[l] = dy1_eq_0_up(l, C0); break; case 5: //left upper rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_down(l, C0)); break; case 6: //right upper rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_down(l, C0)); break; case 7: //right lower rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_up(l, C0)); break; case 8: //left lower rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_up(l, C0)); break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: 
break; } } } __global__ void concentration_surface_energy_wetting(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; double Ca_test = sqrt(Ca) * 5; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = dx1_eq_0_forward(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test; break; case 2: //upper rigid C[l] = dy1_eq_0_down(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test; break; case 3: //right rigid C[l] = dx1_eq_0_back(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test; break; case 4: //lower rigid C[l] = dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test; break; case 5: //left upper rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_down(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 6: //right upper rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_down(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 7: //right lower rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 8: //left lower rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: break; } } } __global__ void concentration_geometrical_wetting(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; double Ca_test = sqrt(Ca) * 5; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = dx1_eq_0_forward(l, C0) - cosTh * 2.0 / 3.0*hx * sqrt(pow(dx1_forward(l, C0), 2) + pow(dy1(l, C0), 2)); break; case 2: //upper rigid C[l] = dy1_eq_0_down(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test; break; case 3: //right rigid C[l] = dx1_eq_0_back(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test; break; case 4: //lower rigid C[l] = dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 - 4.0*C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test; break; case 5: //left upper rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_down(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 6: //right upper rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_down(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 7: //right lower rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 8: //left lower rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) - 0.75*cosTh*(1.0 - C0[l] * C0[l]) * 2.0 / 3.0*hx / Ca_test + dy1_eq_0_up(l, C0) - 0.75*cosTh*(1.0 
- C0[l] * C0[l]) * 2.0 / 3.0*hy / Ca_test); break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: break; } } } __global__ void concentration_wetting(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid if (C0[n3[l]] < C0[l]) C[l] = C0[n3[l]]; break; case 2: //upper rigid if (C0[n4[l]] < C0[l]) C[l] = C0[n4[l]]; break; case 3: //right rigid if (C0[n1[l]] < C0[l]) C[l] = C0[n1[l]]; break; case 4: //lower rigid if (C0[n2[l]] < C0[l]) C[l] = C0[n2[l]]; break; case 5: //left upper rigid corner if (C0[n3[n4[l]]] < C0[l]) C[l] = C0[n3[n4[l]]]; break; case 6: //right upper rigid corner if (C0[n1[n4[l]]] < C0[l]) C[l] = C0[n1[n4[l]]]; break; case 7: //right lower rigid corner if (C0[n2[n1[l]]] < C0[l]) C[l] = C0[n2[n1[l]]]; break; case 8: //left lower rigid corner if (C0[n2[n3[l]]] < C0[l]) C[l] = C0[n2[n3[l]]]; break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: break; } } } __global__ void concentration_no_wetting(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = 0.5; break; case 2: //upper rigid C[l] = 0.5; break; case 3: //right rigid C[l] = 0.5; break; case 4: //lower rigid C[l] = 0.5; break; case 5: //left upper rigid corner C[l] = 0.5; break; case 6: //right upper rigid corner C[l] = 0.5; break; case 7: //right lower rigid corner C[l] = 0.5; break; case 8: //left lower rigid corner C[l] = 0.5; break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: break; } } } __global__ void concentration_no_input_C(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -vx[l] * dx1(l, C0) - vy[l] * dy1(l, C0) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = dx1_eq_0_forward(l, C0); break; case 2: //upper rigid C[l] = dy1_eq_0_down(l, C0); break; case 3: //right rigid C[l] = dx1_eq_0_back(l, C0); break; case 4: //lower rigid C[l] = dy1_eq_0_up(l, C0); break; case 5: //left upper rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_down(l, C0)); break; case 6: //right upper rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_down(l, C0)); break; case 7: //right lower rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_up(l, C0)); break; case 8: //left lower rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_up(l, C0)); break; case 9: //inlet (from left) C[l] = 0.5; dx1_eq_0_forward(l, C0); break; case 10://outlet (to right) C[l] = dx1_eq_0_back(l, C0); break; default: break; } } } __global__ void velocity_correction(double *vx, double *vy, double *ux, double *uy, double *p) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner vx[l] = ux[l] - tau * dx1(l, p); vy[l] = uy[l] - tau * dy1(l, p); break; case 1: //left rigid vx[l] = 0.0; vy[l] = 0.0; break; case 2: //upper rigid vx[l] = 0.0; vy[l] = 
0.0; break; case 3: //right rigid vx[l] = 0.0; vy[l] = 0.0; break; case 4: //lower rigid vx[l] = 0.0; vy[l] = 0.0; break; case 5: //left upper rigid corner vx[l] = 0.0; vy[l] = 0.0; break; case 6: //right upper rigid corner vx[l] = 0.0; vy[l] = 0.0; break; case 7: //right lower rigid corner vx[l] = 0.0; vy[l] = 0.0; break; case 8: //left lower rigid corner vx[l] = 0.0; vy[l] = 0.0; break; case 9: //inlet (from left) vx[l] = ux[l] - tau * dx1_forward(l, p); vy[l] = uy[l] - tau * dy1(l, p); //double vx1 = ux[n3[l]] - tau * dx1(n3[l], p); //double vx2 = ux[n3[n3[l]]] - tau * dx1(n3[n3[l]], p); //double vy1 = uy[n3[l]] - tau * dy1(n3[l], p); //double vy2 = uy[n3[n3[l]]] - tau * dy1(n3[n3[l]], p); //vx[l] = 2.0*vx1 - vx2; //vy[l] = 2.0*vy1 - vy2; break; case 10: //outlet (to right) vx[l] = ux[l] - tau * dx1_back(l, p); vy[l] = uy[l] - tau * dy1(l, p); //vx[l] = ux[n1[l]] - tau * dx1(n1[l], p); //vy[l] = uy[n1[l]] - tau * dy1(n1[l], p); break; default: break; } } } //for solely pressure __global__ void Poisson(double *p, double *p0, double *ux, double *uy, double *mu, double *C) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner p[l] = p0[l] + tau_p*( -(dx1(l, ux) + dy1(l, uy)) / tau + dx2(l, p0) + dy2(l, p0) ); break; case 1: //left rigid p[l] = dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0; break; case 2: //upper rigid p[l] = dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0; break; case 3: //right rigid p[l] = dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0; break; case 4: //lower rigid p[l] = dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0; break; case 5: //left upper rigid corner p[l] = 0.5* (dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; case 6: //right upper rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; case 7: //right lower rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 8: //left lower rigid corner p[l] = 0.5* (dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 9: //inlet (from left) p[l] = 8.0 / Re*Lx*dP + PHASE*((0.5*Ca*pow(dx1_forward(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); break; case 10://outlet (to right) p[l] = 0 + PHASE*((0.5*Ca*pow(dx1_back(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); break; default: break; } } } //for pressure with Phi __global__ void Poisson_pulsation_Phi(double *p, double *p0, double *ux, double *uy, double *mu, double *C, double *Phi, double *WX, double *WY) { //vibration = 1 unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner p[l] = p0[l] + tau_p*( -(dx1(l, ux) + dy1(l, uy)) / tau + dx2(l, p0) + dy2(l, p0) ); break; case 1: //left rigid p[l] = dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0; break; case 2: //upper rigid p[l] = dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0; break; case 3: //right rigid p[l] = dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0; break; case 4: //lower rigid p[l] = dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0; break; case 5: //left upper rigid corner p[l] = 0.5* (dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; 
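		// Note on the rigid-wall cases above: the projection step needs dp/dn = u_n/tau on walls so that the
		// corrected normal velocity vanishes there. Assuming the dx1_eq_0_* / dy1_eq_0_* helpers return the
		// boundary value that zeroes a second-order one-sided derivative (e.g. (4*f[n3[l]] - f[n3[n3[l]]]) / 3),
		// the extra +/- u * 2h / (3*tau) term is the inhomogeneous part of that same one-sided formula;
		// corner nodes simply average the two directions.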
case 6: //right upper rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; case 7: //right lower rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 8: //left lower rigid corner p[l] = 0.5* (dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 9: //inlet (from left) p[l] = 8.0 / Re*Lx*dP + PHASE*((0.5*Ca*pow(dx1_forward(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); p[l] += (WX[l] * WX[l] + WY[l] * WY[l])*0.5*VV; break; case 10://outlet (to right) p[l] = 0 + PHASE*((0.5*Ca*pow(dx1_back(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); p[l] += (WX[l] * WX[l] + WY[l] * WY[l])*0.5*VV; break; default: break; } } } __global__ void Poisson_pulsation(double *p, double *p0, double *ux, double *uy, double *mu, double *C, double time) { //vibration = 3 unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner p[l] = p0[l] + tau_p*( -(dx1(l, ux) + dy1(l, uy)) / tau + dx2(l, p0) + dy2(l, p0) ); break; case 1: //left rigid p[l] = dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0; break; case 2: //upper rigid p[l] = dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0; break; case 3: //right rigid p[l] = dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0; break; case 4: //lower rigid p[l] = dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0; break; case 5: //left upper rigid corner p[l] = 0.5* (dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; case 6: //right upper rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_down(l, p0) + uy[l] * 2.0 * hy / tau / 3.0); break; case 7: //right lower rigid corner p[l] = 0.5* (dx1_eq_0_back(l, p0) + ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 8: //left lower rigid corner p[l] = 0.5* (dx1_eq_0_forward(l, p0) - ux[l] * 2.0 * hx / tau / 3.0 + dy1_eq_0_up(l, p0) - uy[l] * 2.0 * hy / tau / 3.0); break; case 9: //inlet (from left) p[l] = 8.0 / Re*Lx*dP*(1.0 + Amp*sin(Omega*time)) + PHASE*((0.5*Ca*pow(dx1_forward(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); break; case 10://outlet (to right) p[l] = 0 + PHASE*((0.5*Ca*pow(dx1_back(l, C), 2) - mu[l] * C[l] + A*pow(C[l], 2) + pow(C[l], 4)) / MM - Gr*C[l] * r_gamma(l)); break; default: break; } } } __global__ void Poisson_Phi(double *Phi, double *Phi0, double *C, double *WX, double *WY) { //vibration = 1 unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { //if (l == 1) Phi0[l] = 0; switch (t[l]) { case 0: //inner Phi[l] = Phi0[l] + tau_p*( -(dx1(l, C)*vibr_X + dy1(l, C)*vibr_Y) + dx2(l, Phi0) + dy2(l, Phi0) ); break; case 1: //left rigid Phi[l] = dx1_eq_0_forward(l, Phi0) - C[l] * vibr_X * 2.0 * hx / 3.0; break; case 2: //upper rigid Phi[l] = dy1_eq_0_down(l, Phi0) + C[l] * vibr_Y * 2.0 * hy / 3.0; break; case 3: //right rigid Phi[l] = dx1_eq_0_back(l, Phi0) + C[l] * vibr_X * 2.0 * hx / 3.0; break; case 4: //lower rigid Phi[l] = dy1_eq_0_up(l, Phi0) - C[l] * vibr_Y * 2.0 * hy / 3.0; break; case 5: //left upper rigid corner Phi[l] = 0.5* (dx1_eq_0_forward(l, Phi0) - C[l] * vibr_X * 2.0 * hx / 3.0 + dy1_eq_0_down(l, Phi0) + C[l] * vibr_Y * 2.0 * hy / 
3.0); break; case 6: //right upper rigid corner Phi[l] = 0.5* (dx1_eq_0_back(l, Phi0) + C[l] * vibr_X * 2.0 * hx / 3.0 + dy1_eq_0_down(l, Phi0) + C[l] * vibr_Y * 2.0 * hy / 3.0); break; case 7: //right lower rigid corner Phi[l] = 0.5* (dx1_eq_0_back(l, Phi0) + C[l] * vibr_X * 2.0 * hx / 3.0 + dy1_eq_0_up(l, Phi0) - C[l] * vibr_Y * 2.0 * hy / 3.0); break; case 8: //left lower rigid corner Phi[l] = 0.5* (dx1_eq_0_forward(l, Phi0) - C[l] * vibr_X * 2.0 * hx / 3.0 + dy1_eq_0_up(l, Phi0) - C[l] * vibr_Y * 2.0 * hy / 3.0); break; case 9: //inlet (from left) if (PHI_border_left == 0) { Phi[l] = PHI_value_left; } else if (PHI_border_left == 1) { Phi[l] = dx1_eq_0_forward(l, Phi0) - (PHI_value_left) * 2.0 * hx / 3.0; } else if (PHI_border_left == 2) { Phi[l] = dx2_eq_0_forward(l, Phi0) - (PHI_value_left)* hx * hx / 2.0; } else if (PHI_border_left == 3) { Phi[l] = dx1_eq_0_forward(l, Phi0) - (C[l] * vibr_X) * 2.0 * hx / 3.0; } else if (PHI_border_left == 4) { Phi[l] = C[l]*jG(l)*hy; } break; case 10://outlet (to right) if (PHI_border_right == 0) { Phi[l] = PHI_value_right; } else if (PHI_border_right == 1) { Phi[l] = dx1_eq_0_back(l, Phi0) + (PHI_value_right) * 2.0 * hx / 3.0; } else if (PHI_border_right == 2) { Phi[l] = dx2_eq_0_back(l, Phi0) - (PHI_value_right)* hx * hx / 2.0; } else if (PHI_border_right == 3) { Phi[l] = dx1_eq_0_back(l, Phi0) + (C[l] * vibr_X) * 2.0 * hx / 3.0; } else if (PHI_border_right == 4) { Phi[l] = C[l] * jG(l)*hy; } break; default: break; } if (l == 1) Phi_reference = Phi[l]; } } __global__ void Phi_normalization(double *Phi) { //vibration = 1 unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { Phi[l] = Phi[l] - Phi_reference; } } __global__ void WW_from_Phi(double *WX, double *WY, double *Phi, double *C) { { //vibration = 1 unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner WX[l] = -vibr_X*C[l] + dx1(l, Phi); WY[l] = -vibr_Y*C[l] + dy1(l, Phi); break; case 1: //left rigid WX[l] = 0; WY[l] = 0; // -vibr_Y*C[n3[l]] + dy1(n3[l], Phi);; //well, think of it if dC/dn != 0 break; case 2: //upper rigid WY[l] = 0; if (t[n4[l]] == 9) WX[l] = -vibr_X*C[l] + dx1_forward(l, Phi); else if (t[n4[l]] == 10) WX[l] = -vibr_X*C[l] + dx1_back(l, Phi); else WX[l] = -vibr_X*C[l] + dx1(l, Phi); break; case 3: //right rigid WX[l] = 0; WY[l] = 0; // -vibr_Y*C[n1[l]] + dy1(n1[l], Phi); break; case 4: //lower rigid WY[l] = 0; if (t[n2[l]] == 9) WX[l] = -vibr_X*C[l] + dx1_forward(l, Phi); else if (t[n2[l]] == 10) WX[l] = -vibr_X*C[l] + dx1_back(l, Phi); else WX[l] = -vibr_X*C[l] + dx1(l, Phi); break; case 5: //left upper rigid corner WX[l] = 0; WY[l] = 0; break; case 6: //right upper rigid corner WX[l] = 0; WY[l] = 0; break; case 7: //right lower rigid corner WX[l] = 0; WY[l] = 0; break; case 8: //left lower rigid corner WX[l] = 0; WY[l] = 0; break; case 9: //inlet (from left) WX[l] = -vibr_X * C[l] + dx1_forward(l, Phi); WY[l] = -vibr_Y * C[l] + dy1(l, Phi); break; /* if (W_BORDER == 0) { //dPhi/dn = 0 WX[l] = -vibr_X * C[l]; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } else if (W_BORDER == 1) { //W = 0 WX[l] = 0; WY[l] = 0; } else if (W_BORDER == 2) { WX[l] = 0; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } else if (W_BORDER == 3) { //dW/dn = 0 WX[l] = (4.0*(-vibr_X*C[n3[l]] + dx1(n3[l], Phi)) - (-vibr_X*C[n3[n3[l]]] + dx1(n3[n3[l]], Phi))) / 3.0; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } */ case 10://outlet (to right) WX[l] = -vibr_X * C[l] + dx1_back(l, Phi); WY[l] = -vibr_Y * C[l] + dy1(l, Phi); break; /* if (W_BORDER == 0) { //dPhi/dn = 0 WX[l] = 
-vibr_X*C[l]; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } else if (W_BORDER == 1) { //W = 0 WX[l] = 0; WY[l] = 0; } else if (W_BORDER == 2) { WX[l] = 0; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } else if (W_BORDER == 3) { //dW/dn = 0 WX[l] = (4.0*(-vibr_X*C[n1[l]] + dx1(n1[l], Phi)) - (-vibr_X*C[n1[n1[l]]] + dx1(n1[n1[l]], Phi))) / 3.0; WY[l] = -vibr_Y*C[l] + dy1(l, Phi); } */ default: break; } } } } __global__ void reduction00(double *data, unsigned int n, double* reduced) { extern __shared__ double shared[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x; shared[tid] = (i < n) ? abs(data[i]) : 0; if (i + blockDim.x < n) shared[tid] += abs(data[i + blockDim.x]); __syncthreads(); // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s>32; s >>= 1) { if (tid < s) { shared[tid] += shared[tid + s]; } __syncthreads(); } if (tid < 32) { // Fetch final intermediate sum from 2nd warp if (blockDim.x >= 64) shared[tid] += shared[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize / 2; offset > 0; offset /= 2) { shared[tid] += __shfl_down(shared[tid], offset); } } if (tid == 0) { reduced[blockIdx.x] = shared[0]; } } __global__ void reduction0(double *data, unsigned int n, double* reduced) { extern __shared__ double shared[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { shared[tid] = abs(data[i]); } else { shared[tid] = 0.0; } __syncthreads(); // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s>0; s >>= 1) { if (tid < s) { shared[tid] += shared[tid + s]; } __syncthreads(); } if (tid == 0) { reduced[blockIdx.x] = shared[0]; } } __global__ void reduction(double *data, unsigned int n, double* reduced) { extern __shared__ double shared[]; unsigned int tid = threadIdx.x; //unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { shared[tid] = abs(data[i]); //if (i + blockDim.x < n) shared[tid] += abs(data[i + blockDim.x]); } else { shared[tid] = 0.0; } __syncthreads(); if (blockDim.x >= 1024) { if (tid < 512) { shared[tid] += shared[tid + 512]; } __syncthreads(); } if (blockDim.x >= 512) { if (tid < 256) { shared[tid] += shared[tid + 256]; } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { shared[tid] += shared[tid + 128]; } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { shared[tid] += shared[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockDim.x >= 64) shared[tid] += shared[tid + 32]; if (blockDim.x >= 32) shared[tid] += shared[tid + 16]; if (blockDim.x >= 16) shared[tid] += shared[tid + 8]; if (blockDim.x >= 8) shared[tid] += shared[tid + 4]; if (blockDim.x >= 4) shared[tid] += shared[tid + 2]; if (blockDim.x >= 2) shared[tid] += shared[tid + 1]; } if (tid == 0) { reduced[blockIdx.x] = shared[0]; //if (blockDim.x==1) *last = shared[0]; } } __global__ void quasi_velocity_upstream(double *ux, double *uy, double *vx, double *vy, double *C0, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner //ux_d ux[l] = vx[l] //+ Gr*C0[l] * x_gamma(l) + tau * ( -VgradF(l, vx, vx, vy) //-vx[l] * dx1(l, vx) - vy[l] * dy1(l, vx) + (dx2(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); //uy_d uy[l] = vy[l] //+ Gr*C0[l] * y_gamma(l) + tau * ( -VgradF(l, vy, vx, vy) //-vx[l] * dx1(l, vy) - vy[l] * dy1(l, vy) + (dx2(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 1: //left rigid ux[l] = tau / Re * 
dx2_forward(l, vx); break; case 2: //upper rigid uy[l] = tau / Re * dy2_down(l, vy); break; case 3: //right rigid ux[l] = tau / Re * dx2_back(l, vx); break; case 4: //lower rigid uy[l] = tau / Re * dy2_up(l, vy); break; case 5: //left upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 6: //right upper rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_down(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_down(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 7: //right lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_back(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_back(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 8: //left lower rigid corner ux[l] = vx[l] + tau * ( +(dx2_forward(l, vx) + dy2_up(l, vx)) / Re - C0[l] * dx1(l, mu) / MM ); uy[l] = vy[l] + tau * ( +(dx2_forward(l, vy) + dy2_up(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 9: //inlet (from left) ux[l] = vx[l] + tau*( -vx[l] * dx1_forward(l, vx) - vy[l] * dy1(l, vx) + (dx2_forward(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_forward(l, mu) / MM ); uy[l] = tau * ( -vx[l] * dx1_forward(l, vy) - vy[l] * dy1(l, vy) + (dx2_forward(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM ); break; case 10: //outlet (to right) ux[l] = vx[l] + tau*( -vx[l] * dx1_back(l, vx) - vy[l] * dy1(l, vx) + (dx2_back(l, vx) + dy2(l, vx)) / Re - C0[l] * dx1_back(l, mu) / MM //! ); uy[l] = tau * ( -vx[l] * dx1_back(l, vy) - vy[l] * dy1(l, vy) + (dx2_back(l, vy) + dy2(l, vy)) / Re - C0[l] * dy1(l, mu) / MM //! ); break; default: break; } } } __global__ void concentration_upstream(double *C, double *C0, double *vx, double *vy, double *mu) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner C[l] = C0[l] + tau * ( -VgradF(l, C0, vx, vy) + (dx2(l, mu) + dy2(l, mu)) / Pe ); break; case 1: //left rigid C[l] = dx1_eq_0_forward(l, C0); break; case 2: //upper rigid C[l] = dy1_eq_0_down(l, C0); break; case 3: //right rigid C[l] = dx1_eq_0_back(l, C0); break; case 4: //lower rigid C[l] = dy1_eq_0_up(l, C0); break; case 5: //left upper rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_down(l, C0)); break; case 6: //right upper rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_down(l, C0)); break; case 7: //right lower rigid corner C[l] = 0.5* (dx1_eq_0_back(l, C0) + dy1_eq_0_up(l, C0)); break; case 8: //left lower rigid corner C[l] = 0.5* (dx1_eq_0_forward(l, C0) + dy1_eq_0_up(l, C0)); break; case 9: //inlet (from left) C[l] = -0.5; break; case 10://outlet (to right) //C[l] = dx1_eq_0_back(l, C0); C[l] = extrapolate_forward(l, C0); break; default: break; } } } __global__ void chemical_potential_upstream(double *mu, double *C) { unsigned int l = threadIdx.x + blockIdx.x*blockDim.x; if (l < n) { switch (t[l]) { case 0: //inner mu[l] = -Gr* r_gamma(l) //nu takoe + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - Ca*(dx2(l, C) + dy2(l, C)); break; case 1: //left rigid mu[l] = dx1_eq_0_forward(l, mu); break; case 2: //upper rigid mu[l] = dy1_eq_0_down(l, mu); break; case 3: //right rigid mu[l] = dx1_eq_0_back(l, mu); break; case 4: //lower rigid mu[l] = dy1_eq_0_up(l, mu); break; case 5: //left upper rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_down(l, mu)); break; case 6: //right upper rigid corner 
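			// corner nodes: average the two one-sided zero-flux (d(mu)/dn = 0) extrapolations used on the adjacent walls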
mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_down(l, mu)); break; case 7: //right lower rigid corner mu[l] = 0.5* (dx1_eq_0_back(l, mu) + dy1_eq_0_up(l, mu)); break; case 8: //left lower rigid corner mu[l] = 0.5* (dx1_eq_0_forward(l, mu) + dy1_eq_0_up(l, mu)); break; case 9: //inlet (from left) mu[l] = -Ca*dx2_forward(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - Gr* r_gamma(l); //dx1_eq_0_forward(l, mu); break; case 10://outlet (to right) //mu[l] = -Ca*dx2_back(l, C) - Ca*dy2(l, C) + 2.0 * A * C[l] + 4.0 * pow(C[l], 3) - Gr* r_gamma(l); //dx1_eq_0_back(l, mu); mu[l] = extrapolate_forward(l, mu); break; default: break; } } } __global__ void swap_one(double* f_old, double* f_new) { unsigned int l = blockIdx.x*blockDim.x + threadIdx.x; if (l < n) f_old[l] = f_new[l]; } __global__ void swap_3(double* f1_old, double* f1_new, double* f2_old, double* f2_new, double* f3_old, double* f3_new) { unsigned int l = blockIdx.x*blockDim.x + threadIdx.x; if (l < n) { f1_old[l] = f1_new[l]; f2_old[l] = f2_new[l]; f3_old[l] = f3_new[l]; } } struct cross { int nx[5], ny[5]; int offset[5]; int size[5], total_size; int idx, idy, id; int block[5]; __host__ __device__ void set_geometry(unsigned int length, unsigned int height) { nx[0] = height; ny[0] = height; nx[1] = length - 1; ny[1] = height; nx[2] = height; ny[2] = length - 1; nx[3] = length - 1; ny[3] = height; nx[4] = height; ny[4] = length - 1; total_size = 0; for (int i = 0; i < 5; i++) { size[i] = (nx[i] + 1)*(ny[i] + 1); offset[i] = nx[i] + 1; total_size += size[i]; } }; __host__ __device__ void set_geometry_narrow_tubes(unsigned int L, /*length of horizontal tube*/ unsigned int H, /*length(height) of vertical tube*/ unsigned int D /*diameter(width) of tube*/) { nx[0] = D; ny[0] = D; nx[1] = L - 1; ny[1] = D; nx[2] = D; ny[2] = H - 1; nx[3] = L - 1; ny[3] = D; nx[4] = D; ny[4] = H - 1; total_size = 0; for (int i = 0; i < 5; i++) { size[i] = (nx[i] + 1)*(ny[i] + 1); offset[i] = nx[i] + 1; total_size += size[i]; } } __host__ __device__ void delete_block(int i) { total_size -= size[i]; nx[i] = -1; ny[i] = -1; offset[i] = 0; size[i] = 0; }; __host__ __device__ void set_block(int add) { block[0] = 0 + add; block[1] = block[0] + size[0]; block[2] = block[1] + size[1]; block[3] = block[2] + size[2]; block[4] = block[3] + size[3]; } //~cross() {} }; struct multi_cross { cross *Mcr; int *l, *t, *I, *J, *J_back; int Mx, My, Msize, Moffset, OFFSET; unsigned int iter = 0; unsigned int TOTAL_SIZE = 0; int *n1, *n2, *n3, *n4; unsigned int nxg, nyg, ng; unsigned int nx, ny, offset; double *C0, *C, *p, *p0, *ux, *uy, *vx, *vy, *mu; double LX, LY; //integer global inxed i unsigned int iG(unsigned int l) { return (J_back[l] - (J_back[l] / OFFSET)*OFFSET); } //integer global index j unsigned int jG(unsigned int l) { return (J_back[l] / OFFSET); } void set_global_size(int input_nx, int input_ny, int input_Mx, int input_My) { Mx = input_Mx - 1; My = input_My - 1; Msize = input_Mx*input_My; Moffset = input_Mx; //Mcr.resize(Msize, cr); Mcr = new cross[Msize]; for (int i = 0; i < Msize; i++) { Mcr[i].set_geometry(input_nx, input_ny); } for (int i = 0; i <= Mx; i++) { for (int j = 0; j <= My; j++) { Mcr[i + Moffset*j].id = i + Moffset*j; Mcr[i + Moffset*j].idx = i; Mcr[i + Moffset*j].idy = j; if (j == 0) Mcr[i + Moffset*j].delete_block(4); if (j == My) Mcr[i + Moffset*j].delete_block(2); } } for (int i = 0; i < Msize; i++) { Mcr[i].set_block(TOTAL_SIZE); TOTAL_SIZE += Mcr[i].total_size; } } void set_global_size_narrow_tubes(int input_L, int input_H, int input_D, int input_Mx, 
int input_My) { Mx = input_Mx - 1; My = input_My - 1; Msize = input_Mx*input_My; Moffset = input_Mx; //Mcr.resize(Msize, cr); Mcr = new cross[Msize]; for (int i = 0; i < Msize; i++) { Mcr[i].set_geometry_narrow_tubes(input_L, input_H, input_D); } for (int i = 0; i <= Mx; i++) { for (int j = 0; j <= My; j++) { Mcr[i + Moffset*j].id = i + Moffset*j; Mcr[i + Moffset*j].idx = i; Mcr[i + Moffset*j].idy = j; if (j == 0) Mcr[i + Moffset*j].delete_block(4); if (j == My) Mcr[i + Moffset*j].delete_block(2); } } for (int i = 0; i < Msize; i++) { Mcr[i].set_block(TOTAL_SIZE); TOTAL_SIZE += Mcr[i].total_size; } } void set_type() { if (Msize == 0) { printf("hop hey la la ley, stop it, bro, ya doin it wron' \n"); } l = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { l[i] = 0; t[i] = 0; } unsigned int k; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { l[iter] = iter; if (q == 0) { if (i == 0 && j == Mcr[k].ny[q]) t[iter] = 5; if (i == Mcr[k].nx[q] && j == Mcr[k].ny[q]) t[iter] = 6; if (i == Mcr[k].nx[q] && j == 0) t[iter] = 7; if (i == 0 && j == 0) t[iter] = 8; if (i == 0 && Mcr[k].size[1] == 0) t[iter] = 1; if (i == Mcr[k].nx[q] && Mcr[k].size[3] == 0) t[iter] = 3; if (j == 0 && Mcr[k].size[4] == 0) t[iter] = 4; if (j == Mcr[k].ny[q] && Mcr[k].size[2] == 0) t[iter] = 2; } if (q == 2) { if (i == 0) t[iter] = 1; if (i == Mcr[k].nx[q]) t[iter] = 3; } if (q == 4) { if (i == 0) t[iter] = 1; if (i == Mcr[k].nx[q]) t[iter] = 3; } if (im == 0 && i == 0 && q == 1) t[iter] = 9; if (im == Mx && i == Mcr[k].nx[q] && q == 3) t[iter] = 10; if (q == 1) { if (j == Mcr[k].ny[q]) t[iter] = 2; if (j == 0) t[iter] = 4; } if (q == 3) { if (j == Mcr[k].ny[q]) t[iter] = 2; if (j == 0) t[iter] = 4; } iter++; } } } } } } void set_type_B() { int l, L; //int l1, l2, l3, l4; //inner for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (n1[L] != -1 && n2[L] != -1 && n3[L] != -1 && n4[L] != -1) t[L] = 0; } } } //rigid walls for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; //l1 = i - 1 + OFFSET*j; l2 = i + OFFSET*j + OFFSET; l3 = i + 1 + OFFSET*j; l4 = i + OFFSET*j - OFFSET; if (I[l] == 1) { if (I[l] == 1) { if (n1[L] == -1 && n2[L] != -1 && n3[L] != -1 && n4[L] != -1) t[L] = 1; if (n1[L] != -1 && n2[L] == -1 && n3[L] != -1 && n4[L] != -1) t[L] = 2; if (n1[L] != -1 && n2[L] != -1 && n3[L] == -1 && n4[L] != -1) t[L] = 3; if (n1[L] != -1 && n2[L] != -1 && n3[L] != -1 && n4[L] == -1) t[L] = 4; } } } } //corners for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; //cout << i << " " << j << " " << l << " " << endl; pause if (I[l] == 1) { if (n2[n1[L]] == -1 && n1[L] != -1 && n2[L] != -1) t[L] = 5; if (n2[n3[L]] == -1 && n3[L] != -1 && n2[L] != -1) t[L] = 6; if (n3[n4[L]] == -1 && n3[L] != -1 && n4[L] != -1) t[L] = 7; if (n1[n4[L]] == -1 && n1[L] != -1 && n4[L] != -1) t[L] = 8; } } } //inlet, outlet for (unsigned int i = 0; i <= nxg; i = i + nxg) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (i == 0) { t[L] = 9; if (t[n3[L]] == 2) t[L] = 2; if (t[n3[L]] == 4) t[L] = 4; } if (i == nxg) { t[L] = 10; if (t[n1[L]] == 2) t[L] = 2; if (t[n1[L]] == 4) t[L] = 4; 
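				// an inlet/outlet node whose interior neighbour is an upper (type 2) or lower (type 4) wall keeps
				// the wall type, so the rigid-wall condition wins at the duct corners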
} } } } /* //near border for (int i = 0; i <= nxg; i = i + nxg) { for (int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (t[n1[L]] == 1) t[L] = 11; if (t[n2[L]] == 2) t[L] = 12; if (t[n3[L]] == 3) t[L] = 13; if (t[n4[L]] == 4) t[L] = 14; if (t[n1[L]] == 1 && t[n2[L]] == 2) t[L] = 15; if (t[n2[L]] == 2 && t[n3[L]] == 3) t[L] = 16; if (t[n3[L]] == 3 && t[n4[L]] == 4) t[L] = 17; if (t[n4[L]] == 4 && t[n1[L]] == 1) t[L] = 18; } } */ } void set_neighbor() { if (Msize == 0 || iter == 0) { printf("hop hey la la ley, stop it, bro, ya doin it wron' \n"); } n1 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n2 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n3 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n4 = (int*)malloc(TOTAL_SIZE * sizeof(int)); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { n1[i] = -1; n2[i] = -1; n3[i] = -1; n4[i] = -1; } unsigned int k, it = 0; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { //cout << i << " " << j << " " << q << " " << it<< endl; //joint central pore and tubes if (q == 0) { if (i == 0 && (t[it] == 0 || t[it] == 2 || t[it] == 4 || t[it] == 5 || t[it] == 8)) { //n1[it] = it - (i + Mcr[k].offset[0]) + Mcr[k].size[0] + Mcr[k].nx[1] + Mcr[k].offset[1] * j; int in = Mcr[k].block[1] + Mcr[k].nx[1] + Mcr[k].offset[1] * j; n1[it] = in; n3[in] = it; } if (i == Mcr[k].nx[0] && (t[it] == 0 || t[it] == 2 || t[it] == 4 || t[it] == 6 || t[it] == 7)) { int in = Mcr[k].block[3] + Mcr[k].offset[3] * j; n3[it] = in; n1[in] = it; } if (j == 0 && (t[it] == 0 || t[it] == 1 || t[it] == 3 || t[it] == 7 || t[it] == 8)) { int in = Mcr[k].block[4] + i + Mcr[k].offset[4] * Mcr[k].ny[4]; n4[it] = in; n2[in] = it; } if (j == Mcr[k].ny[0] && (t[it] == 0 || t[it] == 1 || t[it] == 3 || t[it] == 5 || t[it] == 6)) { int in = Mcr[k].block[2] + i; n2[it] = in; n4[in] = it; } } //inner nodes if (i < Mcr[k].nx[q]) n3[it] = it + 1; if (i > 0) n1[it] = it - 1; if (j < Mcr[k].ny[q]) n2[it] = it + Mcr[k].offset[q]; if (j > 0) n4[it] = it - Mcr[k].offset[q]; //borders and inlet/outlet if (i == 0 && (t[it] == 1 || t[it] == 9)) { n1[it] = it + 2; n3[it] = it + 1; } if (i == Mcr[k].nx[q] && (t[it] == 3 || t[it] == 10)) { n1[it] = it - 1; n3[it] = it - 2; } if (t[it] == 4) { n2[it] = it + Mcr[k].offset[q]; n4[it] = it + 2 * Mcr[k].offset[q]; } if (t[it] == 2) { n2[it] = it - 2 * Mcr[k].offset[q]; n4[it] = it - Mcr[k].offset[q]; } //join crosses if (q == 3) { if (i == Mcr[k].nx[3] && (t[it] == 0 || t[it] == 2 || t[it] == 4) && im < Mx) { int in = Mcr[k + 1].block[1] + Mcr[k + 1].offset[1] * j; n3[it] = in; n1[in] = it; } } if (q == 2) { if (j == Mcr[k].ny[2] && (t[it] == 0 || t[it] == 1 || t[it] == 3) && jm < My) { int in = Mcr[k + Moffset].block[4] + i; n4[in] = it; n2[it] = in; //printf("n4 = %i n2 = %i\n", n4[in], n2[it]); } } //if (n2[it] == -1) printf("q=%i t=%i i=%i j=%i nx=%i ny=%i \n", q, t[it], i, j, Mcr[k].nx[q], Mcr[k].ny[q]); it++; } } } } } } void set_neighbor_B() { int l, L, l1, l2, l3, l4; for (unsigned int j = 0; j <= nyg; j++) { for (unsigned int i = 0; i <= nxg; i++) { l = i + OFFSET*j; L = J[l]; l1 = i - 1 + OFFSET*j; l2 = i + OFFSET*j + OFFSET; l3 = i + 1 + OFFSET*j; l4 = i + OFFSET*j - OFFSET; if (I[l] == 1) { if (i > 0) if (I[l1] == 1) n1[L] = J[l1]; if (i < nxg) if (I[l3] == 1) n3[L] = J[l3]; if (j < nyg) if (I[l2] == 1) n2[L] = J[l2]; if (j > 0) if (I[l4] == 1) n4[L] = J[l4]; } else { } } } 
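	// grid nodes with I[l] == 0 lie outside the fluid domain; their neighbour indices stay at -1,
	// which the type classification (set_type_B) relies on to detect walls and corners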
} void set_global_id() { nxg = 0, nyg = 0; for (int im = 0; im <= Mx; im++) nxg += Mcr[im].nx[0] + 1 + Mcr[im].nx[1] + 1 + Mcr[im].nx[3] + 1; for (int jm = 0; jm <= My; jm++) nyg += Mcr[jm*Moffset].ny[0] + 1 + Mcr[jm*Moffset].ny[2] + 1 + Mcr[jm*Moffset].ny[4] + 1; if (nxg != 0) nxg--; if (nyg != 0) nyg--; I = new int[(nxg + 1)*(nyg + 1)]; J = new int[(nxg + 1)*(nyg + 1)]; J_back = new int[TOTAL_SIZE]; OFFSET = nxg + 1; for (unsigned int i = 0; i < (nxg + 1)*(nyg + 1); i++) { I[i] = 0; J[i] = -1; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { J_back[i] = -1; } int *shift_x, *shift_y; shift_x = new int[Mx + 1]; shift_y = new int[My + 1]; shift_x[0] = 0; shift_y[0] = 0; for (int im = 1; im <= Mx; im++) shift_x[im] = (Mcr[im - 1].nx[0] + 1 + Mcr[im - 1].nx[1] + 1 + Mcr[im - 1].nx[3] + 1 + shift_x[im - 1]); for (int jm = 1; jm <= My; jm++) shift_y[jm] = (Mcr[(jm - 1)*Moffset].ny[0] + 1 + Mcr[(jm - 1)*Moffset].ny[2] + 1 + Mcr[(jm - 1)*Moffset].ny[4] + 1 + shift_y[jm - 1]); if (Msize == 0) { printf("set_global_id , hop hey la la ley, stop it, bro, ya doin it wron' \n"); } unsigned int k, it = 0, in, ii, jj; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { if (q == 1) { ii = i + shift_x[im]; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 0) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 3) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1 + Mcr[k].nx[0] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 2) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1) + (Mcr[k].ny[0] + 1); } if (q == 4) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm]; } in = ii + OFFSET*jj; I[in] = 1; J[in] = it; J_back[it] = in; it++; } } } } } } void set_global_id_B() { nxg = 0, nyg = 0; for (int im = 0; im <= Mx; im++) nxg += Mcr[im].nx[0] + 1 + Mcr[im].nx[1] + 1 + Mcr[im].nx[3] + 1; for (int jm = 0; jm <= My; jm++) nyg += Mcr[jm*Moffset].ny[0] + 1 + Mcr[jm*Moffset].ny[2] + 1 + Mcr[jm*Moffset].ny[4] + 1; if (nxg != 0) nxg--; if (nyg != 0) nyg--; I = new int[(nxg + 1)*(nyg + 1)]; J = new int[(nxg + 1)*(nyg + 1)]; J_back = new int[TOTAL_SIZE]; n1 = new int[TOTAL_SIZE]; n2 = new int[TOTAL_SIZE]; n3 = new int[TOTAL_SIZE]; n4 = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; OFFSET = nxg + 1; for (unsigned int i = 0; i < (nxg + 1)*(nyg + 1); i++) { I[i] = 0; J[i] = -1; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { J_back[i] = -1; n1[i] = -1; n2[i] = -1; n3[i] = -1; n4[i] = -1; t[i] = -1; } int *shift_x, *shift_y; shift_x = new int[Mx + 1]; shift_y = new int[My + 1]; shift_x[0] = 0; shift_y[0] = 0; for (int im = 1; im <= Mx; im++) shift_x[im] = (Mcr[im - 1].nx[0] + 1 + Mcr[im - 1].nx[1] + 1 + Mcr[im - 1].nx[3] + 1 + shift_x[im - 1]); for (int jm = 1; jm <= My; jm++) shift_y[jm] = (Mcr[(jm - 1)*Moffset].ny[0] + 1 + Mcr[(jm - 1)*Moffset].ny[2] + 1 + Mcr[(jm - 1)*Moffset].ny[4] + 1 + shift_y[jm - 1]); if (Msize == 0) { printf("set_global_id , hop hey la la ley, stop it, bro, ya doin it wron' \n"); } unsigned int k, it = 0, in, ii, jj; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { if (q == 1) { ii = i + shift_x[im]; jj = j + 
shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 0) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 3) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1 + Mcr[k].nx[0] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1); } if (q == 2) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm] + (Mcr[k].ny[4] + 1) + (Mcr[k].ny[0] + 1); } if (q == 4) { ii = i + shift_x[im] + Mcr[k].nx[1] + 1; jj = j + shift_y[jm]; } in = ii + OFFSET*jj; I[in] = 1; J[in] = it; J_back[it] = in; it++; } } } } } } void set_global_size_box(int input_nx, int input_ny) { nx = input_nx; nxg = nx; ny = input_ny; nyg = ny; offset = nx + 1; OFFSET = offset; TOTAL_SIZE = (input_nx + 1) * (input_ny + 1); } void set_global_id_box() { I = new int[(nx + 1)*(ny + 1)]; J = new int[(nx + 1)*(ny + 1)]; J_back = new int[TOTAL_SIZE]; OFFSET = nx + 1; for (unsigned int i = 0; i < (nx + 1)*(ny + 1); i++) { I[i] = 0; J[i] = -1; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { J_back[i] = -1; } unsigned int k, it = 0; // in, ii, jj; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; I[k] = 1; J[k] = k; J_back[k] = k; it++; } } } void set_type_box() { l = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { l[i] = 0; t[i] = 0; } unsigned int k; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (i == 0) t[k] = 1; if (i == nx) t[k] = 3; if (j == 0) t[k] = 4; if (j == ny) t[k] = 2; iter++; } } } void set_neighbor_box() { n1 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n2 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n3 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n4 = (int*)malloc(TOTAL_SIZE * sizeof(int)); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { n1[i] = -1; n2[i] = -1; n3[i] = -1; n4[i] = -1; } unsigned int k, it = 0; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (t[k] == 0) { n1[k] = k - 1; n2[k] = k + offset; n3[k] = k + 1; n4[k] = k - offset; } if (t[k] == 2) { n1[k] = k - 1; n3[k] = k + 1; n4[k] = k - offset; } if (t[k] == 4) { n1[k] = k - 1; n2[k] = k + offset; n3[k] = k + 1; } if (t[k] == 9 || t[k] == 1) { n3[k] = k + 1; n4[k] = k - offset; n2[k] = k + offset; } if (t[k] == 10 || t[k] == 3) { n1[k] = k - 1; n4[k] = k - offset; n2[k] = k + offset; } if (i == nxg) { n3[k] = -1; } if (i == 0) { n1[k] = -1; } if (j == nyg) { n2[k] = -1; } if (j == 0) { n4[k] = -1; } it++; } } } void set_type_tube() { l = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { l[i] = 0; t[i] = 0; } unsigned int k; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (i == 0) t[k] = 9; if (i == nx) t[k] = 10; if (j == 0) t[k] = 4; if (j == ny) t[k] = 2; iter++; } } } void write_field(double *f, string file_name, double time, int step) { #ifdef __linux__ ofstream to_file(("fields/" + file_name + ".dat").c_str()); #endif #ifdef _WIN32 ofstream to_file(("fields\\" + file_name + ".dat").c_str()); #endif int l, L; to_file << time << endl; for (unsigned int j = 0; j <= nyg; j = j + step) { for (unsigned int i = 0; i <= nxg; i = i + step) { l = i + OFFSET*j; L = J[l]; //if (J[l] == J[l]) to_file << i << " " << j << " " << f[L] << endl; if (I[l] == 1) { //to_file << i << " " << j << " " << f[L] << " " << t[L] << " " << L << " " << n1[L] << " " << n2[L] << " " << n3[L] << " " << n4[L] << endl; to_file << i << " " << j << " " << f[L] << endl; } 
else { to_file << "skip" << endl; //to_file << i << " " << j << " " << NAN << endl; //to_file << i << " " << j << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << endl; } } } to_file.close(); } void write_linear_profile(string file_name, string head, double time, int step, double hx, double **f, int N_fields, int j_ = -1) { #ifdef __linux__ ofstream to_file(("horizontal_profile/" + file_name + ".dat").c_str()); #endif #ifdef _WIN32 ofstream to_file(("horizontal_profile\\" + file_name + ".dat").c_str()); #endif int l, L; to_file << head << " t=" << time << endl; //for (unsigned int j = 0; j <= nyg; j = j + step) { unsigned int j = nyg / 2; if (j_ > -1) j = j_; for (unsigned int i = 0; i <= nxg; i = i + step) { l = i + OFFSET*j; L = J[l]; //if (J[l] == J[l]) to_file << i << " " << j << " " << f[L] << endl; if (I[l] == 1) { to_file << i << " " << hx*i; for (int k = 0; k < N_fields; k++) { to_file << " " << f[k][L]; } to_file << endl; } else { to_file << "skip" << endl; //to_file << i << " " << j << " " << NAN << endl; //to_file << i << " " << j << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << endl; } } //} to_file.close(); } void write_section_profile(string file_name, string head, double time, int step, double hy, double **f, int N_fields, unsigned int i) { #ifdef __linux__ ofstream to_file(("vertical_profile/" + file_name + ".dat").c_str()); #endif #ifdef _WIN32 ofstream to_file(("vertical_profile\\" + file_name + ".dat").c_str()); #endif int l, L; to_file << head << " t=" << time << endl; for (unsigned int j = 0; j <= nyg; j = j + step) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { to_file << j << " " << hy*j; for (int k = 0; k < N_fields; k++) { to_file << " " << f[k][L]; } to_file << endl; } else { to_file << "skip" << endl; } } to_file.close(); } void write_field_tecplot(double blank, double hx, double hy, string file_name, double time, int step, int iter, double **f, unsigned int N_fields, string head) { ofstream to_file; if (iter == 1) to_file.open((file_name + ".dat").c_str()); else to_file.open((file_name + ".dat").c_str(), ofstream::app); //make time to be string type stringstream ss; ss << time; string str_time = ss.str(); //count the number of x and y elements unsigned int II = 0, JJ = 0; for (unsigned int j = 0; j <= nyg; j = j + step) JJ++; for (unsigned int i = 0; i <= nxg; i = i + step) II++; //to_file << "VARIABLES=\"x\",\"y\",\"C\",\"mu\",\"vx\",\"vy\",\"p\"" << endl; to_file << head << endl; to_file << "ZONE T=\"" + str_time + "\", " << "I=" << II << ", J=" << JJ << endl; int l, L; //to_file << time << endl; for (unsigned int j = 0; j <= nyg; j = j + step) { for (unsigned int i = 0; i <= nxg; i = i + step) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { //to_file << hx*i << " " << hy*j << " " << C[L] << " " << mu[L] << " " << vx[L] << " " << vy[L] << " " << p[L] << endl; to_file << hx*i << " " << hy*j; for (int k = 0; k < N_fields; k++) { to_file << " " << f[k][L]; } to_file << endl; } else { to_file << hx*i << " " << hy*j; for (int k = 0; k < N_fields; k++) { to_file << " " << blank; } to_file << endl; } } } //to_file.close(); } //left to be normal void left_normal_in(int first, int last) { unsigned int k, it = 0; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { //if (Mcr[k].idx == 0 && Mcr[k].idy >= 
in_y) if (Mcr[k].idx == 0 && (Mcr[k].idy < first || Mcr[k].idy > last)) if (t[it] == 9) t[it] = 1; it++; } } } } } } void left_normal_out(int first, int last) { unsigned int k, it = 0; for (int jm = 0; jm <= My; jm++) { for (int im = 0; im <= Mx; im++) { k = im + Moffset*jm; for (unsigned int q = 0; q < 5; q++) { if (Mcr[k].size[q] == 0) continue; for (int j = 0; j <= Mcr[k].ny[q]; j++) { for (int i = 0; i <= Mcr[k].nx[q]; i++) { //if (Mcr[k].idx == Mx && Mcr[k].idy <= My - out_y) if (Mcr[k].idx == Mx && (Mcr[k].idy < first || Mcr[k].idy > last)) if (t[it] == 10) t[it] = 3; it++; } } } } } } void save(double *vx, double *vy, double *p, double *C, double *mu, unsigned int i_time, unsigned int i_write, double timeq, double kk, unsigned int extended = 0, double* vib = NULL) { ofstream to_file("recovery.dat"); ofstream to_file2("recovery2.dat"); to_file << i_time << " " << i_write << " " << timeq << " " << kk << endl; to_file2 << i_time << " " << i_write << " " << timeq << " " << kk << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { to_file << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { to_file2 << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; } to_file.close(); to_file2.close(); } void save(double** f, unsigned int n, unsigned int i_time, unsigned int i_write, double timeq, double kk) { ofstream to_file("recovery.dat"); ofstream to_file2("recovery2.dat"); to_file << i_time << " " << i_write << " " << timeq << " " << kk << endl; to_file2 << i_time << " " << i_write << " " << timeq << " " << kk << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { for (unsigned int k = 0; k < n; k++) { to_file << f[k][i] << " "; } to_file << endl; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { for (unsigned int k = 0; k < n; k++) { to_file2 << f[k][i] << " "; } to_file2 << endl; } to_file.close(); to_file2.close(); } void recover(double *vx, double *vy, double *p, double *C, double *mu, unsigned int &i_time, unsigned int &i_write, double &timeq, unsigned int &kk, unsigned int extended = 0, double *vib = NULL) { ifstream from_file("recovery.dat"); string str; string substr; stringstream ss; getline(from_file, str); ss << str; ss >> substr; i_time = atoi(substr.c_str()); ss >> substr; i_write = atoi(substr.c_str()); ss >> substr; timeq = atof(substr.c_str()); ss >> substr; kk = atoi(substr.c_str()); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; ss >> substr; vx[i] = atof(substr.c_str()); ss >> substr; vy[i] = atof(substr.c_str()); ss >> substr; p[i] = atof(substr.c_str()); ss >> substr; C[i] = atof(substr.c_str()); if (extended) ss >> substr; vib[i] = atof(substr.c_str()); } from_file.close(); } void recover(double** f, unsigned int n, unsigned int& i_time, unsigned int& i_write, double& timeq, unsigned int& kk) { ifstream from_file("recovery.dat"); string str; string substr; stringstream ss; getline(from_file, str); ss << str; ss >> substr; i_time = atoi(substr.c_str()); ss >> substr; i_write = atoi(substr.c_str()); ss >> substr; timeq = atof(substr.c_str()); ss >> substr; kk = atoi(substr.c_str()); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; for (unsigned int k = 0; k < n; k++) { ss >> substr; f[k][i] = atof(substr.c_str()); } } from_file.close(); } void read_concentration(double *C, string file_name, int column, int skip_lines = 1, int invert_C = 1) { ifstream 
from_file(file_name); string str; string substr; stringstream ss; for (int k = 0; k < skip_lines; k++) { getline(from_file, str); } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; for (int k = 0; k < column; k++) { ss >> substr; } C[i] = atof(substr.c_str()); if (invert_C == 1) C[i] = C[i] * (-1); } from_file.close(); } void read_grid_geometry() { ifstream from_file("GRID.dat"); if (from_file.good()) { cout << endl << "GRID.dat has been read" << endl << endl; } string str; string substr; stringstream ss; getline(from_file, str); ss << str; ss >> substr; nx = nxg = atoi(substr.c_str()); ss >> substr; ny = nyg = atoi(substr.c_str()); ss >> substr; offset = OFFSET = atoi(substr.c_str()); ss >> substr; TOTAL_SIZE = atoi(substr.c_str()); getline(from_file, str); //head n1 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n2 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n3 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n4 = (int*)malloc(TOTAL_SIZE * sizeof(int)); J_back = (int*)malloc(TOTAL_SIZE * sizeof(int)); t = (int*)malloc(TOTAL_SIZE * sizeof(int)); I = new int[(nxg + 1) * (nyg + 1)]; J = new int[(nxg + 1) * (nyg + 1)]; int L, l; for (int j = 0; j <= nyg; j++) { for (int i = 0; i <= nxg; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; //read a line L = i + OFFSET * j; ss >> substr; I[L] = atoi(substr.c_str()); ss >> substr; // i ss >> substr; // j ss >> substr; J[L] = atoi(substr.c_str()); if (I[L] == 1) { l = J[L]; ss >> substr; t[l] = atoi(substr.c_str()); ss >> substr; J_back[l] = atoi(substr.c_str()); ss >> substr; n1[l] = atoi(substr.c_str()); ss >> substr; n2[l] = atoi(substr.c_str()); ss >> substr; n3[l] = atoi(substr.c_str()); ss >> substr; n4[l] = atoi(substr.c_str()); } } } from_file.close(); } void linear_pressure(double *p, double hx, double hy, double cosA, double sinA, double Lx, double Ly, double coefficient = 1) { for (unsigned int l = 0; l < TOTAL_SIZE; l++) { p[l] = coefficient*((Lx - hx*iG(l))*cosA - (Ly - hy*jG(l))*sinA); } } //C0(qx,qy)=0.5d0*dtanh((qx*hx-0.5d0)/delta) void fill_gradually(double *C, double hx, double hy, double delta, double shift) { unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); C[l] = 0.5*tanh((i*hx - shift) / delta); } } void fill_with_sphere(double *C, double hx, double hy, double x0, double y0, double R0, double C_outer, double C_inner) { unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); double x = i*hx, y = j*hy; if (sqrt(pow(x - x0, 2) + pow(y - y0, 2)) < R0) { C[l] = C_inner; } else { C[l] = C_outer; } } } void fill_horizontal_way(double *C, double hx, double hy, double eq_C, double y0, double amp, double k, double delta) { unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); double x = i*hx, y = j*hy; C[l] = eq_C*(tanh((y - y0 - amp*cos(k*x)) / delta)); } } void fast_test_writing(double *f) { ofstream to_file("test_field.dat"); to_file << "i, j, f" << endl; int l, L; for (unsigned int j = 0; j <= nyg; j = j++) { for (unsigned int i = 0; i <= nxg; i = i++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { to_file << i << " " << j << " " << f[L] << endl; } else { to_file << "skip" << endl; } } } to_file.close(); } double isoline(double hx, double hy, double *C, signed char *mark, double *fx, double *fy, double val) { //integer, intent(in) ::nx, ny // real * 8, intent(in) ::Lx, C(0:nx, 0 : ny), val //real * 8, intent(inout) ::fx(0:nx - 1, 0 : ny), fy(0:nx, 0 : ny - 1) 
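	// The pass below is a marching-squares style sweep: every 2x2 cell is classified by the signs stored in
	// mark[], the crossing points of the level set C = val are written to fx/fy, and the summed segment
	// lengths give the total interface length returned by isoline().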
//integer, intent(inout) ::mark(0:nx, 0 : ny) //integer qx, qy, i, j //real * 8 hx, hy, len //real(8), parameter::nan = transfer(-2251799813685248_int64, 1._real64) double len = 0; double nan = NAN; unsigned int i, j, ii, jj; unsigned int lr, lu, lru; //l right, up and right-up for (unsigned int l = 0; l < TOTAL_SIZE; l++) { fx[l] = nan; fy[l] = nan; } // int l = 0; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (C[l] < val) mark[l] = -1; else if (C[l] > val) mark[l] = +1; else mark[l] = 0; } for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); if (t[l] == 2 || t[l] == 3 || t[l] == 10 || t[l] == 6 || t[l] == 7) continue; if (t[n2[l]] == 10 || t[n4[l]] == 10) continue; if (n3[l] == -1 || n2[l] == -1) continue; ii = iG(n3[l]); jj = jG(n2[l]); //if (ii > nxg || jj > nyg) continue; //cout << "l " << l << endl; lr = n3[l]; lu = n2[l]; lru = n3[n2[l]]; if (abs(mark[l] + mark[lr] + mark[lu] + mark[lru]) == 4) continue; else { //case a //************ //************ //************ //************ //−−−−−−−−−−−− if (mark[l] == 0 && mark[lr] == 0) { fy[l] = hy*j; fy[lr] = hy*j; len = len + hx; continue; } //case b //| *********** //| *********** //| *********** //| *********** //| *********** if (mark[l] == 0 && mark[lu] == 0) { fx[l] = hx*i; fx[lu] = hx*i; len = len + hy; continue; } //case 1 //************ //************ //−−−−−−−−−−−− //************ //************ if (mark[l] * mark[lu] <= 0 && mark[lr] * mark[lru] <= 0 && mark[l] * mark[lu] + mark[lr] * mark[lru] != 0) { fy[l] = (val - C[l])*hy / (C[lu] - C[l]) + hy*j; //left fy[lr] = (val - C[lr])*hy / (C[lru] - C[lr]) + hy*j; //right len = len + sqrt(hx*hx + pow(fy[lr] - fy[l], 2)); continue; } //case 2 //***** | ****** //***** | ****** //***** | ****** //***** | ****** //***** | ****** if (mark[l] * mark[lr] <= 0 && mark[lu] * mark[lru] <= 0 && mark[l] * mark[lr] + mark[lu] * mark[lru] != 0) { fx[l] = (val - C[l])*hx / (C[lr] - C[l]) + hx*i; //down fx[lu] = (val - C[lu])*hx / (C[lru] - C[lu]) + hx*i; //up len = len + sqrt(pow(fx[lu] - fx[l], 2) + hy*hy); continue; } //case 3 //***** | ****** //***** | ****** //−−−−−−****** //************ //************ if (mark[l] * mark[lu] <= 0 && mark[lu] * mark[lru] <= 0 && mark[l] * mark[lu] + mark[lu] * mark[lru] != 0) { fx[lu] = (val - C[lu])*hx / (C[lru] - C[lu]) + hx*i; //up fy[l] = (val - C[l])*hy / (C[lu] - C[l]) + hy*j; //left len = len + sqrt(pow(fx[lu] - hx*i, 2) + pow(fy[l] - hy*(jj), 2)); continue; } //case 4 //***** | ****** //***** | ****** //*****−−−−−−− //************ //************ if (mark[lr] * mark[lru] <= 0 && mark[lu] * mark[lru] <= 0 && mark[lr] * mark[lru] + mark[lu] * mark[lru] != 0) { fx[lu] = (val - C[lu])*hx / (C[lru] - C[lu]) + hx*i; //up fy[lr] = (val - C[lr])*hy / (C[lru] - C[lr]) + hy*j; //right len = len + sqrt(pow(fx[lu] - hx*(ii), 2) + pow(fy[lr] - hy*(jj), 2)); continue; } //case 5 //************ //************ //******−−−−−− //***** | ****** //***** | ****** if (mark[l] * mark[lr] <= 0 && mark[lr] * mark[lru] <= 0 && mark[l] * mark[lr] + mark[lr] * mark[lru] != 0) { fy[lr] = (val - C[lr])*hy / (C[lru] - C[lr]) + hy*j; //right fx[l] = (val - C[l])*hx / (C[lr] - C[l]) + hx*i; //down len = len + sqrt(pow(fx[l] - hx*(ii), 2) + pow(fy[lr] - hy*j, 2)); continue; } //case 6 //************ //************ //−−−−−−****** //***** | ****** //***** | ****** if (mark[l] * mark[lr] <= 0 && mark[l] * mark[lu] <= 0 && mark[l] * mark[lr] + mark[l] * mark[lu] != 0) { fy[l] = (val - C[l])*hy / (C[lu] - C[l]) + hy*j; //left fx[l] = (val - C[l])*hx / (C[lr] - C[l]) + 
hx*i; //down len = len + sqrt(pow(fx[l] - hx*i, 2) + pow(fy[l] - hy*j, 2)); continue; } }//end of main if } return len; } double volume(double hx, double hy, double *C, double lim) { double vol = 0; //unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (t[l] == 2 || t[l] == 3 || t[l] == 10 || t[l] == 6 || t[l] == 7) continue; if (abs(C[l]) < lim) vol += hx*hy; } return vol; } double change_sign_at_X(double hx, double hy, double *F, unsigned int j) { int l1, l2, L1, L2; double x = 0; double F_ = 0; //unsigned int j = nyg / 2; l1 = 0 + OFFSET*j; L1 = J[l1]; for (unsigned int i = 1; i <= nxg; i++) { l2 = i + OFFSET*j; L2 = J[l2]; if (I[l2] == 1) { if (F[L2] * F[L1] <= 0) { x = hx*(F_ - F[L1]) / (F[L2] - F[L1]) + ((i - 1)*hx); return x; } L1 = L2; } else { cout << "not a good sign you see it" << endl; exit(0); } } return x; } double pressure_jump(double hx, double hy, double *p, double x_, double border_width) { int l, L; unsigned int j = nyg / 2; double P1 = 0, P2 = 0; int i1, i2, n1 = 0, n2 = 0; i1 = (int)((x_ - border_width) / hx); i2 = (int)((x_ + border_width) / hx); for (unsigned int i = 0; i <= nxg; i++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (i < i1) { P1 += p[L]; n1++; } if (i > i2) { P2 += p[L]; n2++; } } else { cout << "not a good sign you see it" << endl; exit(0); } } P1 = P1 / n1; P2 = P2 / n2; double ret = abs(P2 - P1); if (!std::isfinite(ret)) return 0.0; else return ret; } double flow_rate(double hx, double hy, double *vx, double Ly, unsigned int i) { int l, L; double Q = 0.0; for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { Q += vx[L]; } else { cout << "not a good sign you see it" << endl; exit(0); } } Q = Q * hy / Ly; return Q; } double tension(double hx, double hy, double *C) { double ten = 0; //unsigned int lr, lu, lru; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (t[l] == 0) { ten += 0.25 / hx / hx*pow(C[n3[l]] - C[n1[l]], 2) + 0.25 / hy / hy*pow(C[n2[l]] - C[n4[l]], 2); } } return ten*hx*hy; } void X_averaged_in_each_phase(double hx, double hy, double *C, double *X, double &X1av, double &X2av, double &Xav, double level = 0.0) { Xav = 0; X1av = 0; /*plus*/ X2av = 0; /*minus*/ unsigned int n = 0, n2 = 0, n_plus = 0, n2_plus = 0, n_minus = 0, n2_minus = 0; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (t[l] == 0) { Xav += X[l]; n++; if (C[l] > level) { X1av += X[l]; n_plus++; } if (C[l] < -level) { X2av += X[l]; n_minus++; } } else { Xav += X[l] / 2; n2++; if (C[l] > level) { X1av += X[l] / 2; n2_plus++; } if (C[l] < -level) { X2av += X[l] / 2; n2_minus++; } } } if (n + n2 > 0) Xav /= (n + 0.5*n2); if (n_plus + n2_plus > 0) X1av /= (n_plus + 0.5*n2_plus); if (n_minus + n2_minus > 0) X2av /= (n_minus + 0.5*n2_minus); } #define DX(F) 0.5 / hx * (F[n3[l]] - F[n1[l]]) #define DY(F) 0.5 / hy * (F[n2[l]] - F[n4[l]]) #define DX2(F) 1.0 / (hx * hx) * (F[n3[l]] + F[n1[l]] - 2.0 * F[l]) #define DY2(F) 1.0 / (hy * hy) * (F[n2[l]] + F[n4[l]] - 2.0 * F[l]) //#define DXY(F) (-F[l - 1 + OFFSET] + F[l + 1 + OFFSET] - F[l + 1 - OFFSET] + f[l - 1 - OFFSET]) / hx / hy / 4.0; #define DXY(F) (-F[n1[n2[l]]] + F[n3[n2[l]]] - F[n3[n4[l]]] + F[n1[n4[l]]]) / hx / hy / 4.0 void curvature_direct(double *C, double hx, double hy, double *curv, double add = 0.0) { double dCx, dCy, abs_dC; unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); if (t[l] == 0) { dCx = DX(C); dCy = DY(C); abs_dC = sqrt(dCx*dCx + dCy*dCy); double abs_dC3 = abs_dC*abs_dC*abs_dC + add; curv[l] = (dCx*dCx*DY2(C) + 
dCy*dCy*DX2(C) - 2.0*dCx*dCy*DXY(C)) / abs_dC3; //if (abs_dC < 1e-6) curv[l] = 0; } else { curv[l] = 0.0; } } } void curvature_direct2(double *C, double hx, double hy, double *curv) { double dCx, dCy, abs_dC; unsigned int i, j; for (unsigned int l = 0; l < TOTAL_SIZE; l++) { i = iG(l); j = jG(l); if (t[l] == 0) { dCx = DX(C); dCy = DY(C); abs_dC = sqrt(dCx*dCx + dCy*dCy); double abs_dC3 = abs_dC*abs_dC*abs_dC; curv[l] = (dCx*dCx*DY2(C) + dCy*dCy*DX2(C) - 2.0*dCx*dCy*DXY(C)) / abs_dC3; if (abs_dC < 1e-6) curv[l] = 0; } else { curv[l] = 0.0; } } } void curvature_2_steps(double *C, double *nx, double *ny, double hx, double hy, double *curv) { //1 for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (t[l] == 0) { double dCx = DX(C); double dCy = DY(C); double abs_dC = sqrt(dCx*dCx + dCy*dCy) + 0.001; nx[l] = dCx / abs_dC; ny[l] = dCy / abs_dC; } else { nx[l] = 0.0; ny[l] = 0.0; } } //2 for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (t[l] == 0) { curv[l] = DX(nx) + DY(ny); } else { curv[l] = 0.0; } } } void check() { int l, L; ofstream write("geomSettingCheck.txt"); write << "i, j, 1, L, t[L], n1[L], n2[L], n3[L], n4[L]" << endl; for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[i + OFFSET*j] == 1) write << i << " " << j << " " << 1 << " " << L << " " << t[L] << " " << n1[L] << " " << n2[L] << " " << n3[L] << " " << n4[L] << endl; else write << i << " " << j << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << endl; } } write.close(); } unsigned int checkExit(double *C) { for (unsigned int l = 0; l < TOTAL_SIZE; l++) { if (t[l] == 10) { if (C[l] < 0) return 1; } } return 0; } }; struct multi_line { int *l, *t, *I, *J, *J_back; unsigned int line_N; // number of lines of the whole porous media unsigned int tube_N; // number of tubes per line unsigned int iter = 0; unsigned int TOTAL_SIZE = 0; unsigned int OFFSET; int *n1, *n2, *n3, *n4; unsigned int nxg, nyg; double *C0, *C, *p, *p0, *ux, *uy, *vx, *vy, *mu; unsigned int x; //length of porous block unsigned int y; //width of porous block unsigned int z; //width of capillary tube unsigned int line_x; //length of line unsigned int line_y; //width of line double hx, hy, Lx, Ly; unsigned int *gi, *gj; unsigned int shiftX = 0, shiftY = 0; vector <unsigned int> li, lj; void generate_levels(unsigned int x_in, unsigned int y_in, unsigned int z_in, unsigned int N_in, unsigned int tube_in) { x = x_in; y = y_in; z = z_in; line_N = N_in; tube_N = tube_in; line_x = 2 * (x + z); line_y = tube_N*(z + y) - y; hy = 1.0 / z; hx = hy; Lx = line_x*hx; Ly = line_y*hy; nyg = line_y; nxg = line_N*line_x + x; OFFSET = nxg + 1; J = new int[(nxg + 1)*(nyg + 1)]; I = new int[(nxg + 1)*(nyg + 1)]; for (unsigned int i = 0; i < (nxg + 1)*(nyg + 1); i++) { J[i] = -1; I[i] = 0; } // zero-level while (shiftY < line_y) { for (unsigned int j = 0; j <= z; j++) { for (unsigned int i = 0; i <= x - 1; i++) { //gi[i] = 1; gj[j] = 1; li.push_back(i + shiftX); lj.push_back(j + shiftY); iter++; } } shiftY += y + z; } cout << iter << endl; // main levels for (unsigned int C = 1; C <= line_N; C++) { //column shiftX += x; for (unsigned int i = 0; i <= z; i++) { for (unsigned int j = 0; j <= line_y; j++) { li.push_back(i + shiftX); lj.push_back(j); iter++; } } //blocks shiftX += z; shiftY = (y + z) / 2; while (shiftY < line_y) { for (unsigned int i = 1; i <= x - 1; i++) { for (unsigned int j = 0; j <= z; j++) { li.push_back(i + shiftX); lj.push_back(j + shiftY); iter++; } } shiftY += 
y + z; } //column shiftX += x; for (unsigned int i = 0; i <= z; i++) { for (unsigned int j = 0; j <= line_y; j++) { li.push_back(i + shiftX); lj.push_back(j); iter++; } } //blocks shiftX += z; shiftY = 0; while (shiftY < line_y) { for (unsigned int j = 0; j <= z; j++) { for (unsigned int i = 1; i <= x - 1; i++) { //gi[i] = 1; gj[j] = 1; li.push_back(i + shiftX); lj.push_back(j + shiftY); iter++; } if (C == line_N) { li.push_back(x + shiftX); lj.push_back(j + shiftY); iter++; } } shiftY += y + z; } } TOTAL_SIZE = iter; J_back = new int[TOTAL_SIZE]; n1 = new int[TOTAL_SIZE]; n2 = new int[TOTAL_SIZE]; n3 = new int[TOTAL_SIZE]; n4 = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { n1[i] = -1; n2[i] = -1; n3[i] = -1; n4[i] = -1; t[i] = -1; } for (unsigned int i = 0; i < iter; i++) { J_back[i] = li[i] + OFFSET*lj[i]; J[J_back[i]] = i; I[J_back[i]] = 1; } } void set_neighbor() { int l, L, l1, l2, l3, l4; for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; l1 = i - 1 + OFFSET*j; l2 = i + OFFSET*j + OFFSET; l3 = i + 1 + OFFSET*j; l4 = i + OFFSET*j - OFFSET; if (I[l] == 1) { if (i > 0) if (I[l1] == 1) n1[L] = J[l1]; if (i < nxg) if (I[l3] == 1) n3[L] = J[l3]; if (j < nyg) if (I[l2] == 1) n2[L] = J[l2]; if (j > 0) if (I[l4] == 1) n4[L] = J[l4]; } else { } } } } void set_type() { int l, L; //int l1, l2, l3, l4; //inner for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (n1[L] != -1 && n2[L] != -1 && n3[L] != -1 && n4[L] != -1) t[L] = 0; } } } //rigid walls for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; // l1 = i - 1 + OFFSET*j; l2 = i + OFFSET*j + OFFSET; l3 = i + 1 + OFFSET*j; l4 = i + OFFSET*j - OFFSET; if (I[l] == 1) { if (I[l] == 1) { if (n1[L] == -1 && n2[L] != -1 && n3[L] != -1 && n4[L] != -1) t[L] = 1; if (n1[L] != -1 && n2[L] == -1 && n3[L] != -1 && n4[L] != -1) t[L] = 2; if (n1[L] != -1 && n2[L] != -1 && n3[L] == -1 && n4[L] != -1) t[L] = 3; if (n1[L] != -1 && n2[L] != -1 && n3[L] != -1 && n4[L] == -1) t[L] = 4; } } } } //corners for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (n2[n1[L]] == -1 && n1[L] != -1 && n2[L] != -1) t[L] = 5; if (n2[n3[L]] == -1 && n3[L] != -1 && n2[L] != -1) t[L] = 6; if (n3[n4[L]] == -1 && n3[L] != -1 && n4[L] != -1) t[L] = 7; if (n1[n4[L]] == -1 && n1[L] != -1 && n4[L] != -1) t[L] = 8; } } } //inlet, outlet for (unsigned int i = 0; i <= nxg; i = i + nxg) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[l] == 1) { if (i == 0) { t[L] = 9; if (t[n3[L]] == 2) t[L] = 2; if (t[n3[L]] == 4) t[L] = 4; } if (i == nxg) { t[L] = 10; if (t[n1[L]] == 2) t[L] = 2; if (t[n1[L]] == 4) t[L] = 4; } } } } } void check() { int l, L; ofstream write("out.txt"); for (unsigned int i = 0; i <= nxg; i++) { for (unsigned int j = 0; j <= nyg; j++) { l = i + OFFSET*j; L = J[l]; if (I[i + OFFSET*j] == 1) write << i << " " << j << " " << 1 << " " << L << " " << t[L] << " " << n1[L] << " " << n2[L] << " " << n3[L] << " " << n4[L] << endl; else write << i << " " << j << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << " " << -1 << endl; } } write.close(); } void write_field(double *f, string file_name, double time, int step) { #ifdef __linux__ ofstream to_file(("fields/" + file_name + ".dat").c_str()); #endif 
#ifdef _WIN32 ofstream to_file(("fields\\" + file_name + ".dat").c_str()); #endif int l, L; to_file << time << endl; for (unsigned int j = 0; j <= nyg; j = j + step) { for (unsigned int i = 0; i <= nxg; i = i + step) { l = i + OFFSET*j; L = J[l]; //if (J[l] == J[l]) to_file << i << " " << j << " " << f[L] << endl; if (I[l] == 1) { //to_file << i << " " << j << " " << f[L] << " " << t[L] << " " << L << " " << n1[L] << " " << n2[L] << " " << n3[L] << " " << n4[L] << endl; to_file << i << " " << j << " " << f[L] << endl; } else { to_file << "skip" << endl; //to_file << i << " " << j << " " << NAN << endl; //to_file << i << " " << j << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << endl; } } } to_file.close(); } void save(double *vx, double *vy, double *p, double *C, double *mu, unsigned int i_time, unsigned int i_write) { ofstream to_file("recovery.dat"); ofstream to_file2("recovery2.dat"); to_file << i_time << " " << i_write << endl; to_file2 << i_time << " " << i_write << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) to_file << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) to_file2 << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; to_file.close(); to_file2.close(); } void recover(double *vx, double *vy, double *p, double *C, double *mu, unsigned int &i_time, unsigned int &i_write) { ifstream from_file("recovery.dat"); string str; string substr; stringstream ss; getline(from_file, str); ss << str; ss >> substr; i_time = atoi(substr.c_str()); ss >> substr; i_write = atoi(substr.c_str()); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; ss >> substr; vx[i] = atof(substr.c_str()); ss >> substr; vy[i] = atof(substr.c_str()); ss >> substr; p[i] = atof(substr.c_str()); ss >> substr; C[i] = atof(substr.c_str()); } from_file.close(); } }; struct box { cross *Mcr; int *l, *t, *I, *J, *J_back; int Mx, My, Msize, Moffset, OFFSET; unsigned int iter = 0; unsigned int TOTAL_SIZE = 0; int *n1, *n2, *n3, *n4; unsigned int nx, ny, offset; unsigned int nxg, nyg; double *C0, *C, *p, *p0, *ux, *uy, *vx, *vy, *mu; double LX, LY; void set_global_size(int input_nx, int input_ny) { nx = input_nx; nxg = nx; ny = input_ny; nyg = ny; offset = nx + 1; OFFSET = offset; TOTAL_SIZE = (input_nx + 1) * (input_ny + 1); } void set_type() { l = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { l[i] = 0; t[i] = 0; } unsigned int k; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (i == 0) t[k] = 9; if (i == nx) t[k] = 10; if (j == 0) t[k] = 4; if (j == ny) t[k] = 2; iter++; } } } void set_neighbor() { n1 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n2 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n3 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n4 = (int*)malloc(TOTAL_SIZE * sizeof(int)); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { n1[i] = -1; n2[i] = -1; n3[i] = -1; n4[i] = -1; } unsigned int k, it = 0; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (t[k] == 0) { n1[k] = k - 1; n2[k] = k + offset; n3[k] = k + 1; n4[k] = k - offset; } if (t[k] == 2) n4[k] = k - offset; if (t[k] == 4) n2[k] = k + offset; if (t[k] == 9) { n3[k] = k + 1; n4[k] = k - offset; n2[k] = k + offset; } if (t[k] == 10) { n1[k] = k - 1; n4[k] = k - offset; n2[k] = k + offset; } it++; } } } void 
set_global_id() { I = new int[(nx + 1)*(ny + 1)]; J = new int[(nx + 1)*(ny + 1)]; J_back = new int[TOTAL_SIZE]; OFFSET = nx + 1; for (unsigned int i = 0; i < (nx + 1)*(ny + 1); i++) { I[i] = 0; J[i] = -1; } for (unsigned int i = 0; i < TOTAL_SIZE; i++) { J_back[i] = -1; } unsigned int k, it = 0; // , in, ii, jj; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; I[k] = 1; J[k] = k; J_back[k] = k; it++; } } } void write_field(double *f, string file_name, double time, int step) { #ifdef __linux__ ofstream to_file(("fields/" + file_name + ".dat").c_str()); #endif #ifdef _WIN32 ofstream to_file(("fields\\" + file_name + ".dat").c_str()); #endif unsigned int l, L; to_file << time << endl; for (unsigned int j = 0; j <= nyg; j = j + step) { for (unsigned int i = 0; i <= nxg; i = i + step) { l = i + OFFSET*j; L = J[l]; //if (J[l] == J[l]) to_file << i << " " << j << " " << f[L] << endl; if (I[l] == 1) { //to_file << i << " " << j << " " << f[L] << " " << t[L] << " " << L << " " << n1[L] << " " << n2[L] << " " << n3[L] << " " << n4[L] << " " << //(J_back[L] - (J_back[L] / OFFSET)*OFFSET) << " " << (J_back[L] / OFFSET) << endl; to_file << i << " " << j << " " << f[L] << endl; } else { to_file << "skip" << endl; //to_file << i << " " << j << " " << NAN << endl; //to_file << i << " " << j << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << endl; } } } to_file.close(); } void save(double *vx, double *vy, double *p, double *C, double *mu, unsigned int i_time, unsigned int i_write) { ofstream to_file("recovery.dat"); ofstream to_file2("recovery2.dat"); to_file << i_time << " " << i_write << endl; to_file2 << i_time << " " << i_write << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) to_file << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; for (unsigned int i = 0; i < TOTAL_SIZE; i++) to_file2 << vx[i] << " " << vy[i] << " " << p[i] << " " << C[i] << " " << mu[i] << endl; to_file.close(); to_file2.close(); } void recover(double *vx, double *vy, double *p, double *C, double *mu, unsigned int &i_time, unsigned int &i_write) { ifstream from_file("recovery.dat"); string str; string substr; stringstream ss; getline(from_file, str); ss << str; ss >> substr; i_time = atoi(substr.c_str()); ss >> substr; i_write = atoi(substr.c_str()); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { getline(from_file, str); ss.str(""); ss.clear(); ss << str; ss >> substr; vx[i] = atof(substr.c_str()); ss >> substr; vy[i] = atof(substr.c_str()); ss >> substr; p[i] = atof(substr.c_str()); ss >> substr; C[i] = atof(substr.c_str()); } from_file.close(); } }; struct box_inherited :multi_cross { unsigned int nx, ny, offset; void set_global_size(int input_nx, int input_ny) { nx = input_nx; nxg = nx; ny = input_ny; nyg = ny; offset = nx + 1; OFFSET = offset; TOTAL_SIZE = (input_nx + 1) * (input_ny + 1); } void set_type() { l = new int[TOTAL_SIZE]; t = new int[TOTAL_SIZE]; for (unsigned int i = 0; i < TOTAL_SIZE; i++) { l[i] = 0; t[i] = 0; } unsigned int k; for (unsigned int i = 0; i <= nx; i++) { for (unsigned int j = 0; j <= ny; j++) { k = i + offset*j; if (i == 0) t[k] = 9; if (i == nx) t[k] = 10; if (j == 0) t[k] = 4; if (j == ny) t[k] = 2; iter++; } } } void set_neighbor() { n1 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n2 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n3 = (int*)malloc(TOTAL_SIZE * sizeof(int)); n4 = (int*)malloc(TOTAL_SIZE * sizeof(int)); for (unsigned int i = 0; i < TOTAL_SIZE; i++) { n1[i] = -1; n2[i] = 
-1; n3[i] = -1; n4[i] = -1; }
		unsigned int k, it = 0;
		for (unsigned int i = 0; i <= nx; i++) {
			for (unsigned int j = 0; j <= ny; j++) {
				k = i + offset*j;
				if (t[k] == 0) { n1[k] = k - 1; n2[k] = k + offset; n3[k] = k + 1; n4[k] = k - offset; }
				if (t[k] == 2) n4[k] = k - offset;
				if (t[k] == 4) n2[k] = k + offset;
				if (t[k] == 9) { n3[k] = k + 1; n4[k] = k - offset; n2[k] = k + offset; }
				if (t[k] == 10) { n1[k] = k - 1; n4[k] = k - offset; n2[k] = k + offset; }
				it++;
			}
		}
	}
	void set_global_id() {
		I = new int[(nx + 1)*(ny + 1)];
		J = new int[(nx + 1)*(ny + 1)];
		J_back = new int[TOTAL_SIZE];
		OFFSET = nx + 1;
		for (unsigned int i = 0; i < (nx + 1)*(ny + 1); i++) { I[i] = 0; J[i] = -1; }
		for (unsigned int i = 0; i < TOTAL_SIZE; i++) { J_back[i] = -1; }
		unsigned int k, it = 0; // in, ii, jj;
		for (unsigned int i = 0; i <= nx; i++) {
			for (unsigned int j = 0; j <= ny; j++) {
				k = i + offset*j;
				I[k] = 1;
				J[k] = k;
				J_back[k] = k;
				it++;
			}
		}
	}
};

__global__ void stupid_alloc(unsigned int TS) {
	n1 = new int[TS];
	n2 = new int[TS];
	n3 = new int[TS];
	n4 = new int[TS];
	t = new int[TS];
	J_back = new int[TS];
}

__global__ void stupid_swap(int *nn1, int *nn2, int *nn3, int *nn4, int *tt, int* JJ, unsigned int TS) {
	//unsigned int l = blockIdx.x*blockDim.x + threadIdx.x;
	for (unsigned int l = 0; l < TS; l++) {
		//printf("%i \n", l);
		n1[l] = nn1[l];
		n2[l] = nn2[l];
		n3[l] = nn3[l];
		n4[l] = nn4[l];
		t[l] = tt[l];
		J_back[l] = JJ[l];
	}
}

//this "stupid" step is designed to keep some objects in the global GPU scope
//it is supposed to simplify some other parts of the code
void stupid_step(int *nn1, int *nn2, int *nn3, int *nn4, int *tt, int *JJ, unsigned int TS)
{
	/*the same allocations once more, but on the GPU*/
	unsigned int TSB = TS * sizeof(int);
	int *n1_temp, *n2_temp, *n3_temp, *n4_temp, *t_temp, *J_temp;
	cudaMalloc((void**)&n1_temp, TSB);
	cudaMalloc((void**)&n2_temp, TSB);
	cudaMalloc((void**)&n3_temp, TSB);
	cudaMalloc((void**)&n4_temp, TSB);
	cudaMalloc((void**)&t_temp, TSB);
	cudaMalloc((void**)&J_temp, TSB);
	cudaMemcpy(n1_temp, nn1, TSB, cudaMemcpyHostToDevice);
	cudaMemcpy(n2_temp, nn2, TSB, cudaMemcpyHostToDevice);
	cudaMemcpy(n3_temp, nn3, TSB, cudaMemcpyHostToDevice);
	cudaMemcpy(n4_temp, nn4, TSB, cudaMemcpyHostToDevice);
	cudaMemcpy(t_temp, tt, TSB, cudaMemcpyHostToDevice);
	cudaMemcpy(J_temp, JJ, TSB, cudaMemcpyHostToDevice);
	stupid_alloc << <1, 1 >> > (TS);
	stupid_swap << <1, 1 >> > (n1_temp, n2_temp, n3_temp, n4_temp, t_temp, J_temp, TS);
	//so
	cudaFree(n1_temp);
	cudaFree(n2_temp);
	cudaFree(n3_temp);
	cudaFree(n4_temp);
	cudaFree(t_temp);
	cudaFree(J_temp);
}

//pressure transformation to real pressure
void true_pressure(double *p, double *p_true, double *C, double *mu, int *t, int *n1, int *n2, int *n3, int *n4, int *J_back,
	double tau, unsigned int size, double hx, double hy, double Ca, double A, double Gr, double M, int OFFSET,
	double sinA, double cosA, unsigned int PHASE, double VV_h, double vibrX, double vibrY, double *Phi, double *Wx, double *Wy)
{
	/*this function is not written in the most intuitive way: there were mistakes, the mistakes were fixed,
	and the understanding of how it should have been done came only later, when rewriting it from scratch
	would already have taken too long*/
	// double WX = 0.0, WY = 0.0;
	double dxC = 0, dyC = 0;
	int left, right, up, down, left2, right2, up2, down2;
	for (unsigned int l = 0; l < size; l++) {
		if (PHASE == 0) { p_true[l] = p[l]; continue; }
		left = n1[l]; right = n3[l]; up = n2[l]; down = n4[l];
		if (left == -1) left = right;
		if (right == -1) right = left;
		if (up == -1) up = down;
		if (down == -1) down = up;
		left2 = n1[left]; right2 = n3[right]; up2 = n2[up];
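		// Below, the physical pressure is reconstructed from the solver's modified pressure:
		//   p_true = p + (mu*C - A*C^2 - C^4)/M + Gr*C*(x*cosA + y*sinA)
		//            - 0.5*(Ca/M)*|grad C|^2               (capillary / Korteweg contribution)
		//            - 0.5*VV*(Wx^2 + Wy^2)                (pulsation contribution, only if VV_h > 0)
		// The switch over t[l] only changes how grad C is discretized: centred differences in the
		// bulk (case 0) and one-sided three-point differences at walls, corners, inlet and outlet.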
down2 = n4[down]; p_true[l] = p[l] + (+mu[l] * C[l] - A*pow(C[l], 2) - pow(C[l], 4)) / M + C[l] * Gr*((J_back[l] - (J_back[l] / OFFSET)*OFFSET) * hx*cosA + (J_back[l] / OFFSET) * hy*sinA); if (VV_h > 0) { //WX = -C[l] * vibrX + 0.5*(C[right] - C[left]) / hx; //WY = -C[l] * vibrY + 0.5*(C[up] - C[down]) / hy; p_true[l] += -VV_h*0.5*(Wx[l] * Wx[l] + Wy[l] * Wy[l]); } switch (t[l]) { case 0: //inner p_true[l] += -0.5*Ca / M*( pow((0.5*(C[right] - C[left]) / hx), 2) + pow((0.5*(C[up] - C[down]) / hy), 2)); break; case 1: //left rigid p_true[l] += -0.5*Ca / M*( pow((-0.5*(3.0*C[l] - 4.0*C[right] + C[right2]) / hx), 2) + pow((0.5*(C[up] - C[down]) / hy), 2)); break; case 2: //upper rigid p_true[l] += -0.5*Ca / M*( pow((0.5*(C[right] - C[left]) / hx), 2) + pow((0.5*(3.0*C[l] - 4.0*C[down] + C[down2]) / hy), 2)); break; case 3: //right rigid p_true[l] += -0.5*Ca / M*( pow((0.5*(3.0*C[l] - 4.0*C[left] + C[left2]) / hx), 2) + pow((0.5*(C[up] - C[down]) / hy), 2)); break; case 4: //lower rigid p_true[l] += -0.5*Ca / M*( pow((0.5*(C[right] - C[left]) / hx), 2) + pow((-0.5*(3.0*C[l] - 4.0*C[up] + C[up2]) / hy), 2)); break; case 5: //left upper rigid corner p_true[l] += -0.5*Ca / M*( pow((-0.5*(3.0*C[l] - 4.0*C[right] + C[right2]) / hx), 2) + pow((0.5*(3.0*C[l] - 4.0*C[down] + C[down2]) / hy), 2)); break; case 6: //right upper rigid corner p_true[l] += -0.5*Ca / M*( pow((0.5*(3.0*C[l] - 4.0*C[left] + C[left2]) / hx), 2) + pow((0.5*(3.0*C[l] - 4.0*C[down] + C[down2]) / hy), 2)); break; case 7: //right lower rigid corner p_true[l] += -0.5*Ca / M*( pow((0.5*(3.0*C[l] - 4.0*C[left] + C[left2]) / hx), 2) + pow((-0.5*(3.0*C[l] - 4.0*C[up] + C[up2]) / hy), 2)); break; case 8: //left lower rigid corner p_true[l] += -0.5*Ca / M*( pow((-0.5*(3.0*C[l] - 4.0*C[right] + C[right2]) / hx), 2) + pow((-0.5*(3.0*C[l] - 4.0*C[up] + C[up2]) / hy), 2)); break; case 9: //inlet (from left) dxC = -0.5*(3.0*C[l] - 4.0*C[right] + C[right2]) / hx; dyC = 0.5*(C[up] - C[down]) / hy; p_true[l] += -0.5*Ca / M*(pow(dxC, 2) + pow(dyC, 2)); break; case 10://outlet (to right) dxC = 0.5*(3.0*C[l] - 4.0*C[left] + C[left2]) / hx; dyC = 0.5*(C[up] - C[down]) / hy; p_true[l] += -0.5*Ca / M*(pow(dxC, 2)+ pow(dyC, 2)); break; default: break; } } } void signalHandler(int signum) { cout << "Interrupt signal (" << signum << ") received.\n"; cout << "state: " << state << endl; // cleanup and close up stuff here // terminate program exit(signum); } void create_folder(string name) { #ifdef __linux__ string str = "mkdir -p " + name + "/"; system(str.c_str()); #endif #ifdef _WIN32 CreateDirectoryA(name.c_str(), NULL); #endif } int main(int argc, char **argv) { state = 0; signal(SIGINT, signalHandler); cout << "Version: " << ThisSoftwareVersion << endl; cout << "Compilation time: " << __DATE__ << " " << __TIME__ << endl; cout << "command line: " << endl; for (int i = 0; i < argc; i++) cout << i << ": " << argv[i] << endl; int devID = 0, deviceCount = 0; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) cout << "there is no detected GPU" << endl; double heap_GB = 1.0; double timer1, timer2; double pi = 3.1415926535897932384626433832795; double eps0 = 1e-5; double *C0, *C, *p, *p0, *ux, *uy, *vx, *vy, *mu, *zero, *Phi, *Phi0, *WX, *WY; //_d - device (GPU) double *C_h, *p_h, *vx_h, *vy_h, *mu_h, *p_true_h, *zero_h, *Phi_h, *WX_h, *WY_h; //_h - host (CPU) double *curv1, *curv2, *nx_dC, *ny_dC; double *psiav_array, *psiav_array_Phi; // temporal variables //psiav0_h, eps_h *psiav_d, *psiav_array_h, *psiav_h; double hx_h, hy_h, Lx_h, Ly_h, tau_h, 
tau_p_h, psiav, psiav0, eps, A_h, Ca_h, Gr_h, Pe_h, Re_h, MM_h, dP_h, Gs_h; //parameters double alpha_h, sinA_h, cosA_h, theta_h, sinTh_h, cosTh_h; double Ek, Ek_old, Vmax, Q_in, Q_out, C_average, Cv; unsigned int nx_h, ny_h, Matrix_X, Matrix_Y, iter = 0, offset_h, Phi_kk, kk, k = 0, tt, write_i = 0, each = 1, stop = 0; //parameters double time_fields, time_recovery, time_display; double timeq = 0.0, C_av, C_plus, C_minus; double tecplot, limit_timeq; bool copied = false; unsigned int linear_pressure, fill_gradually, wetting, read_C, stop_at_exit; unsigned int sphere_distribution, curv_calc, vibration, simple_geometry = 0; double fill_gradually_x; unsigned int reset_timeq, invert_initial_C, reset_velocity, reset_pressure; unsigned int PHASE_h, DIFFUSION_h; string geometry; //1 is 'yes' / true, 0 is 'no' / false //unsigned int clean_fields; unsigned int picture_switch = 1; //write fields to a file? unsigned int read_switch = 1; //read to continue or not? //reading_parameters(ny_h, nx_h, each_t, each, Matrix_X, Matrix_Y, tau_h, A_h, Ca_h, Gr_h, Pe_h, Re_h, alpha_h, MM_h, tecplot, PHASE_h); create_folder("fields"); string file_name = "inp.dat"; if (argc == 2) file_name = argv[1]; ReadingFile File(file_name); #define constUint(VAR) \ unsigned int VAR##_h; File.reading<unsigned int>(VAR##_h, #VAR, 0); \ cudaMemcpyToSymbol(VAR, &VAR##_h, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); #define constDouble(VAR) \ double VAR##_h; File.reading<double>(VAR##_h, #VAR, 0.0); \ cudaMemcpyToSymbol(VAR, &VAR##_h, sizeof(double), 0, cudaMemcpyHostToDevice); File.reading<unsigned int>(ny_h, "ny", 200); File.reading<unsigned int>(nx_h, "nx", 200); File.reading<double>(time_fields, "time_fields", 0.5); File.reading<double>(time_recovery, "time_recovery", 0.5); File.reading<double>(time_display, "time_display", 0.1); File.reading<unsigned int>(each, "each_xy", 10); File.reading<unsigned int>(Matrix_X, "Matrix_X", 3); File.reading<unsigned int>(Matrix_Y, "Matrix_Y", 3); File.reading<double>(tau_h, "tau", 5.0e-5); File.reading<double>(A_h, "A", -0.5); if (File.reading<double>(Ca_h, "Ca", 4e-4) == 0) File.reading<double>(Ca_h, "Cn", 4e-4); File.reading<double>(Gr_h, "Gr", 0.0); if (File.reading<double>(Pe_h, "Pe", 1e+4) == 0) File.reading<double>(Pe_h, "Sc", 1e+4); File.reading<double>(Re_h, "Re", 1.0); File.reading<double>(alpha_h, "alpha", 0.0); File.reading<double>(theta_h, "theta", 90.0); File.reading<double>(MM_h, "MM", 1.0); File.reading<double>(Gs_h, "Gs", 0.0); File.reading<double>(dP_h, "dP", 1.0); File.reading<double>(tecplot, "tecplot", 10000); File.reading<unsigned int>(PHASE_h, "Phase_field", 1, 0, 1); File.reading<unsigned int>(read_switch, "read_recovery", 1, 0, 1); File.reading<unsigned int>(picture_switch, "picture_switch", 1, 0, 1); File.reading<double>(limit_timeq, "time_limit", 5000.0); File.reading<unsigned int>(linear_pressure, "linear_pressure", 0, 0, 1); File.reading<unsigned int>(fill_gradually, "fill_gradually", 0, 0, 1); File.reading<double>(fill_gradually_x, "fill_gradually_x", 0.5); File.reading<unsigned int>(DIFFUSION_h, "pure_diffusion", 0, 0, 1); File.reading<unsigned int>(wetting, "wetting", 0, 0, 4); File.reading_string(geometry, "geometry", "matrix"); File.reading<unsigned int>(reset_timeq, "reset_time", 0, 0, 1); File.reading<unsigned int>(invert_initial_C, "invert_C", 0, 0, 1); File.reading<unsigned int>(reset_velocity, "reset_velocity", 0, 0, 1); File.reading<unsigned int>(reset_pressure, "reset_pressure", 0, 0, 1); File.reading<double>(heap_GB, "heap_GB", 1.0); 
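	// File.reading(target, "name", default[, lo, hi]) looks the named key up in inp.dat and falls back
	// to the default if it is absent; where the two trailing numbers are given (e.g. 0, 1 for the on/off
	// switches, or 0, deviceCount - 1 for GPU_id below) they bound the admissible range of the value.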
File.reading<int>(devID, "GPU_id", 0, 0, deviceCount - 1); File.reading<unsigned int>(read_C, "read_concentration", 0, 0, 1); File.reading<unsigned int>(stop_at_exit, "stop_at_exit", 0, 0, 1); File.reading<unsigned int>(sphere_distribution, "sphere", 0, 0, 1); File.reading<unsigned int>(curv_calc, "curv_calc", 0, 0, 1); unsigned int horizontal_profile; File.reading<unsigned int>(horizontal_profile, "horizontal_profile", 0, 0, 1); if (horizontal_profile) create_folder("horizontal_profile"); unsigned int vertical_profile; File.reading<unsigned int>(vertical_profile, "vertical_profile", 0, 0, 1); if (vertical_profile) create_folder("vertical_profile"); File.reading<double>(Lx_h, "Lx", 0.0); File.reading<double>(Ly_h, "Ly", 0.0); File.reading<unsigned int>(vibration, "vibration", 0, 0, 3); double Amp_h; File.reading<double>(Amp_h, "Amp", 0.0); double Omega_h; File.reading<double>(Omega_h, "Omega", 0.0); double vibr_X_h; File.reading<double>(vibr_X_h, "vibr_X", 0.0, 0.0, 1.0); double vibr_Y_h; File.reading<double>(vibr_Y_h, "vibr_Y", 0.0, 0.0, 1.0); double VV_h; File.reading<double>(VV_h, "VV", 0.0); if (vibration == 0) { VV_h = 0; } unsigned int integrals_add1; File.reading<unsigned int>(integrals_add1, "integrals_add1", 0); string filling; File.reading_string(filling, "filling", "no"); //unsigned int W_BORDER_h; File.reading<unsigned int>(W_BORDER_h, "W_BORDER", 0); //cudaMemcpyToSymbol(W_BORDER, &W_BORDER_h, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); //unsigned int PHI_BORDER_LEFT_h; File.reading<unsigned int>(PHI_BORDER_LEFT_h, "PHI_BORDER_LEFT", 0); //cudaMemcpyToSymbol(PHI_BORDER_LEFT, &PHI_BORDER_LEFT_h, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); //unsigned int PHI_BORDER_RIGHT_h; File.reading<unsigned int>(PHI_BORDER_RIGHT_h, "PHI_BORDER_RIGHT", 0); //cudaMemcpyToSymbol(PHI_BORDER_RIGHT, &PHI_BORDER_RIGHT_h, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); //File.reading<unsigned int>(clean_fields, "clean_fields", 1, 0, 1); constUint(W_BORDER); constUint(PHI_border_left); constUint(PHI_border_right); constDouble(PHI_value_left); constDouble(PHI_value_right); //GPU setting cudaSetDevice(devID); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, devID); printf("\nDevice %d: \"%s\"\n", devID, deviceProp.name); //allocate heap size size_t limit = (size_t)(1024 * 1024 * 1024 * heap_GB); cudaDeviceSetLimit(cudaLimitMallocHeapSize, limit); cudaDeviceGetLimit(&limit, cudaLimitMallocHeapSize); //if (clean_fields == 1) { //system("exec rm -r /fields/*"); //cout << "fields cleaned" << endl; //} //the main class for geometry multi_cross Geom; hy_h = 1.0 / ny_h; hx_h = hy_h; tt = (unsigned int)round(1.0 / tau_h); cosA_h = cos(alpha_h*pi / 180); sinA_h = sin(alpha_h*pi / 180); tau_p_h = 0.20*hx_h*hx_h; Ek = 0; Ek_old = 0; kk = 1000000; //Poisson iteration limit Phi_kk = 1000000; //geometry { if (geometry == "matrix") { Geom.set_global_size(nx_h, ny_h, Matrix_X, Matrix_Y); //Geom.set_global_size_narrow_tubes(2*nx_h, nx_h, ny_h/2, Matrix_X, Matrix_Y); Geom.set_type(); //Geom.left_normal_in((Matrix_Y - 1) / 2, (Matrix_Y - 1) / 2); //Geom.left_normal_out((Matrix_Y - 1) / 2, (Matrix_Y - 1) / 2); Geom.set_neighbor(); Geom.set_global_id(); cout << "Matrix_X = " << Matrix_X << ", Matrix_Y = " << Matrix_Y << endl; Geom.check(); } else if (geometry == "matrix2") { Geom.set_global_size(nx_h, ny_h, Matrix_X, Matrix_Y); Geom.set_global_id_B(); Geom.set_neighbor_B(); Geom.set_type_B(); Geom.check(); cout << "Matrix_X = " << Matrix_X << ", Matrix_Y = " << Matrix_Y << endl; } else if 
(geometry == "box") { if (Ly_h != 0) hy_h = Ly_h / ny_h; if (Lx_h != 0) hx_h = Lx_h / nx_h; Geom.set_global_size_box(nx_h, ny_h); Geom.set_type_box(); Geom.set_neighbor_box(); Geom.set_global_id_box(); Geom.check(); simple_geometry = 1; } else if (geometry == "tube") { Geom.set_global_size_box(nx_h, ny_h); Geom.set_type_tube(); Geom.set_neighbor_box(); Geom.set_global_id_box(); Geom.check(); simple_geometry = 1; } else if (geometry == "grid") { Geom.read_grid_geometry(); File.reading<double>(hy_h, "h_step", 0.001); hx_h = hy_h; tau_p_h = 0.20 * hx_h * hx_h; nx_h = Geom.nx; ny_h = Geom.ny; offset_h = Geom.offset; //sure you need all this? Geom.check(); } else { cout << "what are you trying to do?" << endl; return 0; } } /* box_inherited Geom; Geom.set_global_size(nx_h, ny_h); Geom.set_type(); Geom.set_neighbor(); Geom.set_global_id(); cout << "SIZE = " << " " << Geom.TOTAL_SIZE << endl; */ //alternative geometry /* multi_line Geom; Geom.generate_levels(30, 30, 30, 3, 5); cout << "approximate memory amount = " << 100 * Geom.TOTAL_SIZE / 1024 / 1024 << " MB" << endl << endl << endl; Geom.set_neighbor(); Geom.set_type(); pause */ //int sss = 0; for (int i = 0; i < Geom.TOTAL_SIZE; i++) if (Geom.t[i] == 9) sss++; cout << "S=" << sss << endl; //here we copy the arrays responsible for the geometry to GPU stupid_step(Geom.n1, Geom.n2, Geom.n3, Geom.n4, Geom.t, Geom.J_back, Geom.TOTAL_SIZE); cudaCheckError() //total Length and Width of the porous matrix Lx_h = hx_h * (Geom.nxg); Ly_h = hy_h * (Geom.nyg); cudaDeviceSynchronize(); //size setting //you may just skip it, that is weird offset_h = nx_h + 1; unsigned int size_l = Geom.TOTAL_SIZE; //Number of all nodes/elements if (size_l <= 1024 || size_l >= 1024 * 1024 * 1024) { cout << "data is too small or too large" << endl; return 0; } std::cout << "size_l=" << size_l << endl; size_t size_b /*size (in) bytes*/ = size_l * sizeof(double); //sizeof(double) = 8 bytes size_t thread_x_d /*the dimension of x in a block*/ = 1024; //size_t threads_per_block = thread_x_d; dim3 gridD((unsigned int)ceil((size_l + 0.0) / thread_x_d)); dim3 blockD((unsigned int)thread_x_d); std::cout << "gridD.x=" << gridD.x << endl; std::cout << "blockD.x=" << blockD.x << endl; //setting for the reduction procedure //that is even weirder, skip it, don't hesitate unsigned long long int *Gp, *Np; unsigned int s = 0; unsigned int GN = size_l; while (true) { s++; GN = (unsigned int)ceil(GN / (thread_x_d + 0.0)); if (GN == 1) break; } GN = size_l; std::cout << "the number of reduction = " << s << endl; Gp = new unsigned long long int[s]; Np = new unsigned long long int[s]; for (unsigned int i = 0; i < s; i++) Gp[i] = GN = (unsigned int)ceil(GN / (thread_x_d + 0.0)); Np[0] = size_l; for (unsigned int i = 1; i < s; i++) Np[i] = Gp[i - 1]; int last_reduce = (int)pow(2, ceil(log2(Np[s - 1] + 0.0))); std::cout << "last reduction = " << last_reduce << endl; (s != 1) ? 
std::cout << "sub array for the Poisson solver = " << Np[1] << endl : std::cout << "it shouldn't be here" << endl; double *arr[10]; double *arr2[10]; //allocating memory for arrays on CPU and initializing them { if (DIFFUSION_h == 1) { C_h = (double*)malloc(size_b); mu_h = (double*)malloc(size_b); p_true_h = (double*)malloc(size_b); zero_h = (double*)malloc(size_b); p_h = vx_h = vy_h = zero_h; for (unsigned int l = 0; l < size_l; l++) { C_h[l] = 0.5; mu_h[l] = 0; p_true_h[l] = 0.0; zero_h[l] = 0.0; } } else { C_h = (double*)malloc(size_b); mu_h = (double*)malloc(size_b); p_h = (double*)malloc(size_b); p_true_h = (double*)malloc(size_b); vx_h = (double*)malloc(size_b); vy_h = (double*)malloc(size_b); // psiav_h = (double*)malloc(sizeof(double)); // psiav_array_h = (double*)malloc(size_b / threads_per_block); for (unsigned int l = 0; l < size_l; l++) { C_h[l] = 0.5; mu_h[l] = 0; p_h[l] = 0.0; p_true_h[l] = 0.0; vx_h[l] = 0.0; vy_h[l] = 0.0; } } if (curv_calc == 1) { curv1 = (double*)malloc(size_b); curv2 = (double*)malloc(size_b); nx_dC = (double*)malloc(size_b); ny_dC = (double*)malloc(size_b); for (unsigned int l = 0; l < size_l; l++) { curv1[l] = 0.0; curv2[l] = 0.0; nx_dC[l] = 0.0; ny_dC[l] = 0.0; } } if (vibration == 1) { Phi_h = (double*)malloc(size_b); WX_h = (double*)malloc(size_b); WY_h = (double*)malloc(size_b); for (unsigned int l = 0; l < size_l; l++) { Phi_h[l] = 0.0; WX_h[l] = 0.0; WY_h[l] = 0.0; } } } if (linear_pressure == 1) { Geom.linear_pressure(p_h, hx_h, hy_h, cosA_h, sinA_h, Lx_h, Ly_h, 8.0 / Re_h); } if (filling == "shift") { double delta = sqrt(Ca_h / 0.5); Geom.fill_gradually(C_h, hx_h, hy_h, delta, fill_gradually_x); } if (filling == "sphere") { double sphere_x0, sphere_y0, sphere_R0; double C_outer, C_inner; File.reading<double>(sphere_x0, "sphere_x0", Lx_h * 0.5); File.reading<double>(sphere_y0, "sphere_y0", Ly_h * 0.5); File.reading<double>(sphere_R0, "sphere_R0", 0.1); File.reading<double>(C_outer, "sphere_C_outer", +sqrt(abs(-A_h) / 2.0)); File.reading<double>(C_inner, "sphere_C_inner", -sqrt(abs(-A_h) / 2.0)); Geom.fill_with_sphere(C_h, hx_h, hy_h, sphere_x0, sphere_y0, sphere_R0, C_outer, C_inner); } if (filling == "horizontal") { double delta = sqrt(Ca_h / 0.5); double horizontal_amp; File.reading<double>(horizontal_amp, "horizontal_amp", 0); Geom.fill_horizontal_way(C_h, hx_h, hy_h, 0.5, Ly_h*0.5, horizontal_amp, 2.0*Pi / Lx_h, delta); } //additional allocation on CPU for statistics if necessary // при какой еще необходимости!? double *fx, *fy; signed char *mark; { fx = (double*)malloc(sizeof(double)*size_l); fy = (double*)malloc(sizeof(double)*size_l); mark = (signed char*)malloc(sizeof(signed char)*size_l); } //allocating memory for arrays on GPU { if (DIFFUSION_h == 1) { cudaMalloc((void**)&C, size_b); cudaMalloc((void**)&C0, size_b); cudaMalloc((void**)&mu, size_b); cudaMalloc((void**)&zero, size_b); p = p0 = ux = uy = vx = vy = zero; } else { cudaMalloc((void**)&C, size_b); cudaMalloc((void**)&C0, size_b); cudaMalloc((void**)&p, size_b); cudaMalloc((void**)&p0, size_b); cudaMalloc((void**)&ux, size_b); cudaMalloc((void**)&uy, size_b); cudaMalloc((void**)&vx, size_b); cudaMalloc((void**)&vy, size_b); cudaMalloc((void**)&mu, size_b); (s != 1) ? cudaMalloc((void**)&psiav_array, sizeof(double)*Np[1]) : cudaMalloc((void**)&psiav_array, sizeof(double)); } if (vibration == 1) { cudaMalloc((void**)&Phi, size_b); cudaMalloc((void**)&Phi0, size_b); cudaMalloc((void**)&WX, size_b); cudaMalloc((void**)&WY, size_b); (s != 1) ? 
cudaMalloc((void**)&psiav_array_Phi, sizeof(double)*Np[1]) : cudaMalloc((void**)&psiav_array_Phi, sizeof(double)); } } //for Poisson procedure shortness arr[0] = p; for (unsigned int i = 1; i <= s; i++) arr[i] = psiav_array; if (vibration == 1) { arr2[0] = Phi; for (unsigned int i = 1; i <= s; i++) arr2[i] = psiav_array_Phi; } //ofstream is a class to write data in a file, ifstream is a class to read data from a file ofstream integrals; ofstream test_output; int test_output_switch; File.reading<int>(test_output_switch, "test_output", 0, 0, 1); if (test_output_switch) test_output.open("test_output.dat"); ofstream k_number; ifstream read; read.open("recovery.dat"); //checking whether a recovery file exists or not //if not we start at t = 0, otherwise we continue from the saved data bool file_exists = read.good(); if (read_switch == 0) file_exists = false; if (file_exists == true) { read_switch = 1; std::cout << endl << "CONTINUE" << endl; } else { read_switch = 0; iter = 0; std::cout << endl << "from the Start" << endl; if (read_C == 1) { std::cout << "initial concentration reading" << endl; Geom.read_concentration(C_h, "recovery.dat", 4, 1, 1); } } //continue if (read_switch == 1) { //Geom.recover(vx_h, vy_h, p_h, C_h, mu_h, iter, write_i, timeq, kk); double* var[10]; unsigned int n = 0; var[n] = vx_h; n++; var[n] = vy_h; n++; var[n] = p_h; n++; var[n] = C_h; n++; var[n] = mu_h; n++; if (vibration == 1) { var[n] = Phi_h; n++; } Geom.recover(var, n, iter, write_i, timeq, kk); if (reset_timeq == 0) { integrals.open("integrals.dat", std::ofstream::app); cout << "from time: " << timeq << " iter:" << iter << endl; } else if (reset_timeq == 1) { cout << "reset time" << endl; integrals.open("integrals.dat"); iter = 0; write_i = 0; timeq = 0; if (invert_initial_C == 1) for (unsigned int l = 0; l < size_l; l++) C_h[l] = C_h[l] * (-1); if (reset_velocity == 1) for (unsigned int l = 0; l < size_l; l++) { vx_h[l] = 0.0; vy_h[l] = 0.0; } if (reset_pressure == 1) for (unsigned int l = 0; l < size_l; l++) p_h[l] = 0.0; } } //from the start if (read_switch == 0) integrals.open("integrals.dat"); //copying values from host variables to device ones { cudaMemcpy(C, C_h, size_b, cudaMemcpyHostToDevice); cudaMemcpy(C0, C_h, size_b, cudaMemcpyHostToDevice); cudaMemcpy(p0, p_h, size_b, cudaMemcpyHostToDevice); cudaMemcpy(p, p_h, size_b, cudaMemcpyHostToDevice); cudaMemcpy(ux, vx_h, size_b, cudaMemcpyHostToDevice); cudaMemcpy(uy, vy_h, size_b, cudaMemcpyHostToDevice); cudaMemcpy(vx, vx_h, size_b, cudaMemcpyHostToDevice); cudaMemcpy(vy, vy_h, size_b, cudaMemcpyHostToDevice); cudaMemcpy(mu, mu_h, size_b, cudaMemcpyHostToDevice); if (vibration == 1) { cudaMemcpy(Phi, Phi_h, size_b, cudaMemcpyHostToDevice); cudaMemcpy(Phi0, Phi_h, size_b, cudaMemcpyHostToDevice); } } //copying some constant parameters to the fast constant memory { cudaMemcpyToSymbol(hx, &hx_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(hy, &hy_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Lx, &Lx_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Ly, &Ly_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(nx, &nx_h, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(ny, &ny_h, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(n, &size_l, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(offset, &offset_h, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(A, &A_h, sizeof(double), 0, cudaMemcpyHostToDevice); 
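	// Each cudaMemcpyToSymbol in this block uploads one host-side parameter into the corresponding
	// __constant__ device symbol (hx, hy, A, Ca, ...), so every kernel reads it through the constant
	// cache instead of receiving it as a launch argument.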
cudaMemcpyToSymbol(Ca, &Ca_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Gr, &Gr_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Gs, &Gs_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Pe, &Pe_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Re, &Re_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(MM, &MM_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(tau, &tau_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(tau_p, &tau_p_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(alpha, &alpha_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(sinA, &sinA_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(cosA, &cosA_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(theta, &theta_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(sinTh, &sinTh_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(cosTh, &cosTh_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(OFFSET, &Geom.OFFSET, sizeof(int), 0, cudaMemcpyHostToDevice); // cudaMemcpyToSymbol(Mx, &Geom.Mx, sizeof(int), 0, cudaMemcpyHostToDevice); // cudaMemcpyToSymbol(My, &Geom.My, sizeof(int), 0, cudaMemcpyHostToDevice); // cudaMemcpyToSymbol(Msize, &Geom.Msize, sizeof(int), 0, cudaMemcpyHostToDevice); // cudaMemcpyToSymbol(Moffset, &Geom.Moffset, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(TOTAL_SIZE, &Geom.TOTAL_SIZE, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(PHASE, &PHASE_h, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(dP, &dP_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Amp, &Amp_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(Omega, &Omega_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(VV, &VV_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(vibr_X, &vibr_X_h, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(vibr_Y, &vibr_Y_h, sizeof(double), 0, cudaMemcpyHostToDevice); } { cout << "approximate memory amount = " << 100 * Geom.TOTAL_SIZE / 1024 / 1024 << " MB" << endl; PrintVar(wetting) PrintVar(DIFFUSION_h) PrintVar(geometry) PrintVar(filling) } //just printing parameters from GPU to be confident they are passed correctly hello << <1, 1 >> > (); cudaDeviceSynchronize(); //Geom.fast_test_writing(C_h); Geom.write_field(C_h, "0", timeq, each); //measure real time of calculating timer1 = clock() / CLOCKS_PER_SEC; //Geom.write_field(C_h, "test", 0, 1); //write the file with parameters //this step was written for making movies { #ifdef __linux__ ofstream to_file("fields/param.dat"); #endif #ifdef _WIN32 ofstream to_file("fields\\param.dat"); #endif #define space << " " << to_file << Geom.nxg / each space Geom.nyg / each space hx_h*each space hy_h*each space Lx_h space Ly_h space Gr_h space Ca_h space Pe_h space Re_h space A_h space MM_h space alpha_h << endl; to_file.close(); } true_pressure(p_h, p_true_h, C_h, mu_h, Geom.t, Geom.n1, Geom.n2, Geom.n3, Geom.n4, Geom.J_back, tau_h, Geom.TOTAL_SIZE, hx_h, hy_h, Ca_h, A_h, Gr_h, MM_h, Geom.OFFSET, sinA_h, cosA_h, PHASE_h, VV_h, vibr_X_h, vibr_Y_h, Phi_h, WX_h, WY_h); //pause // the main time loop of the whole calculation procedure while (true) { iter = iter + 1; timeq = timeq + tau_h; //Poisson equation for pulsation potential if (vibration == 1) { double eps_Phi = 1.0; double psiav0_Phi = -1.0; double psiav_Phi = 0.0; unsigned 
int k_Phi = 0; //while ((eps_Phi > eps0*psiav0_Phi && k_Phi < Phi_kk)) while ((eps_Phi > eps0*psiav0_Phi)) { psiav_Phi = 0.0; k_Phi++; Poisson_Phi << <gridD, blockD >> >(Phi, Phi0, C0, WX, WY); for (unsigned int i = 0; i < s; i++) reduction00 << < Gp[i], 1024, 1024 * sizeof(double) >> > (arr2[i], Np[i], arr2[i + 1]); swap_one << <gridD, blockD >> > (Phi0, Phi); cudaMemcpy(&psiav_Phi, psiav_array_Phi, sizeof(double), cudaMemcpyDeviceToHost); eps_Phi = abs(psiav_Phi - psiav0_Phi); psiav0_Phi = psiav_Phi; if (k_Phi % 1000 == 0) { cout << "Phi_iter=" << k_Phi << " " << eps_Phi << endl; } } Phi_kk = k_Phi; if (iter % (int)(tt *time_display) == 0 || iter == 1) { cout << "Phi_iter=" << Phi_kk << endl; } Phi_normalization << < gridD, blockD >> > (Phi); WW_from_Phi << < gridD, blockD >> > (WX, WY, Phi, C0); } if (DIFFUSION_h == 1) { //only diffusion if (PHASE_h == 1) { chemical_potential << <gridD, blockD >> > (mu, C); concentration << < gridD, blockD >> > (C, C0, vx, vy, mu); //concentration_surface_energy_wetting << < gridD, blockD >> > (C, C0, vx, vy, mu); } else if (PHASE_h == 0) { concentration << < gridD, blockD >> > (C, C0, vx, vy, C0); } swap_one << <gridD, blockD >> > (C0, C); } else { //flow //1st step, calculating of time evolutionary parts of velocity (quasi-velocity) and concentration and chemical potential { if (PHASE_h == 1) { chemical_potential << <gridD, blockD >> > (mu, C); //chemical_potential_inside << <gridD, blockD >> > (mu, C); //chemical_potential_border << <gridD, blockD >> > (mu, C); //quasi_velocity_upstream << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); switch (vibration) { case 0: quasi_velocity << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); break; case 1: quasi_velocity_pulsation_with_Phi << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu, timeq, Phi, WX, WY); break; case 2: quasi_velocity_pulsation << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu, timeq); break; case 3: quasi_velocity << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); break; default: break; } switch (wetting) { case 0: //as it is concentration << < gridD, blockD >> > (C, C0, vx, vy, mu); //concentration_upstream << < gridD, blockD >> > (C, C0, vx, vy, mu); break; case 1: //const initial concentration at walls, which is not washed out concentration_no_wetting << < gridD, blockD >> > (C, C0, vx, vy, mu); break; case 2: //ongoing concentration devours initial one concentration_wetting << < gridD, blockD >> > (C, C0, vx, vy, mu); break; case 3: //surface energy formulation by Jacqmin // not finished concentration_surface_energy_wetting << < gridD, blockD >> > (C, C0, vx, vy, mu); break; default: break; } } else if (PHASE_h == 0) { chemical_potential_Gr << <gridD, blockD >> > (mu); switch (vibration) { case 0: //as it is quasi_velocity << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); //quasi_velocity_no_phase_field << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); break; case 1: quasi_velocity_pulsation_with_Phi << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu, timeq, Phi, WX, WY); break; case 2: quasi_velocity_pulsation << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu, timeq); break; case 3: quasi_velocity << < gridD, blockD >> > (ux, uy, vx, vy, C0, mu); break; default: break; } //if (timeq < 1) concentration << < gridD, blockD >> > (C, C0, vx, vy, C0); //else concentration_no_input_C << < gridD, blockD >> > (C, C0, vx, vy, C0); } } //2nd step, Poisson equation for pressure { eps = 1.0; psiav0 = -1.0; psiav = 0.0; k = 0; //while (eps > eps0*psiav0 || k < 10) //while (eps > eps0*psiav0 ) while ((eps > 
eps0*psiav0 && k < kk)) { psiav = 0.0; k++; if (vibration == 1) Poisson_pulsation_Phi << <gridD, blockD >> > (p, p0, ux, uy, mu, C, Phi, WX, WY); else if (vibration == 3) Poisson_pulsation << <gridD, blockD >> >(p, p0, ux, uy, mu, C, timeq); else Poisson << <gridD, blockD >> > (p, p0, ux, uy, mu, C); for (unsigned int i = 0; i < s; i++) reduction00 << < Gp[i], 1024, 1024 * sizeof(double) >> > (arr[i], Np[i], arr[i + 1]); swap_one << <gridD, blockD >> > (p0, p); cudaMemcpy(&psiav, psiav_array, sizeof(double), cudaMemcpyDeviceToHost); eps = abs(psiav - psiav0); psiav0 = psiav; if (k % 1000 == 0) { cout << "p_iter=" << k << endl; } } } kk = k; if (iter % (int)(tt *time_display) == 0 || iter == 1) { cout << "p_iter=" << k << endl; } //3rd step, velocity correction and swapping field values velocity_correction << <gridD, blockD >> > (vx, vy, ux, uy, p); swap_3 << <gridD, blockD >> > (ux, vx, uy, vy, C0, C); } //4th step, printing results, writing data and whatever you want if (iter % (int)(tt *time_display) == 0 || iter == 1) { cout << setprecision(15) << endl; cout << fixed << endl; cudaMemcpy(vx_h, vx, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(vy_h, vy, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(p_h, p, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(C_h, C, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(mu_h, mu, size_b, cudaMemcpyDeviceToHost); if (vibration == 1) { WW_from_Phi << <gridD, blockD >> > (WX, WY, Phi, C); cudaMemcpy(Phi_h, Phi, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(WX_h, WX, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(WY_h, WY, size_b, cudaMemcpyDeviceToHost); } copied = true; true_pressure(p_h, p_true_h, C_h, mu_h, Geom.t, Geom.n1, Geom.n2, Geom.n3, Geom.n4, Geom.J_back, tau_h, Geom.TOTAL_SIZE, hx_h, hy_h, Ca_h, A_h, Gr_h, MM_h, Geom.OFFSET, sinA_h, cosA_h, PHASE_h, VV_h, vibr_X_h, vibr_Y_h, Phi_h, WX_h, WY_h); double len, ten, vol, width, p_plusAv, p_minusAv, p_Av, vx_plusAv, vx_minusAv, vx_Av; velocity(size_l, hx_h, hy_h, vx_h, vy_h, Ek, Vmax); VFR(vx_h, Geom.t, size_l, hy_h, Q_in, Q_out, C_h, C_average, Cv); C_statistics(Geom.TOTAL_SIZE, hx_h, hy_h, Geom.t, C_h, C_av, C_plus, C_minus); len = Geom.isoline(hx_h, hy_h, C_h, mark, fx, fy, 0.0); ten = Ca_h / len / MM_h * Geom.tension(hx_h, hy_h, C_h); if (!std::isfinite(ten)) ten = 0; //if (ten != ten) ten = 0; vol = Geom.volume(hx_h, hy_h, C_h, 0.2); width = vol / len; if (!std::isfinite(width)) width = 0; //if (abs(width) > 100000) width = 0; Geom.X_averaged_in_each_phase(hx_h, hy_h, C_h, p_true_h, p_plusAv, p_minusAv, p_Av, 0.05); Geom.X_averaged_in_each_phase(hx_h, hy_h, C_h, vx_h, vx_plusAv, vx_minusAv, vx_Av); //Display timer cout << "t= " << tau_h*iter << endl; cout << "Vmax= " << Vmax << endl; cout << "Ek= " << Ek << endl; cout << "dEk= " << (Ek - Ek_old) << endl; cout << "p_iter=" << k << endl; cout << "Q_in=" << Q_in << endl; cout << "Q_out=" << Q_out << endl; cout << "Vx_max=" << maxval(vx_h, size_l) << endl; cout << "C_max=" << maxval(C_h, size_l) << endl; cout << "p_max=" << maxval(p_h, size_l) << endl; cout << "C_av=" << C_av << endl; cout << "C_plus=" << C_plus << endl; cout << "C_minus=" << C_minus << endl; //Integrals if (iter == 1) { integrals << "t, Ek, Vmax, time(min), dEk, Q_in, Q_out, C_average, Q_per_cap, Q_per_width" << ", Cv_per_cap, Cv_per_width, C_av, C_plus, C_minus, L, ten, width" << ", p_plusAv, p_minusAv, vx_plusAv, vx_minusAv"; if (integrals_add1) { integrals << ", Xtip, Xwall, Qtip, Qwall, Cap_pres"; } integrals << endl; } integrals << setprecision(20) << fixed; integrals << timeq << " " << Ek 
<< " " << Vmax << " " << (timer2 - timer1) / 60 << " " << abs(Ek - Ek_old) << " " << Q_in << " " << Q_out << " " << C_average / Matrix_Y << " " << Q_out / Matrix_Y << " " << Q_out / Ly_h << " " << Cv / Matrix_Y << " " << Cv / Ly_h << " " << C_av << " " << C_plus << " " << C_minus << " " << len << " " << ten << " " << width << " " << p_plusAv << " " << p_minusAv << " " << vx_plusAv << " " << vx_minusAv; if (integrals_add1) { double x_tip = Geom.change_sign_at_X(hx_h, hy_h, C_h, Geom.nyg / 2); double x_wall = Geom.change_sign_at_X(hx_h, hy_h, C_h, 0); double cap_pres = Geom.pressure_jump(hx_h, hy_h, p_true_h, x_tip, 0.1); double Q_tip = Geom.flow_rate(hx_h, hy_h, vx_h, Ly_h, (unsigned int)(x_tip / hx_h)); double Q_wall = Geom.flow_rate(hx_h, hy_h, vx_h, Ly_h, (unsigned int)(x_wall / hx_h)); integrals << " " << x_tip << " " << x_wall << " " << Q_tip << " " << Q_wall << " " << cap_pres; cout << "x_tip=" << x_tip << endl; } integrals << endl; Ek_old = Ek; if (stop_at_exit == 1) { stop = Geom.checkExit(C_h); if (stop == 1) cout << "stop command is applied" << endl; } } //fields writing if (iter % (int(time_fields * tt)) == 0 || iter == 1 || stop == 1) { if (copied == false) { cudaMemcpy(vx_h, vx, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(vy_h, vy, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(p_h, p, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(C_h, C, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(mu_h, mu, size_b, cudaMemcpyDeviceToHost); if (vibration == 1) { WW_from_Phi << <gridD, blockD >> > (WX, WY, Phi, C); cudaMemcpy(Phi_h, Phi, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(WX_h, WX, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(WY_h, WY, size_b, cudaMemcpyDeviceToHost); } true_pressure(p_h, p_true_h, C_h, mu_h, Geom.t, Geom.n1, Geom.n2, Geom.n3, Geom.n4, Geom.J_back, tau_h, Geom.TOTAL_SIZE, hx_h, hy_h, Ca_h, A_h, Gr_h, MM_h, Geom.OFFSET, sinA_h, cosA_h, PHASE_h, VV_h, vibr_X_h, vibr_Y_h, Phi_h, WX_h, WY_h); copied = true; } write_i++; stringstream ss; string file_name; ss.str(""); ss.clear(); ss << write_i; file_name = ss.str(); Geom.write_field(C_h, file_name, timeq, each); Geom.write_field(p_true_h, "true_p_" + file_name, timeq, each); Geom.write_field(mu_h, "mu_" + file_name, timeq, each); if (curv_calc) { Geom.curvature_direct(C_h, hx_h, hy_h, curv1, 0.1); Geom.curvature_direct(C_h, hx_h, hy_h, curv2, 0.001); //Geom.curvature_2_steps(C_h, nx_dC, ny_dC, hx_h, hy_h, curv2); Geom.write_field(curv1, "curv_" + file_name, timeq, each); Geom.write_field(curv2, "curv2_" + file_name, timeq, each); } if (vibration == 1) { Geom.write_field(Phi_h, "Phi_" + file_name, timeq, each); Geom.write_field(WX_h, "WX_" + file_name, timeq, each); Geom.write_field(WY_h, "WY_" + file_name, timeq, each); if (test_output_switch) test_output << timeq << " " << MAXval(Phi_h, size_l) << " " << MINval(Phi_h, size_l) << " " << Phi_h[0 + Geom.OFFSET*Geom.nyg] - Phi_h[Geom.nxg + Geom.OFFSET*Geom.nyg] << endl; } if (horizontal_profile) { double *var[20]; string head = "i x C P_true P Mu vx vy"; var[0] = C_h; var[1] = p_true_h; var[2] = p_h; var[3] = mu_h; var[4] = vx_h; var[5] = vy_h; int n = 6; if (curv_calc) { head.append(" curv1 curv2"); var[n] = curv1; n++; var[n] = curv2; n++; } if (vibration == 1) { head.append(" Phi"); head.append(" WX"); head.append(" WY"); var[n] = Phi_h; n++; var[n] = WX_h; n++; var[n] = WY_h; n++; } Geom.write_linear_profile(file_name, head, timeq, 1, hx_h, var, n); Geom.write_linear_profile(file_name + "_bot", head, timeq, 1, hx_h, var, n, 0); Geom.write_linear_profile(file_name + "_top", head, 
timeq, 1, hx_h, var, n, Geom.nyg); } if (vertical_profile && integrals_add1) { double *var[20]; string head = "j y C P_true P Mu vx vy"; var[0] = C_h; var[1] = p_true_h; var[2] = p_h; var[3] = mu_h; var[4] = vx_h; var[5] = vy_h; int n = 6; double x_tip = Geom.change_sign_at_X(hx_h, hy_h, C_h, Geom.nyg / 2); double x_wall = Geom.change_sign_at_X(hx_h, hy_h, C_h, 0); if (vibration == 1) { head.append(" Phi"); head.append(" WX"); head.append(" WY"); var[n] = Phi_h; n++; var[n] = WX_h; n++; var[n] = WY_h; n++; } Geom.write_section_profile(file_name + "_tip", head, timeq, 1, hy_h, var, n, (unsigned int)(x_tip / hx_h)); Geom.write_section_profile(file_name + "_wall", head, timeq, 1, hy_h, var, n, (unsigned int)(x_wall / hx_h)); Geom.write_section_profile(file_name + "_end", head, timeq, 1, hy_h, var, n, Geom.nxg - 1); Geom.write_section_profile(file_name + "_start", head, timeq, 1, hy_h, var, n, 0); //Geom.write_section_profile(file_name + "_j10", head, timeq, 1, hy_h, var, n, 10); } //Geom.write_field(mu_h, "mu" + file_name, timeq, each); } //fields writting for tecplot if (tecplot != 0 && (iter % (int(time_fields * tt)) == 0 || iter == 1 || stop == 1)) { if (copied == false) { cudaMemcpy(vx_h, vx, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(vy_h, vy, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(p_h, p, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(C_h, C, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(mu_h, mu, size_b, cudaMemcpyDeviceToHost); if (vibration == 1) { WW_from_Phi << <gridD, blockD >> > (WX, WY, Phi, C); cudaMemcpy(Phi_h, Phi, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(WX_h, WX, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(WY_h, WY, size_b, cudaMemcpyDeviceToHost); } true_pressure(p_h, p_true_h, C_h, mu_h, Geom.t, Geom.n1, Geom.n2, Geom.n3, Geom.n4, Geom.J_back, tau_h, Geom.TOTAL_SIZE, hx_h, hy_h, Ca_h, A_h, Gr_h, MM_h, Geom.OFFSET, sinA_h, cosA_h, PHASE_h, VV_h, vibr_X_h, vibr_Y_h, Phi_h, WX_h, WY_h); copied = true; } double *var[10]; int n = 0; string head = "VARIABLES=\"x\",\"y\",\"C\",\"p\",\"mu\",\"vx\",\"vy\""; var[n] = C_h; n++; var[n] = p_true_h; n++; var[n] = mu_h; n++; var[n] = vx_h; n++; var[n] = vy_h; n++; if (vibration == 1) { head.append(",\"WX\",\"WY\",\"Phi\""); var[n] = WX_h; n++; var[n] = WY_h; n++; var[n] = Phi_h; n++; } Geom.write_field_tecplot(tecplot, hx_h, hy_h, "fields", timeq, each, iter, var, n, head); } //recovery fields writing if (iter % (int)(tt*time_recovery) == 0 || timeq > limit_timeq || stop == 1) { if (copied == false) { cudaMemcpy(vx_h, vx, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(vy_h, vy, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(p_h, p, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(C_h, C, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(mu_h, mu, size_b, cudaMemcpyDeviceToHost); if (vibration == 1) { WW_from_Phi << <gridD, blockD >> > (WX, WY, Phi, C); cudaMemcpy(Phi_h, Phi, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(WX_h, WX, size_b, cudaMemcpyDeviceToHost); cudaMemcpy(WY_h, WY, size_b, cudaMemcpyDeviceToHost); } copied = true; } //Geom.save(vx_h, vy_h, p_h, C_h, mu_h, iter, write_i, timeq, kk); double * var[10]; unsigned int n = 0; var[n] = vx_h; n++; var[n] = vy_h; n++; var[n] = p_h; n++; var[n] = C_h; n++; var[n] = mu_h; n++; if (vibration == 1) { var[n] = Phi_h; n++; } Geom.save(var, n, iter, write_i, timeq, kk); } copied = false; // the end of 4th step if (timeq > limit_timeq || stop == 1) return 0; } //the end of the main time loop }
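The pressure solve inside the time loop above follows a standard fixed-point pattern: re-run the Poisson kernel plus the block-wise reduction, and stop once the change of the reduced quantity psiav, measured against eps0*psiav0, is small enough or the iteration cap kk is reached. Below is a minimal host-side sketch of that convergence loop; update_once() is a made-up stand-in for the kernel launch and reduction, not the simulation's actual API.

#include <cmath>
#include <cstdio>

// Hypothetical stand-in for one Poisson sweep plus reduction on the device;
// returns the new reduced quantity (the role psiav plays in the solver loop).
double update_once(double prev) { return 0.5 * prev + 1.0; }

int main() {
    const double eps0 = 1e-6;   // relative tolerance, as in the solver loop
    const int    kmax = 100000; // iteration cap (the role of kk)
    double psiav0 = 1.0, eps = 1.0;
    int k = 0;
    while (eps > eps0 * psiav0 && k < kmax) {
        ++k;
        double psiav = update_once(psiav0); // kernel + reduction in the real code
        eps = std::fabs(psiav - psiav0);    // absolute change of the reduced value
        psiav0 = psiav;
    }
    std::printf("stopped after %d iterations, psiav = %f\n", k, psiav0);
    return 0;
}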
c247e812f982359a12ebb0ffa342f8159dc016c0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include "../../benchmark.h" #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char** argv); /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) void fatal(const char *s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j, index=0; FILE *fp; char str[STR_SIZE]; if( (fp = fopen(file, "w" )) == 0 ) printf( "The file was not opened\n" ); for (i=0; i < grid_rows; i++) for (j=0; j < grid_cols; j++) { sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]); fputs(str,fp); index++; } fclose(fp); } void readinput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j; FILE *fp; char str[STR_SIZE]; float val; if( (fp = fopen(file, "r" )) ==0 ) printf( "The file was not opened\n" ); for (i=0; i <= grid_rows-1; i++) for (j=0; j <= grid_cols-1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal("not enough lines in file"); //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal("invalid file format"); vect[i*grid_cols+j] = val; } fclose(fp); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void calculate_temp(int iteration, //number of iteration float *power, //power input float *temp_src, //temperature input/output float *temp_dst, //temperature input/output int grid_cols, //Col of grid int grid_rows, //Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, //Capacitance float Rx, float Ry, float Rz, float step, float time_elapsed){ __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // each block finally computes result for a small block // after N iterations. 
// it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows*by-border_rows; int blkX = small_block_cols*bx-border_cols; int blkYmax = blkY+BLOCK_SIZE-1; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int yidx = blkY+ty; int xidx = blkX+tx; // load data if it is within the valid input range int loadYidx=yidx, loadXidx=xidx; int index = grid_cols*loadYidx+loadXidx; if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){ temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1; int N = ty-1; int S = ty+1; int W = tx-1; int E = tx+1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(tx, validXmin, validXmax) && \ IN_RANGE(ty, validYmin, validYmax) ) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range temp_on_cuda[ty][tx]= temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ temp_dst[index]= temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; int i =0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; i++; hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst], 
col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed); printf("launched t=%f, i=%i;\n", t, i); } return dst; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]); fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n"); fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n"); fprintf(stderr, "\t<sim_time> - number of iterations\n"); fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n"); fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n"); fprintf(stderr, "\t<output_file> - name of the output file\n"); exit(1); } int main(int argc, char** argv) { // printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { int size; int grid_rows,grid_cols; float *FilesavingTemp,*FilesavingPower,*MatrixOut; char *tfile, *pfile; int total_iterations = 60; int pyramid_height = 1; // number of iterations if (argc != 7) usage(argc, argv); if((grid_rows = atoi(argv[1]))<=0|| (grid_cols = atoi(argv[1]))<=0|| (pyramid_height = atoi(argv[2]))<=0|| (total_iterations = atoi(argv[3]))<=0) usage(argc, argv); tfile=argv[4]; pfile=argv[5]; size=grid_rows*grid_cols; /* --------------- pyramid parameters --------------- */ # define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE/2; int borderRows = (pyramid_height)*EXPAND_RATE/2; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1); int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1); FilesavingTemp = (float *) malloc(size*sizeof(float)); FilesavingPower = (float *) malloc(size*sizeof(float)); MatrixOut = (float *) calloc (size, sizeof(float)); if( !FilesavingPower || !FilesavingTemp || !MatrixOut) fatal("unable to allocate memory"); //printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\ pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow); readinput(FilesavingTemp, grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); BENCHMARK.start_total(); float *MatrixTemp[2], *MatrixPower; hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size); hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size); hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice); hipMalloc((void**)&MatrixPower, sizeof(float)*size); hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice); //printf("Start computing the transient temperature\n"); BENCHMARK.start_kernel(); int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \ total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows); BENCHMARK.end_kernel(); //printf("Ending simulation\n"); hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost); BENCHMARK.end_total(); hipFree(MatrixPower); hipFree(MatrixTemp[0]); hipFree(MatrixTemp[1]); free(MatrixOut); }
c247e812f982359a12ebb0ffa342f8159dc016c0.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include "../../benchmark.h" #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 /* chip parameters */ float t_chip = 0.0005; float chip_height = 0.016; float chip_width = 0.016; /* ambient temperature, assuming no package at all */ float amb_temp = 80.0; void run(int argc, char** argv); /* define timer macros */ #define pin_stats_reset() startCycle() #define pin_stats_pause(cycles) stopCycle(cycles) #define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles) void fatal(const char *s) { fprintf(stderr, "error: %s\n", s); } void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j, index=0; FILE *fp; char str[STR_SIZE]; if( (fp = fopen(file, "w" )) == 0 ) printf( "The file was not opened\n" ); for (i=0; i < grid_rows; i++) for (j=0; j < grid_cols; j++) { sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]); fputs(str,fp); index++; } fclose(fp); } void readinput(float *vect, int grid_rows, int grid_cols, char *file){ int i,j; FILE *fp; char str[STR_SIZE]; float val; if( (fp = fopen(file, "r" )) ==0 ) printf( "The file was not opened\n" ); for (i=0; i <= grid_rows-1; i++) for (j=0; j <= grid_cols-1; j++) { fgets(str, STR_SIZE, fp); if (feof(fp)) fatal("not enough lines in file"); //if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1))) if ((sscanf(str, "%f", &val) != 1)) fatal("invalid file format"); vect[i*grid_cols+j] = val; } fclose(fp); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void calculate_temp(int iteration, //number of iteration float *power, //power input float *temp_src, //temperature input/output float *temp_dst, //temperature input/output int grid_cols, //Col of grid int grid_rows, //Row of grid int border_cols, // border offset int border_rows, // border offset float Cap, //Capacitance float Rx, float Ry, float Rz, float step, float time_elapsed){ __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result float amb_temp = 80.0; float step_div_Cap; float Rx_1,Ry_1,Rz_1; int bx = blockIdx.x; int by = blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; step_div_Cap=step/Cap; Rx_1=1/Rx; Ry_1=1/Ry; Rz_1=1/Rz; // each block finally computes result for a small block // after N iterations. 
// it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE // calculate the boundary for the block according to // the boundary of its small block int blkY = small_block_rows*by-border_rows; int blkX = small_block_cols*bx-border_cols; int blkYmax = blkY+BLOCK_SIZE-1; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int yidx = blkY+ty; int xidx = blkX+tx; // load data if it is within the valid input range int loadYidx=yidx, loadXidx=xidx; int index = grid_cols*loadYidx+loadXidx; if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){ temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory } __syncthreads(); // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validYmin = (blkY < 0) ? -blkY : 0; int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1; int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1; int N = ty-1; int S = ty+1; int W = tx-1; int E = tx+1; N = (N < validYmin) ? validYmin : N; S = (S > validYmax) ? validYmax : S; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \ IN_RANGE(tx, validXmin, validXmax) && \ IN_RANGE(ty, validYmin, validYmax) ) { computed = true; temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] + (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 + (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 + (amb_temp - temp_on_cuda[ty][tx]) * Rz_1); } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range temp_on_cuda[ty][tx]= temp_t[ty][tx]; __syncthreads(); } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ temp_dst[index]= temp_t[ty][tx]; } } /* compute N time steps */ int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \ int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blockCols, blockRows); float grid_height = chip_height / row; float grid_width = chip_width / col; float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); float Rz = t_chip / (K_SI * grid_height * grid_width); float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); float step = PRECISION / max_slope; float t; float time_elapsed; time_elapsed=0.001; int src = 1, dst = 0; int i =0; for (t = 0; t < total_iterations; t+=num_iterations) { int temp = src; src = dst; dst = temp; i++; calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst], col,row,borderCols, borderRows, 
Cap,Rx,Ry,Rz,step,time_elapsed); printf("launched t=%f, i=%i;\n", t, i); } return dst; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]); fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n"); fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n"); fprintf(stderr, "\t<sim_time> - number of iterations\n"); fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n"); fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n"); fprintf(stderr, "\t<output_file> - name of the output file\n"); exit(1); } int main(int argc, char** argv) { // printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { int size; int grid_rows,grid_cols; float *FilesavingTemp,*FilesavingPower,*MatrixOut; char *tfile, *pfile; int total_iterations = 60; int pyramid_height = 1; // number of iterations if (argc != 7) usage(argc, argv); if((grid_rows = atoi(argv[1]))<=0|| (grid_cols = atoi(argv[1]))<=0|| (pyramid_height = atoi(argv[2]))<=0|| (total_iterations = atoi(argv[3]))<=0) usage(argc, argv); tfile=argv[4]; pfile=argv[5]; size=grid_rows*grid_cols; /* --------------- pyramid parameters --------------- */ # define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline int borderCols = (pyramid_height)*EXPAND_RATE/2; int borderRows = (pyramid_height)*EXPAND_RATE/2; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE; int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1); int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1); FilesavingTemp = (float *) malloc(size*sizeof(float)); FilesavingPower = (float *) malloc(size*sizeof(float)); MatrixOut = (float *) calloc (size, sizeof(float)); if( !FilesavingPower || !FilesavingTemp || !MatrixOut) fatal("unable to allocate memory"); //printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\ pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow); readinput(FilesavingTemp, grid_rows, grid_cols, tfile); readinput(FilesavingPower, grid_rows, grid_cols, pfile); BENCHMARK.start_total(); float *MatrixTemp[2], *MatrixPower; cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size); cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size); cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice); cudaMalloc((void**)&MatrixPower, sizeof(float)*size); cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice); //printf("Start computing the transient temperature\n"); BENCHMARK.start_kernel(); int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \ total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows); BENCHMARK.end_kernel(); //printf("Ending simulation\n"); cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost); BENCHMARK.end_total(); cudaFree(MatrixPower); cudaFree(MatrixTemp[0]); cudaFree(MatrixTemp[1]); free(MatrixOut); }
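The calculate_temp kernel in the pair above trades redundant work for fewer global-memory passes: a single launch advances the temperature field by pyramid_height time steps in shared memory, so each block reads a halo of pyramid_height*EXPAND_RATE/2 cells per side and only its inner (BLOCK_SIZE - pyramid_height*EXPAND_RATE)-wide patch produces final output, and the grid is sized by ceil-dividing the domain by that smaller effective block. The following standalone sketch reproduces the launch-geometry arithmetic done in run(); the struct and function names are illustrative, not part of the benchmark.

#include <cstdio>

// Mirrors the sizing computed in run(); names are made up for this sketch.
struct PyramidGeometry {
    int borderCols, borderRows;       // halo cells loaded per side
    int smallBlockCol, smallBlockRow; // cells per block that yield final output
    int blockCols, blockRows;         // launch grid dimensions
};

PyramidGeometry pyramid_geometry(int grid_cols, int grid_rows,
                                 int pyramid_height, int block_size,
                                 int expand_rate = 2) {
    PyramidGeometry g;
    g.borderCols    = pyramid_height * expand_rate / 2;
    g.borderRows    = pyramid_height * expand_rate / 2;
    g.smallBlockCol = block_size - pyramid_height * expand_rate;
    g.smallBlockRow = block_size - pyramid_height * expand_rate;
    // ceil-divide the domain by the effective (non-halo) block size
    g.blockCols = grid_cols / g.smallBlockCol + (grid_cols % g.smallBlockCol == 0 ? 0 : 1);
    g.blockRows = grid_rows / g.smallBlockRow + (grid_rows % g.smallBlockRow == 0 ? 0 : 1);
    return g;
}

int main() {
    PyramidGeometry g = pyramid_geometry(1024, 1024, /*pyramid_height=*/4, /*BLOCK_SIZE=*/16);
    std::printf("border=%d effective=%d grid=%d x %d\n",
                g.borderCols, g.smallBlockCol, g.blockCols, g.blockRows);
    return 0;
}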
6f8ae46f79a468582ddf57b0e75a352616c0ecd4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from ziterilu.cu normal z -> s, Sun May 3 11:22:58 2015 */ #include "common_magmasparse.h" #define PRECISION_s __global__ void magma_siterilu_csr_kernel( magma_int_t num_rows, magma_int_t nnz, magma_index_t *rowidxA, magma_index_t *colidxA, const float * __restrict__ A, magma_index_t *rowptrL, magma_index_t *colidxL, float *valL, magma_index_t *rowptrU, magma_index_t *rowidxU, float *valU ){ int i, j; int k = blockDim.x * blockIdx.x + threadIdx.x; float zero = MAGMA_S_MAKE(0.0, 0.0); float s, sp; int il, iu, jl, ju; if (k < nnz) { i = rowidxA[k]; j = colidxA[k]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) s = __ldg( A+k ); #else s = A[k]; #endif il = rowptrL[i]; iu = rowptrU[j]; while (il < rowptrL[i+1] && iu < rowptrU[j+1]) { sp = zero; jl = colidxL[il]; ju = rowidxU[iu]; // avoid branching sp = ( jl == ju ) ? valL[il] * valU[iu] : sp; s = ( jl == ju ) ? s-sp : s; il = ( jl <= ju ) ? il+1 : il; iu = ( jl >= ju ) ? iu+1 : iu; } // undo the last operation (it must be the last) s += sp; __syncthreads(); if ( i>j ) // modify l entry valL[il-1] = s / valU[rowptrU[j+1]-1]; else{ // modify u entry valU[iu-1] = s; } } }// kernel /** Purpose ------- This routine iteratively computes an incomplete LU factorization. The idea is according to Edmond Chow's presentation at SIAM 2014. This routine was used in the ISC 2015 paper: E. Chow et al.: 'Study of an Asynchronous Iterative Algorithm for Computing Incomplete Factorizations on GPUs' The input format of the matrix is Magma_CSRCOO for the upper and lower triangular parts. Note however, that we flip col and rowidx for the U-part. Every component of L and U is handled by one thread. Arguments --------- @param[in] A magma_s_matrix input matrix A determing initial guess & processing order @param[in][out] L magma_s_matrix input/output matrix L containing the ILU approximation @param[in][out] U magma_s_matrix input/output matrix U containing the ILU approximation @param[in] A_CSR magma_s_matrix input/output matrix containing the IC approximation @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_siterilu_csr( magma_s_matrix A, magma_s_matrix L, magma_s_matrix U, magma_queue_t queue ){ int blocksize1 = 128; int blocksize2 = 1; int dimgrid1 = ( A.nnz + blocksize1 -1 ) / blocksize1; int dimgrid2 = 1; int dimgrid3 = 1; // Runtime API // hipFuncCachePreferShared: shared memory is 48 KB // hipFuncCachePreferEqual: shared memory is 32 KB // hipFuncCachePreferL1: shared memory is 16 KB // hipFuncCachePreferNone: no preference //hipFuncSetCacheConfig(hipFuncCachePreferShared); hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_siterilu_csr_kernel), dim3(grid), dim3(block), 0, magma_stream , A.num_rows, A.nnz, A.rowidx, A.col, A.val, L.row, L.col, L.val, U.row, U.col, U.val ); return MAGMA_SUCCESS; }
6f8ae46f79a468582ddf57b0e75a352616c0ecd4.cu
/* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from ziterilu.cu normal z -> s, Sun May 3 11:22:58 2015 */ #include "common_magmasparse.h" #define PRECISION_s __global__ void magma_siterilu_csr_kernel( magma_int_t num_rows, magma_int_t nnz, magma_index_t *rowidxA, magma_index_t *colidxA, const float * __restrict__ A, magma_index_t *rowptrL, magma_index_t *colidxL, float *valL, magma_index_t *rowptrU, magma_index_t *rowidxU, float *valU ){ int i, j; int k = blockDim.x * blockIdx.x + threadIdx.x; float zero = MAGMA_S_MAKE(0.0, 0.0); float s, sp; int il, iu, jl, ju; if (k < nnz) { i = rowidxA[k]; j = colidxA[k]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) s = __ldg( A+k ); #else s = A[k]; #endif il = rowptrL[i]; iu = rowptrU[j]; while (il < rowptrL[i+1] && iu < rowptrU[j+1]) { sp = zero; jl = colidxL[il]; ju = rowidxU[iu]; // avoid branching sp = ( jl == ju ) ? valL[il] * valU[iu] : sp; s = ( jl == ju ) ? s-sp : s; il = ( jl <= ju ) ? il+1 : il; iu = ( jl >= ju ) ? iu+1 : iu; } // undo the last operation (it must be the last) s += sp; __syncthreads(); if ( i>j ) // modify l entry valL[il-1] = s / valU[rowptrU[j+1]-1]; else{ // modify u entry valU[iu-1] = s; } } }// kernel /** Purpose ------- This routine iteratively computes an incomplete LU factorization. The idea is according to Edmond Chow's presentation at SIAM 2014. This routine was used in the ISC 2015 paper: E. Chow et al.: 'Study of an Asynchronous Iterative Algorithm for Computing Incomplete Factorizations on GPUs' The input format of the matrix is Magma_CSRCOO for the upper and lower triangular parts. Note however, that we flip col and rowidx for the U-part. Every component of L and U is handled by one thread. Arguments --------- @param[in] A magma_s_matrix input matrix A determing initial guess & processing order @param[in][out] L magma_s_matrix input/output matrix L containing the ILU approximation @param[in][out] U magma_s_matrix input/output matrix U containing the ILU approximation @param[in] A_CSR magma_s_matrix input/output matrix containing the IC approximation @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_siterilu_csr( magma_s_matrix A, magma_s_matrix L, magma_s_matrix U, magma_queue_t queue ){ int blocksize1 = 128; int blocksize2 = 1; int dimgrid1 = ( A.nnz + blocksize1 -1 ) / blocksize1; int dimgrid2 = 1; int dimgrid3 = 1; // Runtime API // cudaFuncCachePreferShared: shared memory is 48 KB // cudaFuncCachePreferEqual: shared memory is 32 KB // cudaFuncCachePreferL1: shared memory is 16 KB // cudaFuncCachePreferNone: no preference //cudaFuncSetCacheConfig(cudaFuncCachePreferShared); cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_siterilu_csr_kernel<<< grid, block, 0, magma_stream >>> ( A.num_rows, A.nnz, A.rowidx, A.col, A.val, L.row, L.col, L.val, U.row, U.col, U.val ); return MAGMA_SUCCESS; }
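As the Purpose comment in the pair above explains, the kernel performs one asynchronous sweep of Chow's fixed-point ILU iteration with one thread per stored nonzero: for entry a_ij it accumulates s = a_ij - sum_k l_ik*u_kj over the intersection of row i of L and column j of U, then writes l_ij = s / u_jj when i > j and u_ij = s otherwise. Below is a much-simplified dense, sequential sketch of that same update; it is illustrative only, since the real routine walks CSR/COO sparsity patterns and updates all entries concurrently from the previous values.

#include <vector>

// One sweep of the iterative ILU update on dense n x n factors, row-major.
// A is the input matrix; L (unit lower) and U (upper) hold the current guess
// and are updated in place. Sparsity is ignored purely for clarity.
void iterilu_sweep_dense(const std::vector<double>& A,
                         std::vector<double>& L,
                         std::vector<double>& U, int n) {
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            double s = A[i * n + j];
            int kmax = (i < j) ? i : j; // intersection of L-row i and U-column j
            for (int k = 0; k < kmax; ++k)
                s -= L[i * n + k] * U[k * n + j];
            if (i > j)
                L[i * n + j] = s / U[j * n + j]; // strictly lower part -> L entry
            else
                U[i * n + j] = s;                // diagonal and upper part -> U entry
        }
    }
}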
8101fb8a4cd1499db99b312531f71a33289e4230.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <string> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <cmath> #include <fstream> #include <sstream> #define DIM 32 const float PI = 3.14159265358979f; using namespace std; /*************************************************/ class Complex { public: __host__ __device__ Complex() : real(0.0f), imag(0.0f) {} __host__ __device__ Complex(float r) : real(r), imag(0.0f) {} __host__ __device__ Complex(float r, float i) : real(r), imag(i) {} __host__ __device__ Complex operator+(const Complex &b) const { float newReal = real + b.real; float newImag = imag + b.imag; Complex newComplex(newReal, newImag); return newComplex; } __host__ __device__ Complex operator-(const Complex &b) const { float newReal = real - b.real; float newImag = imag - b.imag; Complex newComplex(newReal, newImag); return newComplex; } __host__ __device__ Complex operator*(const Complex &b) const { float newReal = real * b.real - imag * b.imag; float newImag = real * b.imag + imag * b.real; Complex newComplex(newReal, newImag); return newComplex; } __host__ __device__ Complex mag() const { float magNum = sqrt(real * real + imag * imag); Complex magComplex(magNum); return magComplex; } __host__ __device__ Complex angle() const { float angle = atan(1.0 * imag / real)*180/PI; Complex angleComplex(angle); return angleComplex; } __host__ __device__ Complex conj() const { Complex newComplex(real, -1.0 * imag); return newComplex; } float real; float imag; }; std::ostream& operator<< (std::ostream& os, const Complex& rhs) { Complex c(rhs); if(fabsf(rhs.imag) < 1e-10) c.imag = 0.0f; if(fabsf(rhs.real) < 1e-10) c.real = 0.0f; if(c.imag == 0) { os << c.real; } else { os << "(" << c.real << "," << c.imag << ")"; } return os; } class InputImage { public: InputImage(const char* filename){ std::ifstream ifs(filename); if(!ifs) { std::cout << "Can't open image file " << filename << std::endl; exit(1); } ifs >> w >> h; data = new Complex[w * h]; for(int r = 0; r < h; ++r) { for(int c = 0; c < w; ++c) { // float real; // ifs >> real; // data[r * w + c] = Complex(real); string word; ifs >> word; int found = word.find_first_not_of(" \t"); if (word[found] == '(') { istringstream iss(word); char temp; float real, imag; iss >> temp >> real >> temp >> imag >> temp; data[r * w + c] = Complex(real, imag); } else { istringstream iss(word); float real; iss >> real; data[r * w + c] = Complex(real); } } } } int get_width() const{ return w; } int get_height() const{ return h; } //returns a pointer to the image data. Note the return is a 1D //array which represents a 2D image. 
The data for row 1 is //immediately following the data for row 0 in the 1D array Complex* get_image_data() const{ return data; } //use this to save output from forward DFT void save_image_data(const char* filename, Complex* d, int w, int h){ std::ofstream ofs(filename); if(!ofs) { std::cout << "Can't create output image " << filename << std::endl; return; } ofs << w << " " << h << std::endl; for(int r = 0; r < h; ++r) { for(int c = 0; c < w; ++c) { ofs << d[r * w + c] << " "; } ofs << std::endl; } } //use this to save output from reverse DFT void save_image_data_real(const char* filename, Complex* d, int w, int h){ std::ofstream ofs(filename); if(!ofs) { std::cout << "Can't create output image " << filename << std::endl; return; } ofs << w << " " << h << std::endl; for (int r = 0; r < h; ++r) { for (int c = 0; c < w; ++c) { ofs << d[r * w + c].real << " "; } ofs << std::endl; } } private: int w; int h; Complex* data; }; /*************************************************/ struct DeviceData { Complex *d_data; Complex *d_temp; Complex *d_res; }; void cleanup(DeviceData *d) { hipFree(d->d_data); hipFree(d->d_temp); hipFree(d->d_res); } /*************** forward transform by row **********************/ __global__ void transByRow (Complex* dst, Complex* src, int width, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int index = x + y * width; if (x < width && y < height) { for (int i = 0; i < width; i++) { float re = (src + y*width + i)->real; float im = (src + y*width + i)->imag; Complex w = Complex(cos(2*PI*i*x/width), -sin(2*PI*i*x/width)); (dst + index)->real += re * w.real - im*w.imag; (dst + index)->imag += re * w.imag + im*w.real; } } } /*************** forward transform by column **********************/ __global__ void transByCol (Complex* dst, Complex* src, int width, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int index = x + y * width; if (x < width && y < height) { for (int i = 0; i < height; i++) { float re = (src + x + i*width)->real; float im = (src + x + i*width)->imag; Complex w = Complex(cos(2*PI*i*y/height), -sin(2*PI*i*y/height)); (dst + index)->real += re * w.real - im*w.imag; (dst + index)->imag += re * w.imag + im*w.real; } } } /*************** reverse transform by row **********************/ __global__ void revByRow (Complex* dst, Complex* src, int width, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int index = x + y * width; if (x < width && y < height) { for (int i = 0; i < width; i++) { float re = (src + y*width + i)->real; float im = (src + y*width + i)->imag; Complex w = Complex(cos(2*PI*i*x/width), sin(2*PI*i*x/width)); (dst + index)->real += (re * w.real - im*w.imag)/width; (dst + index)->imag += (re * w.imag + im*w.real)/width; } } } /*************** reverse transform by column **********************/ __global__ void revByCol (Complex* dst, Complex* src, int width, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int index = x + y * width; if (x < width && y < height) { for (int i = 0; i < height; i++) { float re = (src + x + i*width)->real; float im = (src + x + i*width)->imag; Complex w = Complex(cos(2*PI*i*y/height), sin(2*PI*i*y/height)); (dst + index)->real += (re * w.real - im*w.imag)/height; (dst + index)->imag += (re * w.imag + im*w.real)/height; } } } int main (int argc, char* argv[]) { DeviceData devs; string str 
= "forward"; bool forward = (strcmp(argv[1], str.c_str()) == 0 ); char* inFile = argv[2]; char* outFile = argv[3]; InputImage image(inFile); int height = image.get_height(); int width = image.get_width(); int N = height * width; Complex res[N]; fill_n(res, N, 1); Complex* data = image.get_image_data(); hipMalloc((void**)&devs.d_data, N * sizeof(Complex)); hipMalloc((void**)&devs.d_res, N * sizeof(Complex)); hipMalloc((void**)&devs.d_temp, N * sizeof(Complex)); hipMemcpy(devs.d_data, data, N * sizeof(Complex), hipMemcpyHostToDevice); dim3 blocks((width + DIM - 1) / DIM, (height + DIM - 1) / DIM); dim3 threads(DIM, DIM); cout << width << ", " << height << forward << endl; if (forward) { hipLaunchKernelGGL(( transByRow), dim3(blocks), dim3(threads), 0, 0, devs.d_temp, devs.d_data, width, height); hipLaunchKernelGGL(( transByCol), dim3(blocks), dim3(threads), 0, 0, devs.d_res, devs.d_temp, width, height); hipMemcpy(res, devs.d_res, N*sizeof(Complex), hipMemcpyDeviceToHost); image.save_image_data(outFile, res, width, height); } else { hipLaunchKernelGGL(( revByRow), dim3(blocks), dim3(threads), 0, 0, devs.d_temp, devs.d_data, width, height); hipLaunchKernelGGL(( revByCol), dim3(blocks), dim3(threads), 0, 0, devs.d_res, devs.d_temp, width, height); hipMemcpy(res, devs.d_res, N*sizeof(Complex), hipMemcpyDeviceToHost); image.save_image_data_real(outFile, res, width, height); } cleanup(&devs); return 0; }
8101fb8a4cd1499db99b312531f71a33289e4230.cu
#include <stdio.h> #include <iostream> #include <string> #include <stdlib.h> #include <math.h> #include <cuda.h> #include <cmath> #include <fstream> #include <sstream> #define DIM 32 const float PI = 3.14159265358979f; using namespace std; /*************************************************/ class Complex { public: __host__ __device__ Complex() : real(0.0f), imag(0.0f) {} __host__ __device__ Complex(float r) : real(r), imag(0.0f) {} __host__ __device__ Complex(float r, float i) : real(r), imag(i) {} __host__ __device__ Complex operator+(const Complex &b) const { float newReal = real + b.real; float newImag = imag + b.imag; Complex newComplex(newReal, newImag); return newComplex; } __host__ __device__ Complex operator-(const Complex &b) const { float newReal = real - b.real; float newImag = imag - b.imag; Complex newComplex(newReal, newImag); return newComplex; } __host__ __device__ Complex operator*(const Complex &b) const { float newReal = real * b.real - imag * b.imag; float newImag = real * b.imag + imag * b.real; Complex newComplex(newReal, newImag); return newComplex; } __host__ __device__ Complex mag() const { float magNum = sqrt(real * real + imag * imag); Complex magComplex(magNum); return magComplex; } __host__ __device__ Complex angle() const { float angle = atan(1.0 * imag / real)*180/PI; Complex angleComplex(angle); return angleComplex; } __host__ __device__ Complex conj() const { Complex newComplex(real, -1.0 * imag); return newComplex; } float real; float imag; }; std::ostream& operator<< (std::ostream& os, const Complex& rhs) { Complex c(rhs); if(fabsf(rhs.imag) < 1e-10) c.imag = 0.0f; if(fabsf(rhs.real) < 1e-10) c.real = 0.0f; if(c.imag == 0) { os << c.real; } else { os << "(" << c.real << "," << c.imag << ")"; } return os; } class InputImage { public: InputImage(const char* filename){ std::ifstream ifs(filename); if(!ifs) { std::cout << "Can't open image file " << filename << std::endl; exit(1); } ifs >> w >> h; data = new Complex[w * h]; for(int r = 0; r < h; ++r) { for(int c = 0; c < w; ++c) { // float real; // ifs >> real; // data[r * w + c] = Complex(real); string word; ifs >> word; int found = word.find_first_not_of(" \t"); if (word[found] == '(') { istringstream iss(word); char temp; float real, imag; iss >> temp >> real >> temp >> imag >> temp; data[r * w + c] = Complex(real, imag); } else { istringstream iss(word); float real; iss >> real; data[r * w + c] = Complex(real); } } } } int get_width() const{ return w; } int get_height() const{ return h; } //returns a pointer to the image data. Note the return is a 1D //array which represents a 2D image. 
The data for row 1 is //immediately following the data for row 0 in the 1D array Complex* get_image_data() const{ return data; } //use this to save output from forward DFT void save_image_data(const char* filename, Complex* d, int w, int h){ std::ofstream ofs(filename); if(!ofs) { std::cout << "Can't create output image " << filename << std::endl; return; } ofs << w << " " << h << std::endl; for(int r = 0; r < h; ++r) { for(int c = 0; c < w; ++c) { ofs << d[r * w + c] << " "; } ofs << std::endl; } } //use this to save output from reverse DFT void save_image_data_real(const char* filename, Complex* d, int w, int h){ std::ofstream ofs(filename); if(!ofs) { std::cout << "Can't create output image " << filename << std::endl; return; } ofs << w << " " << h << std::endl; for (int r = 0; r < h; ++r) { for (int c = 0; c < w; ++c) { ofs << d[r * w + c].real << " "; } ofs << std::endl; } } private: int w; int h; Complex* data; }; /*************************************************/ struct DeviceData { Complex *d_data; Complex *d_temp; Complex *d_res; }; void cleanup(DeviceData *d) { cudaFree(d->d_data); cudaFree(d->d_temp); cudaFree(d->d_res); } /*************** forward transform by row **********************/ __global__ void transByRow (Complex* dst, Complex* src, int width, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int index = x + y * width; if (x < width && y < height) { for (int i = 0; i < width; i++) { float re = (src + y*width + i)->real; float im = (src + y*width + i)->imag; Complex w = Complex(cos(2*PI*i*x/width), -sin(2*PI*i*x/width)); (dst + index)->real += re * w.real - im*w.imag; (dst + index)->imag += re * w.imag + im*w.real; } } } /*************** forward transform by column **********************/ __global__ void transByCol (Complex* dst, Complex* src, int width, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int index = x + y * width; if (x < width && y < height) { for (int i = 0; i < height; i++) { float re = (src + x + i*width)->real; float im = (src + x + i*width)->imag; Complex w = Complex(cos(2*PI*i*y/height), -sin(2*PI*i*y/height)); (dst + index)->real += re * w.real - im*w.imag; (dst + index)->imag += re * w.imag + im*w.real; } } } /*************** reverse transform by row **********************/ __global__ void revByRow (Complex* dst, Complex* src, int width, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int index = x + y * width; if (x < width && y < height) { for (int i = 0; i < width; i++) { float re = (src + y*width + i)->real; float im = (src + y*width + i)->imag; Complex w = Complex(cos(2*PI*i*x/width), sin(2*PI*i*x/width)); (dst + index)->real += (re * w.real - im*w.imag)/width; (dst + index)->imag += (re * w.imag + im*w.real)/width; } } } /*************** reverse transform by column **********************/ __global__ void revByCol (Complex* dst, Complex* src, int width, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int index = x + y * width; if (x < width && y < height) { for (int i = 0; i < height; i++) { float re = (src + x + i*width)->real; float im = (src + x + i*width)->imag; Complex w = Complex(cos(2*PI*i*y/height), sin(2*PI*i*y/height)); (dst + index)->real += (re * w.real - im*w.imag)/height; (dst + index)->imag += (re * w.imag + im*w.real)/height; } } } int main (int argc, char* argv[]) { DeviceData devs; string 
str = "forward"; bool forward = (strcmp(argv[1], str.c_str()) == 0 ); char* inFile = argv[2]; char* outFile = argv[3]; InputImage image(inFile); int height = image.get_height(); int width = image.get_width(); int N = height * width; Complex res[N]; fill_n(res, N, 1); Complex* data = image.get_image_data(); cudaMalloc((void**)&devs.d_data, N * sizeof(Complex)); cudaMalloc((void**)&devs.d_res, N * sizeof(Complex)); cudaMalloc((void**)&devs.d_temp, N * sizeof(Complex)); cudaMemcpy(devs.d_data, data, N * sizeof(Complex), cudaMemcpyHostToDevice); dim3 blocks((width + DIM - 1) / DIM, (height + DIM - 1) / DIM); dim3 threads(DIM, DIM); cout << width << ", " << height << forward << endl; if (forward) { transByRow<<<blocks, threads>>>(devs.d_temp, devs.d_data, width, height); transByCol<<<blocks, threads>>>(devs.d_res, devs.d_temp, width, height); cudaMemcpy(res, devs.d_res, N*sizeof(Complex), cudaMemcpyDeviceToHost); image.save_image_data(outFile, res, width, height); } else { revByRow<<<blocks, threads>>>(devs.d_temp, devs.d_data, width, height); revByCol<<<blocks, threads>>>(devs.d_res, devs.d_temp, width, height); cudaMemcpy(res, devs.d_res, N*sizeof(Complex), cudaMemcpyDeviceToHost); image.save_image_data_real(outFile, res, width, height); } cleanup(&devs); return 0; }
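The transform kernels in the pair above evaluate the direct DFT (each output is an O(N) sum) rather than an FFT: transByRow computes X[x] = sum_i src[i]*(cos(2*pi*i*x/W) - i*sin(2*pi*i*x/W)) along each row, transByCol repeats the same sum down each column, and the reverse kernels flip the sine's sign and divide by the length. The small host-side 1-D forward reference below can be used to spot-check one row of the device result; the helper name and the use of std::complex are illustrative, not part of the original program.

#include <cmath>
#include <complex>
#include <vector>

// Direct forward DFT of one length-N sequence; mirrors what transByRow
// computes for a single image row. Reference only, O(N^2).
std::vector<std::complex<float>> dft_row(const std::vector<std::complex<float>>& src) {
    const float PI = 3.14159265358979f;
    const int N = static_cast<int>(src.size());
    std::vector<std::complex<float>> dst(N);
    for (int k = 0; k < N; ++k) {
        std::complex<float> acc(0.0f, 0.0f);
        for (int n = 0; n < N; ++n) {
            // twiddle factor: cos(2*pi*n*k/N) - i*sin(2*pi*n*k/N)
            std::complex<float> w(std::cos(2 * PI * n * k / N),
                                  -std::sin(2 * PI * n * k / N));
            acc += src[n] * w;
        }
        dst[k] = acc;
    }
    return dst;
}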
2364dc84313b0fa505d3c9219bbb9df86055ff0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Find BLANK and replace your own code. * And submit report why do you replace the blank that way. */ /* 2015004693_YangSangheon */ #include<stdlib.h> #include<iostream> #include<fstream> #include<vector> #include<string> #define TILE_WIDTH 16/* set TILE_WIDTH 16 for the evaluation! */ #define MAXPOOL_INPUT_FILENAME "input.txt" #define A_FILENAME "a.txt" #define B_FILENAME "b.txt" #define C_FILENAME "c.txt" using namespace std; __global__ void maxpool(float *input, float *output, const int input_size, const int filter_size) { // input : input_matrix address // output : output buffer address // input_size : width, height of input matrix // filter_size : filter_size of maxpooling // all input, output matrices are vectorized int col = blockDim.x * blockIdx.x + threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; // out of bound // CHANGE float tmp = 0.0; float Max = -999999.9; int output_size = input_size/filter_size; int tmp_index = (input_size*filter_size*row)+(filter_size*col); for(int i = 0; i < filter_size; i++){ for(int j = 0; j < filter_size; j++){ tmp = input[tmp_index+(input_size*j)+i]; if(Max<tmp) Max = tmp; } } if(col < output_size && row < output_size) output[output_size*row+col] = Max; //printf("thread_made\n"); } __global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_size){ // a, b, c : input matrix address // alpha, beta : input constant // output : output buffer address // input_size : width, height of input matrix // all input, output matrices are vectorized int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; int row = by*blockDim.y + ty; int col = bx*blockDim.x + tx; //if(row>=input_size ||col>=input_size) { return; } if(row >= (input_size/TILE_WIDTH+1)*TILE_WIDTH ||col >= (input_size/TILE_WIDTH+1)*TILE_WIDTH) {return;} // allocate 2D tiles in __shared__ memory __shared__ float s_a[TILE_WIDTH][TILE_WIDTH]; __shared__ float s_b[TILE_WIDTH][TILE_WIDTH]; float result = 0; // make sure you handle the case when the matrix sizes are not // multiple of TILE_WIDTH! // loop over the tiles of the input in phases int sqr_input_size = input_size * input_size; int a_index; int b_index; int output_index = row*input_size + col; for(int p = 0; p < input_size/TILE_WIDTH+1 ;p++){ // CHANGE // You need to use __syncthreads() a few times // to synchronize the threads in a thread block.
a_index = row*input_size + p*TILE_WIDTH +tx; b_index = (ty + p*TILE_WIDTH)*input_size + col; if(a_index < sqr_input_size ) s_a[ty][tx] = a[a_index]; else s_a[ty][tx] = 0.0; if(b_index < sqr_input_size ) s_b[ty][tx] = b[b_index]; else s_b[ty][tx] = 0.0; // s_a[ty][tx] = a[row*input_size + p*TILE_WIDTH+tx]; // s_b[ty][tx] = b[(ty+p*TILE_WIDTH)*input_size + col]; __syncthreads(); for(int i = 0; i<TILE_WIDTH; i++) result += s_a[ty][i] * s_b[i][tx]; __syncthreads(); } //__syncthreads(); // write out the result to output[row*input_size + col] // CHANGE if(row < input_size && col < input_size) output[output_index] = (alpha * result) + (beta * c[output_index]); //__syncthreads(); } int main(int argc, char **argv) { if(argc < 4) { cout << "usage : " << argv[0] << " input_size filter_size alpha beta\n" << "example : " << argv[0] << " 100 2 0.5 0.8\n"; return 1; } const int input_size = stoi(argv[1]); const int filter_size = stoi(argv[2]); // used for maxpooling const float alpha = stof(argv[3]); const float beta = stof(argv[4]); const int maxpool_output_size = input_size/filter_size; // check input_siize is power of 2 if(input_size == 0 && (input_size & (input_size-1))){ cout << "input_size must be power of 2\n"; return 1; } if(filter_size == 0){ cout << "filter_size cannot be 0\n"; return 1; } float maxpool_input[input_size*input_size]; float a[input_size*input_size]; float b[input_size*input_size]; float c[input_size*input_size]; // read input matrices ifstream input_in(MAXPOOL_INPUT_FILENAME); ifstream a_in(A_FILENAME); ifstream b_in(B_FILENAME); ifstream c_in(C_FILENAME); for (int i = 0; i < input_size*input_size; ++i) { input_in >> maxpool_input[i]; a_in >> a[i]; b_in >> b[i]; c_in >> c[i]; } // prints inputs for debugging. cout<<"filter size : "<<filter_size; cout<<"\n========== MAXPOOL_INPUT ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<maxpool_input[i]<<" "; } cout<<"\nalpha : "<<alpha<<'\n'; cout<<"========== A ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<a[i]<<" "; } cout<<"\n========== B ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<b[i]<<" "; } cout<<"\nbeta : "<<beta<<'\n'; cout<<"========== C ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<c[i]<<" "; } cout<<'\n'; // set thread, block dimensions const dim3 block_size(TILE_WIDTH, TILE_WIDTH); const dim3 num_of_maxpool_blocks(maxpool_output_size/block_size.x+1, maxpool_output_size/block_size.y+1); const dim3 num_of_blocks(input_size/block_size.x+1, input_size/block_size.y+1); // memory allocation for the device float *dev_mem_a, *dev_mem_b, *dev_mem_c, *dev_mem_input, *gemm_output, *maxpool_output; hipMalloc(&dev_mem_a, sizeof(float) * input_size * input_size); hipMalloc(&dev_mem_b, sizeof(float) * input_size * input_size); hipMalloc(&dev_mem_c, sizeof(float) * input_size * input_size); hipMalloc(&gemm_output, sizeof(float) * input_size * input_size); hipMalloc(&dev_mem_input, sizeof(float) * input_size * input_size); hipMalloc(&maxpool_output, sizeof(float) * maxpool_output_size * maxpool_output_size); // copy variable to device memory hipMemcpy(dev_mem_a, a, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice); hipMemcpy(dev_mem_b, b, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice); hipMemcpy(dev_mem_c, c, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice); 
hipMemcpy(dev_mem_input, maxpool_input, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice); // launch CUDA kernels // First launch gemm kernel hipLaunchKernelGGL(( gemm), dim3(num_of_blocks), dim3(block_size), 0, 0, dev_mem_a, dev_mem_b, dev_mem_c, alpha, beta, gemm_output, input_size); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr, "ERROR %s\n", hipGetErrorString(error)); return 1; } // Then run maxpooling hipLaunchKernelGGL(( maxpool), dim3(num_of_maxpool_blocks), dim3(block_size), 0, 0, dev_mem_input, maxpool_output, input_size, filter_size); hipDeviceSynchronize(); error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr, "ERROR %s\n", hipGetErrorString(error)); return 1; } // allocate output buf in main memory float *gemm_output_buf = (float*) malloc (sizeof(float)*input_size*input_size); float *maxpool_output_buf = (float*) malloc (sizeof(float)*maxpool_output_size*maxpool_output_size); // copy results from device to host hipMemcpy(gemm_output_buf, gemm_output, sizeof(float)*input_size*input_size, hipMemcpyDeviceToHost); hipMemcpy(maxpool_output_buf, maxpool_output, sizeof(float)*maxpool_output_size*maxpool_output_size, hipMemcpyDeviceToHost); // prints the results cout<<"\n========== GEMM OUTPUT ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<gemm_output_buf[i]<<" "; } cout<<"\n========== MAXPOOL OUTPUT ==========\n"; for (int i = 0; i < maxpool_output_size * maxpool_output_size; ++i) { if(i%maxpool_output_size==0) cout<<"\n"; cout<<maxpool_output_buf[i]<<" "; } cout<<'\n'; hipFree(dev_mem_a); hipFree(dev_mem_b); hipFree(dev_mem_c); hipFree(gemm_output); hipFree(dev_mem_input); hipFree(maxpool_output); free(gemm_output_buf); free(maxpool_output_buf); return 0; }
2364dc84313b0fa505d3c9219bbb9df86055ff0e.cu
/* * Find BLANK and replace your own code. * And submit report why do you replace the blank that way. */ /* 2015004693_YangSangheon */ #include<stdlib.h> #include<iostream> #include<fstream> #include<vector> #include<string> #define TILE_WIDTH 16/* set TILE_WIDTH 16 for the evaluation! */ #define MAXPOOL_INPUT_FILENAME "input.txt" #define A_FILENAME "a.txt" #define B_FILENAME "b.txt" #define C_FILENAME "c.txt" using namespace std; __global__ void maxpool(float *input, float *output, const int input_size, const int filter_size) { // input : input_matrix address // output : output buffer address // input_size : width, height of input matrix // filter_size : filter_size of maxpooling // all input, output matrices are vectorized int col = blockDim.x * blockIdx.x + threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; // out of bound // CHANGE float tmp = 0.0; float Max = -999999.9; int output_size = input_size/filter_size; int tmp_index = (input_size*filter_size*row)+(filter_size*col); for(int i = 0; i < filter_size; i++){ for(int j = 0; j < filter_size; j++){ tmp = input[tmp_index+(input_size*j)+i]; if(Max<tmp) Max = tmp; } } if(col < output_size && row < output_size) output[output_size*row+col] = Max; //printf("thread_made\n"); } __global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_size){ // a, b, c : input matrix address // alpha, beta : input constant // output : output buffer address // input_size : width, height of input matrix // all input, output matrices are vectorized int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; int row = by*blockDim.y + ty; int col = bx*blockDim.x + tx; //if(row>=input_size ||col>=input_size) { return; } if(row >= (input_size/TILE_WIDTH+1)*TILE_WIDTH ||col >= (input_size/TILE_WIDTH+1)*TILE_WIDTH) {return;} // allocate 2D tiles in __shared__ memory __shared__ float s_a[TILE_WIDTH][TILE_WIDTH]; __shared__ float s_b[TILE_WIDTH][TILE_WIDTH]; float result = 0; // make sure you handle the case when the matrix sizes are not // multiple of TILE_WIDTH! // loop over the tiles of the input in phases int sqr_input_size = input_size * input_size; int a_index; int b_index; int output_index = row*input_size + col; for(int p = 0; p < input_size/TILE_WIDTH+1 ;p++){ // CHANGE // You need to use __syncthreads() a few times // to synchronize the threads in a thread block.
a_index = row*input_size + p*TILE_WIDTH +tx; b_index = (ty + p*TILE_WIDTH)*input_size + col; if(a_index < sqr_input_size ) s_a[ty][tx] = a[a_index]; else s_a[ty][tx] = 0.0; if(b_index < sqr_input_size ) s_b[ty][tx] = b[b_index]; else s_b[ty][tx] = 0.0; // s_a[ty][tx] = a[row*input_size + p*TILE_WIDTH+tx]; // s_b[ty][tx] = b[(ty+p*TILE_WIDTH)*input_size + col]; __syncthreads(); for(int i = 0; i<TILE_WIDTH; i++) result += s_a[ty][i] * s_b[i][tx]; __syncthreads(); } //__syncthreads(); // write out the result to output[row*input_size + col] // CHANGE if(row < input_size && col < input_size) output[output_index] = (alpha * result) + (beta * c[output_index]); //__syncthreads(); } int main(int argc, char **argv) { if(argc < 4) { cout << "usage : " << argv[0] << " input_size filter_size alpha beta\n" << "example : " << argv[0] << " 100 2 0.5 0.8\n"; return 1; } const int input_size = stoi(argv[1]); const int filter_size = stoi(argv[2]); // used for maxpooling const float alpha = stof(argv[3]); const float beta = stof(argv[4]); const int maxpool_output_size = input_size/filter_size; // check input_siize is power of 2 if(input_size == 0 && (input_size & (input_size-1))){ cout << "input_size must be power of 2\n"; return 1; } if(filter_size == 0){ cout << "filter_size cannot be 0\n"; return 1; } float maxpool_input[input_size*input_size]; float a[input_size*input_size]; float b[input_size*input_size]; float c[input_size*input_size]; // read input matrices ifstream input_in(MAXPOOL_INPUT_FILENAME); ifstream a_in(A_FILENAME); ifstream b_in(B_FILENAME); ifstream c_in(C_FILENAME); for (int i = 0; i < input_size*input_size; ++i) { input_in >> maxpool_input[i]; a_in >> a[i]; b_in >> b[i]; c_in >> c[i]; } // prints inputs for debugging. cout<<"filter size : "<<filter_size; cout<<"\n========== MAXPOOL_INPUT ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<maxpool_input[i]<<" "; } cout<<"\nalpha : "<<alpha<<'\n'; cout<<"========== A ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<a[i]<<" "; } cout<<"\n========== B ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<b[i]<<" "; } cout<<"\nbeta : "<<beta<<'\n'; cout<<"========== C ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<c[i]<<" "; } cout<<'\n'; // set thread, block dimensions const dim3 block_size(TILE_WIDTH, TILE_WIDTH); const dim3 num_of_maxpool_blocks(maxpool_output_size/block_size.x+1, maxpool_output_size/block_size.y+1); const dim3 num_of_blocks(input_size/block_size.x+1, input_size/block_size.y+1); // memory allocation for the device float *dev_mem_a, *dev_mem_b, *dev_mem_c, *dev_mem_input, *gemm_output, *maxpool_output; cudaMalloc(&dev_mem_a, sizeof(float) * input_size * input_size); cudaMalloc(&dev_mem_b, sizeof(float) * input_size * input_size); cudaMalloc(&dev_mem_c, sizeof(float) * input_size * input_size); cudaMalloc(&gemm_output, sizeof(float) * input_size * input_size); cudaMalloc(&dev_mem_input, sizeof(float) * input_size * input_size); cudaMalloc(&maxpool_output, sizeof(float) * maxpool_output_size * maxpool_output_size); // copy variable to device memory cudaMemcpy(dev_mem_a, a, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice); cudaMemcpy(dev_mem_b, b, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice); cudaMemcpy(dev_mem_c, c, sizeof(float) * input_size * input_size, 
cudaMemcpyHostToDevice); cudaMemcpy(dev_mem_input, maxpool_input, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice); // launch CUDA kernels // First launch gemm kernel gemm<<<num_of_blocks, block_size>>>(dev_mem_a, dev_mem_b, dev_mem_c, alpha, beta, gemm_output, input_size); cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr, "ERROR %s\n", cudaGetErrorString(error)); return 1; } // Then run maxpooling maxpool<<<num_of_maxpool_blocks, block_size>>>(dev_mem_input, maxpool_output, input_size, filter_size); cudaDeviceSynchronize(); error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr, "ERROR %s\n", cudaGetErrorString(error)); return 1; } // allocate output buf in main memory float *gemm_output_buf = (float*) malloc (sizeof(float)*input_size*input_size); float *maxpool_output_buf = (float*) malloc (sizeof(float)*maxpool_output_size*maxpool_output_size); // copy results from device to host cudaMemcpy(gemm_output_buf, gemm_output, sizeof(float)*input_size*input_size, cudaMemcpyDeviceToHost); cudaMemcpy(maxpool_output_buf, maxpool_output, sizeof(float)*maxpool_output_size*maxpool_output_size, cudaMemcpyDeviceToHost); // prints the results cout<<"\n========== GEMM OUTPUT ==========\n"; for (int i = 0; i < input_size * input_size; ++i) { if(i%input_size==0) cout<<"\n"; cout<<gemm_output_buf[i]<<" "; } cout<<"\n========== MAXPOOL OUTPUT ==========\n"; for (int i = 0; i < maxpool_output_size * maxpool_output_size; ++i) { if(i%maxpool_output_size==0) cout<<"\n"; cout<<maxpool_output_buf[i]<<" "; } cout<<'\n'; cudaFree(dev_mem_a); cudaFree(dev_mem_b); cudaFree(dev_mem_c); cudaFree(gemm_output); cudaFree(dev_mem_input); cudaFree(maxpool_output); free(gemm_output_buf); free(maxpool_output_buf); return 0; }
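/*
 * A minimal build/run sketch for the assignment program above. The source/binary
 * names and the plain nvcc invocation are assumptions, not taken from the handout;
 * the argument order and the input files (input.txt, a.txt, b.txt, c.txt, each
 * holding input_size*input_size whitespace-separated floats) follow the code above.
 *
 *   nvcc -o gemm_maxpool gemm_maxpool.cu
 *   ./gemm_maxpool 100 2 0.5 0.8    // input_size=100, filter_size=2, alpha=0.5, beta=0.8
 */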
49e96b02790f00b9cd7107e933953d8407bcf7e4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include <cstring> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cuda_device_runtime_api.h> #include <roctracer/roctx.h> const unsigned int MB_TO_TRANSFER = 16; // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } void profileCopies(float *h_a, float *h_b, float *d, unsigned int n, char *desc) { printf("\n%s transfers\n", desc); unsigned int bytes = n * sizeof(float); //Warm Up for(int i = 0; i < 16; i++) { checkCuda( hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice) ); checkCuda( hipMemcpy(h_b, d, bytes, hipMemcpyDeviceToHost) ); } // events for timing hipEvent_t startEvent, stopEvent; checkCuda( hipEventCreate(&startEvent) ); checkCuda( hipEventCreate(&stopEvent) ); checkCuda( hipEventRecord(startEvent, 0) ); checkCuda( hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice) ); checkCuda( hipEventRecord(stopEvent, 0) ); checkCuda( hipEventSynchronize(stopEvent) ); float time; checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) ); printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time); checkCuda( hipEventRecord(startEvent, 0) ); checkCuda( hipMemcpy(h_b, d, bytes, hipMemcpyDeviceToHost) ); checkCuda( hipEventRecord(stopEvent, 0) ); checkCuda( hipEventSynchronize(stopEvent) ); checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) ); printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time); for (unsigned int i = 0; i < n; ++i) { if (h_a[i] != h_b[i]) { printf("*** %s transfers failed ***", desc); break; } } // clean up events checkCuda( hipEventDestroy(startEvent) ); checkCuda( hipEventDestroy(stopEvent) ); } void profileD2DCopies(float *d_a, float *d_b, unsigned int n ) { printf("\nDevice to Device Memcpy\n"); unsigned int bytes = n * sizeof(float); //Warm Up for(int i = 0; i < 16; i++) { checkCuda( hipMemcpy(d_b, d_a, bytes, hipMemcpyDeviceToDevice) ); checkCuda( hipMemcpy(d_a, d_b, bytes, hipMemcpyDeviceToDevice) ); } // events for timing hipEvent_t startEvent, stopEvent; int iters = 100; checkCuda( hipEventCreate(&startEvent) ); checkCuda( hipEventCreate(&stopEvent) ); checkCuda( hipEventRecord(startEvent, 0) ); for(int i = 0; i < iters; i++) checkCuda( hipMemcpy(d_b, d_a, bytes, hipMemcpyDeviceToDevice) ); checkCuda( hipEventRecord(stopEvent, 0) ); checkCuda( hipEventSynchronize(stopEvent) ); float time1; checkCuda( hipEventElapsedTime(&time1, startEvent, stopEvent) ); float band1 = 2.0f * iters * bytes * (float)1e-6 / time1; //2.0 for read and write checkCuda( hipEventRecord(startEvent, 0) ); for(int i = 0; i < iters; i++) checkCuda( hipMemcpy(d_a, d_b, bytes, hipMemcpyDeviceToDevice) ); checkCuda( hipEventRecord(stopEvent, 0) ); checkCuda( hipEventSynchronize(stopEvent) ); float time2; checkCuda( hipEventElapsedTime(&time2, startEvent, stopEvent) ); float band2 = 2.0f * iters * bytes * (float)1e-6 / time2; printf(" Device to Device bandwidth (GB/s): %f\n", (band1 + band2) / 2.0f); float *h_a; float *h_b; checkCuda( hipHostMalloc((void**)&h_a, bytes) ); // host pinned checkCuda( hipHostMalloc((void**)&h_b, bytes) ); // host pinned checkCuda( hipMemcpy(h_a, d_a, bytes, hipMemcpyDeviceToHost) ); checkCuda( 
hipMemcpy(h_b, d_b, bytes, hipMemcpyDeviceToHost) ); for (unsigned int i = 0; i < n; ++i) { if (h_a[i] != h_b[i]) { printf("*** Device to device transfers failed ***"); break; } } // clean up events checkCuda( hipEventDestroy(startEvent) ); checkCuda( hipEventDestroy(stopEvent) ); } int main() { unsigned int nElements = MB_TO_TRANSFER * 256 * 1024; const unsigned int bytes = nElements * sizeof(float); // host arrays float *h_aPageable, *h_bPageable; float *h_aPinned, *h_bPinned; // device array float *d_a; float *d_b; // allocate and initialize h_aPageable = (float*)malloc(bytes); // host pageable h_bPageable = (float*)malloc(bytes); // host pageable checkCuda( hipHostMalloc((void**)&h_aPinned, bytes) ); // host pinned checkCuda( hipHostMalloc((void**)&h_bPinned, bytes) ); // host pinned checkCuda( hipMalloc((void**)&d_a, bytes) ); // device checkCuda( hipMalloc((void**)&d_b, bytes) ); // device for (unsigned int i = 0; i < nElements; ++i) h_aPageable[i] = (float)i; memcpy(h_aPinned, h_aPageable, bytes); memset(h_bPageable, 0, bytes); memset(h_bPinned, 0, bytes); // output device info and transfer size hipDeviceProp_t prop; checkCuda( hipGetDeviceProperties(&prop, 0) ); printf("\nDevice: %s\n", prop.name); printf("Transfer size (MB): %d\n", bytes / (1024 * 1024)); // perform copies and report bandwidth nvtxRangeId_t pageable_range = roctxRangeStart("Paged Memory Transfer"); profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable"); roctxRangeStop(pageable_range); nvtxRangeId_t pinned_range = roctxRangeStart("Pinned Memory Transfer"); profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned"); roctxRangeStop(pinned_range); nvtxRangeId_t d2d_range = roctxRangeStart("Device to Device Memory Transfer"); profileD2DCopies(d_a, d_b, nElements); roctxRangeStop(d2d_range); printf("\n"); // cleanup hipFree(d_a); hipHostFree(h_aPinned); hipHostFree(h_bPinned); free(h_aPageable); free(h_bPageable); hipDeviceReset(); return 0; }
49e96b02790f00b9cd7107e933953d8407bcf7e4.cu
#include <stdio.h> #include <assert.h> #include <cstring> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_device_runtime_api.h> #include <nvToolsExt.h> const unsigned int MB_TO_TRANSFER = 16; // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } void profileCopies(float *h_a, float *h_b, float *d, unsigned int n, char *desc) { printf("\n%s transfers\n", desc); unsigned int bytes = n * sizeof(float); //Warm Up for(int i = 0; i < 16; i++) { checkCuda( cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice) ); checkCuda( cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost) ); } // events for timing cudaEvent_t startEvent, stopEvent; checkCuda( cudaEventCreate(&startEvent) ); checkCuda( cudaEventCreate(&stopEvent) ); checkCuda( cudaEventRecord(startEvent, 0) ); checkCuda( cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice) ); checkCuda( cudaEventRecord(stopEvent, 0) ); checkCuda( cudaEventSynchronize(stopEvent) ); float time; checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) ); printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time); checkCuda( cudaEventRecord(startEvent, 0) ); checkCuda( cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost) ); checkCuda( cudaEventRecord(stopEvent, 0) ); checkCuda( cudaEventSynchronize(stopEvent) ); checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) ); printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time); for (unsigned int i = 0; i < n; ++i) { if (h_a[i] != h_b[i]) { printf("*** %s transfers failed ***", desc); break; } } // clean up events checkCuda( cudaEventDestroy(startEvent) ); checkCuda( cudaEventDestroy(stopEvent) ); } void profileD2DCopies(float *d_a, float *d_b, unsigned int n ) { printf("\nDevice to Device Memcpy\n"); unsigned int bytes = n * sizeof(float); //Warm Up for(int i = 0; i < 16; i++) { checkCuda( cudaMemcpy(d_b, d_a, bytes, cudaMemcpyDeviceToDevice) ); checkCuda( cudaMemcpy(d_a, d_b, bytes, cudaMemcpyDeviceToDevice) ); } // events for timing cudaEvent_t startEvent, stopEvent; int iters = 100; checkCuda( cudaEventCreate(&startEvent) ); checkCuda( cudaEventCreate(&stopEvent) ); checkCuda( cudaEventRecord(startEvent, 0) ); for(int i = 0; i < iters; i++) checkCuda( cudaMemcpy(d_b, d_a, bytes, cudaMemcpyDeviceToDevice) ); checkCuda( cudaEventRecord(stopEvent, 0) ); checkCuda( cudaEventSynchronize(stopEvent) ); float time1; checkCuda( cudaEventElapsedTime(&time1, startEvent, stopEvent) ); float band1 = 2.0f * iters * bytes * (float)1e-6 / time1; //2.0 for read and write checkCuda( cudaEventRecord(startEvent, 0) ); for(int i = 0; i < iters; i++) checkCuda( cudaMemcpy(d_a, d_b, bytes, cudaMemcpyDeviceToDevice) ); checkCuda( cudaEventRecord(stopEvent, 0) ); checkCuda( cudaEventSynchronize(stopEvent) ); float time2; checkCuda( cudaEventElapsedTime(&time2, startEvent, stopEvent) ); float band2 = 2.0f * iters * bytes * (float)1e-6 / time2; printf(" Device to Device bandwidth (GB/s): %f\n", (band1 + band2) / 2.0f); float *h_a; float *h_b; checkCuda( cudaMallocHost((void**)&h_a, bytes) ); // host pinned checkCuda( cudaMallocHost((void**)&h_b, bytes) ); // host pinned checkCuda( cudaMemcpy(h_a, d_a, bytes, cudaMemcpyDeviceToHost) ); checkCuda( cudaMemcpy(h_b, d_b, bytes, 
cudaMemcpyDeviceToHost) ); for (unsigned int i = 0; i < n; ++i) { if (h_a[i] != h_b[i]) { printf("*** Device to device transfers failed ***"); break; } } // clean up events checkCuda( cudaEventDestroy(startEvent) ); checkCuda( cudaEventDestroy(stopEvent) ); } int main() { unsigned int nElements = MB_TO_TRANSFER * 256 * 1024; const unsigned int bytes = nElements * sizeof(float); // host arrays float *h_aPageable, *h_bPageable; float *h_aPinned, *h_bPinned; // device array float *d_a; float *d_b; // allocate and initialize h_aPageable = (float*)malloc(bytes); // host pageable h_bPageable = (float*)malloc(bytes); // host pageable checkCuda( cudaMallocHost((void**)&h_aPinned, bytes) ); // host pinned checkCuda( cudaMallocHost((void**)&h_bPinned, bytes) ); // host pinned checkCuda( cudaMalloc((void**)&d_a, bytes) ); // device checkCuda( cudaMalloc((void**)&d_b, bytes) ); // device for (unsigned int i = 0; i < nElements; ++i) h_aPageable[i] = (float)i; memcpy(h_aPinned, h_aPageable, bytes); memset(h_bPageable, 0, bytes); memset(h_bPinned, 0, bytes); // output device info and transfer size cudaDeviceProp prop; checkCuda( cudaGetDeviceProperties(&prop, 0) ); printf("\nDevice: %s\n", prop.name); printf("Transfer size (MB): %d\n", bytes / (1024 * 1024)); // perform copies and report bandwidth nvtxRangeId_t pageable_range = nvtxRangeStart("Paged Memory Transfer"); profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable"); nvtxRangeEnd(pageable_range); nvtxRangeId_t pinned_range = nvtxRangeStart("Pinned Memory Transfer"); profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned"); nvtxRangeEnd(pinned_range); nvtxRangeId_t d2d_range = nvtxRangeStart("Device to Device Memory Transfer"); profileD2DCopies(d_a, d_b, nElements); nvtxRangeEnd(d2d_range); printf("\n"); // cleanup cudaFree(d_a); cudaFreeHost(h_aPinned); cudaFreeHost(h_bPinned); free(h_aPageable); free(h_bPageable); cudaDeviceReset(); return 0; }
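/*
 * A worked example of the bandwidth arithmetic used above (illustrative numbers,
 * not measured results): cudaEventElapsedTime() reports milliseconds, so
 * bytes * 1e-6 / time gives decimal GB/s. With the default 16 MB transfer
 * (16 * 256 * 1024 floats = 16777216 bytes), a 2.0 ms copy works out to
 * 16777216 * 1e-6 / 2.0 ≈ 8.39 GB/s. The device-to-device figure additionally
 * multiplies by 2 * iters because each of the 100 timed iterations both reads
 * and writes the buffer on the device.
 */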
b9ef21ce3e26b05beb3f05d78e9dbbf9f52b7d92.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <float.h> #include <math.h> #include <time.h> #include "../../constants.h" #define N_THREADS_PER_BLOCK 1024 __global__ void find_min_max_u_kernel( const float *__restrict__ g_u, float *__restrict__ g_max, float *__restrict__ g_min ) { extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; unsigned int tidFromBack = blockDim.x - 1 - tid; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = g_u[i]; __syncthreads(); for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) { if (tid < s) { if (sdata[tid + s] > sdata[tid]) { sdata[tid] = sdata[tid + s]; } } if (tidFromBack < s) { if (sdata[tid - s] < sdata[tid]) { sdata[tid] = sdata[tid - s]; } } __syncthreads(); } if (tid == 0) { g_max[blockIdx.x] = sdata[0]; } if (tidFromBack == 0) { g_min[blockIdx.x] = sdata[tid]; } } extern "C" void find_min_max_u_cuda( const float *__restrict__ u, llint u_size, float *__restrict__ min_u, float *__restrict__ max_u ) { llint u_block = u_size / N_THREADS_PER_BLOCK; llint u_remainder = u_size % N_THREADS_PER_BLOCK; llint d_block = u_block; if (u_remainder != 0) { d_block += 1; } llint d_size = d_block * N_THREADS_PER_BLOCK; float *remainder = 0; llint remainder_size = 0; if (u_remainder != 0) { remainder_size = N_THREADS_PER_BLOCK - u_remainder; remainder = (float *)malloc(remainder_size * sizeof(float)); memcpy(remainder, u, remainder_size * sizeof(float)); } float* max = (float*)malloc(d_block * sizeof(float)); float *min = (float*)malloc(d_block * sizeof(float)); float* d_u, * d_max, * d_min; hipMalloc(&d_u, d_size * sizeof(float)); hipMalloc(&d_max, d_block * sizeof(float)); hipMalloc(&d_min, d_block * sizeof(float)); hipMemcpy(d_u, u, u_size * sizeof(float), hipMemcpyHostToDevice); if (u_remainder != 0) { hipMemcpy(d_u+u_size, remainder, remainder_size * sizeof(float), hipMemcpyHostToDevice); } hipLaunchKernelGGL(( find_min_max_u_kernel), dim3(d_block), dim3(N_THREADS_PER_BLOCK), sizeof(float) * N_THREADS_PER_BLOCK, 0, d_u, d_max, d_min); hipMemcpy(max, d_max, d_block * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(min, d_min, d_block * sizeof(float), hipMemcpyDeviceToHost); *min_u = FLT_MAX, *max_u = FLT_MIN; for (size_t i = 0; i < d_block; i++) { *min_u = fminf(*min_u, min[i]); *max_u = fmaxf(*max_u, max[i]); } hipFree(d_max); hipFree(d_min); hipFree(d_u); free(remainder); free(max); free(min); }
b9ef21ce3e26b05beb3f05d78e9dbbf9f52b7d92.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <float.h> #include <math.h> #include <time.h> #include "../../constants.h" #define N_THREADS_PER_BLOCK 1024 __global__ void find_min_max_u_kernel( const float *__restrict__ g_u, float *__restrict__ g_max, float *__restrict__ g_min ) { extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; unsigned int tidFromBack = blockDim.x - 1 - tid; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = g_u[i]; __syncthreads(); for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) { if (tid < s) { if (sdata[tid + s] > sdata[tid]) { sdata[tid] = sdata[tid + s]; } } if (tidFromBack < s) { if (sdata[tid - s] < sdata[tid]) { sdata[tid] = sdata[tid - s]; } } __syncthreads(); } if (tid == 0) { g_max[blockIdx.x] = sdata[0]; } if (tidFromBack == 0) { g_min[blockIdx.x] = sdata[tid]; } } extern "C" void find_min_max_u_cuda( const float *__restrict__ u, llint u_size, float *__restrict__ min_u, float *__restrict__ max_u ) { llint u_block = u_size / N_THREADS_PER_BLOCK; llint u_remainder = u_size % N_THREADS_PER_BLOCK; llint d_block = u_block; if (u_remainder != 0) { d_block += 1; } llint d_size = d_block * N_THREADS_PER_BLOCK; float *remainder = 0; llint remainder_size = 0; if (u_remainder != 0) { remainder_size = N_THREADS_PER_BLOCK - u_remainder; remainder = (float *)malloc(remainder_size * sizeof(float)); memcpy(remainder, u, remainder_size * sizeof(float)); } float* max = (float*)malloc(d_block * sizeof(float)); float *min = (float*)malloc(d_block * sizeof(float)); float* d_u, * d_max, * d_min; cudaMalloc(&d_u, d_size * sizeof(float)); cudaMalloc(&d_max, d_block * sizeof(float)); cudaMalloc(&d_min, d_block * sizeof(float)); cudaMemcpy(d_u, u, u_size * sizeof(float), cudaMemcpyHostToDevice); if (u_remainder != 0) { cudaMemcpy(d_u+u_size, remainder, remainder_size * sizeof(float), cudaMemcpyHostToDevice); } find_min_max_u_kernel<<<d_block, N_THREADS_PER_BLOCK, sizeof(float) * N_THREADS_PER_BLOCK>>>(d_u, d_max, d_min); cudaMemcpy(max, d_max, d_block * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(min, d_min, d_block * sizeof(float), cudaMemcpyDeviceToHost); *min_u = FLT_MAX, *max_u = FLT_MIN; for (size_t i = 0; i < d_block; i++) { *min_u = fminf(*min_u, min[i]); *max_u = fmaxf(*max_u, max[i]); } cudaFree(d_max); cudaFree(d_min); cudaFree(d_u); free(remainder); free(max); free(min); }
d6861ddb91c7b155ad3e3741deb90330832ff57d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Faster R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Shaoqing Ren // ------------------------------------------------------------------ //#ifdef __cplusplus //extern "C" { //#endif #include <math.h> #include <stdio.h> #include <float.h> #include "nms_kernel.h" __device__ inline float devIoU(float const * const a, float const * const b) { float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]); float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]); float width = fmaxf(right - left + 1, 0.f), height = fmaxf(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = fminf(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _nms(int boxes_num, float * boxes_dev, unsigned long * mask_dev, float nms_overlap_thresh) { dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); } //#ifdef __cplusplus //} //#endif
d6861ddb91c7b155ad3e3741deb90330832ff57d.cu
// ------------------------------------------------------------------ // Faster R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Shaoqing Ren // ------------------------------------------------------------------ //#ifdef __cplusplus //extern "C" { //#endif #include <math.h> #include <stdio.h> #include <float.h> #include "nms_kernel.h" __device__ inline float devIoU(float const * const a, float const * const b) { float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]); float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]); float width = fmaxf(right - left + 1, 0.f), height = fmaxf(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = fminf(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _nms(int boxes_num, float * boxes_dev, unsigned long * mask_dev, float nms_overlap_thresh) { dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); } //#ifdef __cplusplus //} //#endif
6acb31f85f0f36e4be41cbe8205082e95b996460.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cmath> #include <cassert> #include <cstdlib> #include <omp.h> #ifdef WITH_CUDA5 # include <helper_cuda.h> # define CUDA_SAFE_CALL checkCudaErrors #else # include <cutil.h> #endif #include "cuda_pointer.h" #define NTHREAD 64 // 64 or 128 // #define NJBLOCK 14 // for GTX 470 #define NJBLOCK 28 // for GTX660Ti #define NIBLOCK 32 // 16 or 32 #define NIMAX (NTHREAD * NIBLOCK) // 2048 #define NXREDUCE 32 // must be >NJBLOCK #define NYREDUCE 8 #define NNB_PER_BLOCK 256 // NNB per block, must be power of 2 #define NB_BUF_SIZE (1<<21) // (1<<20) << RdV // #define NNB_MAX 384 // total NNB at reduced #define MAX_CPU 12 // (8) << RdV for SPARTAN #define MAX_GPU 4 // for clearity, for myself #define __out #define PROFILE #define NAN_CHECK(val) assert((val) == (val)); typedef unsigned short uint16; struct Jparticle{ float3 pos; float mass; float3 vel; float pad; Jparticle() {} Jparticle(double mj, double xj[3], double vj[3]){ pos.x = xj[0]; pos.y = xj[1]; pos.z = xj[2]; mass = mj; vel.x = vj[0]; vel.y = vj[1]; vel.z = vj[2]; NAN_CHECK(xj[0]); NAN_CHECK(xj[1]); NAN_CHECK(xj[2]); NAN_CHECK(mj); NAN_CHECK(vj[0]); NAN_CHECK(vj[1]); NAN_CHECK(vj[2]); } __device__ Jparticle(const float4 *buf){ float4 tmp1 = buf[0]; float4 tmp2 = buf[1]; pos.x = tmp1.x; pos.y = tmp1.y; pos.z = tmp1.z; mass = tmp1.w; vel.x = tmp2.x; vel.y = tmp2.y; vel.z = tmp2.z; } }; struct Iparticle{ float3 pos; float h2; float3 vel; float dtr; Iparticle() {} Iparticle(double h2i, double dtri, double xi[3], double vi[3]){ pos.x = xi[0]; pos.y = xi[1]; pos.z = xi[2]; h2 = h2i; vel.x = vi[0]; vel.y = vi[1]; vel.z = vi[2]; dtr = dtri; NAN_CHECK(xi[0]); NAN_CHECK(xi[1]); NAN_CHECK(xi[2]); NAN_CHECK(h2i); NAN_CHECK(vi[0]); NAN_CHECK(vi[1]); NAN_CHECK(vi[2]); } }; struct Force{ float3 acc; float pot; float3 jrk; int nnb; // 8 words __device__ void clear(){ acc.x = acc.y = acc.z = 0.f; jrk.x = jrk.y = jrk.z = 0.f; pot = 0.f; nnb = 0; } __device__ void operator+=(const Force &rhs){ acc.x += rhs.acc.x; acc.y += rhs.acc.y; acc.z += rhs.acc.z; pot += rhs.pot; jrk.x += rhs.jrk.x; jrk.y += rhs.jrk.y; jrk.z += rhs.jrk.z; if(nnb>=0 && rhs.nnb>=0){ nnb += rhs.nnb; }else{ nnb = -1; } } #if __CUDA_ARCH__ >= 300 __device__ void reduce_with(const int mask){ acc.x += __shfl_xor(acc.x, mask); acc.y += __shfl_xor(acc.y, mask); acc.z += __shfl_xor(acc.z, mask); pot += __shfl_xor(pot , mask); jrk.x += __shfl_xor(jrk.x, mask); jrk.y += __shfl_xor(jrk.y, mask); jrk.z += __shfl_xor(jrk.z, mask); int ntmp = __shfl_xor(nnb, mask); if(nnb>=0 && ntmp>=0){ nnb += ntmp; }else{ nnb = -1; } } #endif }; __device__ void dev_gravity( const int jidx, const Iparticle &ip, const Jparticle &jp, __out Force &fo, __out uint16 nblist[]){ float dx = jp.pos.x - ip.pos.x; float dy = jp.pos.y - ip.pos.y; float dz = jp.pos.z - ip.pos.z; float dvx = jp.vel.x - ip.vel.x; float dvy = jp.vel.y - ip.vel.y; float dvz = jp.vel.z - ip.vel.z; float r2 = dx*dx + dy*dy + dz*dz; #if 1 float dxp = dx + ip.dtr * dvx; float dyp = dy + ip.dtr * dvy; float dzp = dz + ip.dtr * dvz; float r2p = dxp*dxp + dyp*dyp + dzp*dzp; #else float r2p = r2; #endif float rv = dx*dvx + dy*dvy + dz*dvz; float rinv1 = rsqrtf(r2); if(min(r2, r2p) < jp.mass * ip.h2){ // fo.neib[fo.nnb++ % NBMAX] = j; nblist[fo.nnb & (NNB_PER_BLOCK-1)] = (uint16)jidx; fo.nnb++; rinv1 = 0.f; } float rinv2 = rinv1 * rinv1; float mrinv1 = jp.mass * rinv1; float mrinv3 = mrinv1 * rinv2; rv *= -3.f * rinv2; #ifdef POTENTIAL fo.pot += 
mrinv1; #endif fo.acc.x += mrinv3 * dx; fo.acc.y += mrinv3 * dy; fo.acc.z += mrinv3 * dz; // fo.acc.z += 1.0; fo.jrk.x += mrinv3 * (dvx + rv * dx); fo.jrk.y += mrinv3 * (dvy + rv * dy); fo.jrk.z += mrinv3 * (dvz + rv * dz); } __global__ void gravity_kernel( const int nbody, const Iparticle ipbuf[], const Jparticle jpbuf[], __out Force fobuf[][NJBLOCK], __out uint16 nbbuf[][NJBLOCK][NNB_PER_BLOCK]){ int ibid = blockIdx.x; int jbid = blockIdx.y; int tid = threadIdx.x; int iaddr = tid + blockDim.x * ibid; int jstart = (nbody * (jbid )) / NJBLOCK; int jend = (nbody * (jbid+1)) / NJBLOCK; Iparticle ip = ipbuf[iaddr]; Force fo; fo.clear(); uint16 *nblist = nbbuf[iaddr][jbid]; #if __CUDA_ARCH__ >= 300 // just some trial for(int j=jstart; j<jend; j+=32){ __shared__ Jparticle jpshare[32]; __syncthreads(); float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[tid] = src[tid]; __syncthreads(); if(jend-j < 32){ #pragma unroll 4 for(int jj=0; jj<jend-j; jj++){ const Jparticle jp = jpshare[jj]; // const Jparticle jp( (float4 *)jpshare + 2*jj); dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } }else{ #pragma unroll 8 for(int jj=0; jj<32; jj++){ const Jparticle jp = jpshare[jj]; // const Jparticle jp( (float4 *)jpshare + 2*jj); dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } } } #else for(int j=jstart; j<jend; j+=NTHREAD){ __shared__ Jparticle jpshare[NTHREAD]; __syncthreads(); float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[ tid] = src[ tid]; dst[NTHREAD+tid] = src[NTHREAD+tid]; __syncthreads(); if(jend-j < NTHREAD){ #pragma unroll 4 for(int jj=0; jj<jend-j; jj++){ Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } }else{ #pragma unroll 8 for(int jj=0; jj<NTHREAD; jj++){ Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } } } #endif if(fo.nnb > NNB_PER_BLOCK) fo.nnb = -1; fobuf[iaddr][jbid] = fo; } #if __CUDA_ARCH__ >= 300 __device__ void warp_reduce_int(int inp, int *out){ inp += __shfl_xor(inp, 1); inp += __shfl_xor(inp, 2); inp += __shfl_xor(inp, 4); inp += __shfl_xor(inp, 8); # if NXREDUCE==32 inp += __shfl_xor(inp, 16); # endif *out = inp; } __device__ void warp_reduce_float8(float4 inp1, float4 inp2, float *out){ const int tid = threadIdx.x; float4 tmp4L = (4&tid) ? inp2 : inp1; float4 tmp4R = (4&tid) ? inp1 : inp2; tmp4L.x += __shfl_xor(tmp4R.x, 4); tmp4L.y += __shfl_xor(tmp4R.y, 4); tmp4L.z += __shfl_xor(tmp4R.z, 4); tmp4L.w += __shfl_xor(tmp4R.w, 4); float4 tmp4; tmp4.x = (2&tid) ? tmp4L.z : tmp4L.x; tmp4.y = (2&tid) ? tmp4L.w : tmp4L.y; tmp4.z = (2&tid) ? tmp4L.x : tmp4L.z; tmp4.w = (2&tid) ? tmp4L.y : tmp4L.w; tmp4.x += __shfl_xor(tmp4.z, 2); tmp4.y += __shfl_xor(tmp4.w, 2); float2 tmp2; tmp2.x = (1&tid) ? tmp4.y : tmp4.x; tmp2.y = (1&tid) ? 
tmp4.x : tmp4.y; tmp2.x += __shfl_xor(tmp2.y, 1); tmp2.x += __shfl_xor(tmp2.x, 8); # if NXREDUCE==32 tmp2.x += __shfl_xor(tmp2.x, 16); # endif if(tid < 8){ out[tid] = tmp2.x; } } #endif __global__ void force_reduce_kernel( const int ni, const Force fpart[][NJBLOCK], __out Force ftot []){ const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; #if __CUDA_ARCH__ >= 300 Force f; if(xid < NJBLOCK){ f = fpart[iaddr][xid]; }else{ f.clear(); } # if 0 # pragma unroll for(int mask=1; mask<NXREDUCE; mask*=2){ f.reduce_with(mask); } if(iaddr < ni && xid == 0){ ftot[iaddr] = f; } # else if(iaddr < ni){ const float4 tmp1 = make_float4(f.acc.x, f.acc.y, f.acc.z, f.pot); const float4 tmp2 = make_float4(f.jrk.x, f.jrk.y, f.jrk.z, 0.0f); const int itmp = f.nnb; float *dst = (float *)(ftot + iaddr); int *idst = (int *)(dst + 7); warp_reduce_float8(tmp1, tmp2, dst); warp_reduce_int(itmp, idst); } # endif #else __shared__ Force fshare[NYREDUCE][NXREDUCE]; if(xid < NJBLOCK){ fshare[yid][xid] = fpart[iaddr][xid]; }else{ fshare[yid][xid].clear(); } Force *fs = fshare[yid]; #if NXREDUCE==32 if(xid < 16) fs[xid] += fs[xid + 16]; #endif if(xid < 8) fs[xid] += fs[xid + 8]; if(xid < 4) fs[xid] += fs[xid + 4]; if(xid < 2) fs[xid] += fs[xid + 2]; if(xid < 1) fs[xid] += fs[xid + 1]; if(iaddr < ni){ ftot[iaddr] = fs[0]; } #endif } __global__ void gather_nb_kernel( const int ni, const int nj, const int joff, const Force fpart[][NJBLOCK], const Force ftot [], const int nboff[], const uint16 nbpart[][NJBLOCK][NNB_PER_BLOCK], __out int nblist[]) { const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; if(iaddr >= ni) return; if(ftot[iaddr].nnb < 0) return; const int mynnb = (xid < NJBLOCK) ? fpart[iaddr][xid].nnb : 0; // now performe prefix sum #if __CUDA_ARCH__ >= 300 int ix = mynnb; #pragma unroll for(int ioff=1; ioff<NXREDUCE; ioff*=2){ int iy = __shfl_up(ix, ioff); if(xid>=ioff) ix += iy; } int iz = __shfl_up(ix, 1); const int off = (xid == 0) ? 0 : iz; #else __shared__ int ishare[NYREDUCE][NXREDUCE]; ishare[yid][xid] = mynnb; volatile int *ish = ishare[yid]; if(xid>=1) ish[xid] += ish[xid-1]; if(xid>=2) ish[xid] += ish[xid-2]; if(xid>=4) ish[xid] += ish[xid-4]; if(xid>=8) ish[xid] += ish[xid-8]; #if NXREDUCE==32 if(xid>=16) ish[xid] += ish[xid-16]; #endif const int off = (xid == 0) ? 
0 : ish[xid-1]; #endif int *nbdst = nblist + nboff[iaddr] + off; const int jstart = (nj * xid) / NJBLOCK; if(xid < NJBLOCK){ for(int k=0; k<mynnb; k++){ const int nbid = (joff + jstart) + int(nbpart[iaddr][xid][k]); // const int nbid = iaddr * 1000 + k; nbdst[k] = nbid; } } } // Host Part #ifdef PROFILE #include <sys/time.h> static double get_wtime(){ struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec + 1.e-6 * tv.tv_usec; } #else static double get_wtime(){ return 0.0; } #endif static double time_send, time_grav, time_reduce; static long long numInter; static cudaPointer <Jparticle> jpbuf[MAX_GPU]; static cudaPointer <Iparticle> ipbuf[MAX_GPU]; static cudaPointer <Force[NJBLOCK]> fpart[MAX_GPU]; static cudaPointer <Force> ftot [MAX_GPU]; static cudaPointer <uint16[NJBLOCK][NNB_PER_BLOCK]> nbpart[MAX_GPU]; static cudaPointer <int> nblist [MAX_GPU]; static cudaPointer <int> nboff [MAX_GPU]; static int numCPU, numGPU; static int joff[MAX_GPU + 1]; static int nbody, nbodymax; static int devid[MAX_GPU]; static bool is_open = false; static bool devinit = false; void GPUNB_devinit(){ if(devinit) return; assert(NXREDUCE >= NJBLOCK); assert(NXREDUCE <= 32); hipGetDeviceCount(&numGPU); assert(numGPU <= MAX_GPU); char *gpu_list = getenv("GPU_LIST"); if(gpu_list){ // get GPU list from environment variable numGPU = 0; char *p = strtok(gpu_list, " "); while(p){ devid[numGPU++] = atoi(p); p = strtok(NULL, " "); assert(numGPU <= MAX_GPU); } }else{ // use all GPUs for(int i=0; i<numGPU; i++){ devid[i] = i; } } // numGPU = 1; #pragma omp parallel { int tid = omp_get_thread_num(); if(tid == 0) numCPU = omp_get_num_threads(); } assert(numCPU <= MAX_CPU); assert(numGPU <= numCPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ hipSetDevice(devid[tid]); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Initializing NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); #if 1 for(int i=0; i<numGPU; i++){ hipDeviceProp_t prop; hipGetDeviceProperties(&prop, devid[i]); fprintf(stderr, " device %d: %s\n", devid[i], prop.name); } #endif fprintf(stderr, "***********************\n"); #endif devinit = true; } void GPUNB_open(int nbmax){ time_send = time_grav = time_reduce = 0.0; numInter = 0; nbodymax = nbmax; GPUNB_devinit(); if(is_open){ fprintf(stderr, "gpunb: it is already open\n"); return; } is_open = true; for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbmax) / numGPU; } // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ hipSetDevice(devid[tid]); int nj = joff[tid+1] - joff[tid]; jpbuf [tid].allocate(nj + NTHREAD); ipbuf [tid].allocate(NIMAX); fpart [tid].allocate(NIMAX); ftot [tid].allocate(NIMAX); nbpart[tid].allocate(NIMAX); nblist[tid].allocate(NB_BUF_SIZE); // total ganged nblist nboff [tid].allocate(NIMAX+1); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Opened NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); for(int i=0; i<numGPU+1; i++){ fprintf(stderr, " %d", joff[i]); } fprintf(stderr, "\n"); fprintf(stderr, "nbmax = %d\n", nbmax); fprintf(stderr, "***********************\n"); #endif } void GPUNB_close(){ if(!is_open){ fprintf(stderr, "gpunb: it is already 
close\n"); return; } is_open = false; // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ jpbuf [tid].free(); ipbuf [tid].free(); fpart [tid].free(); ftot [tid].free(); nbpart[tid].free(); nblist[tid].free(); nboff [tid].free(); } } // omp_set_num_threads(numCPU); nbodymax = 0; #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Closed NBODY6/GPU library\n"); fprintf(stderr, "time send : %f sec\n", time_send); fprintf(stderr, "time grav : %f sec\n", time_grav); fprintf(stderr, "time reduce : %f sec\n", time_reduce); fprintf(stderr, "time regtot : %f sec\n", time_send + time_grav + time_reduce); fprintf(stderr, "%f Gflops (gravity part only)\n", 60.e-9 * numInter / time_grav); fprintf(stderr, "***********************\n"); #endif } void GPUNB_send( int _nbody, double mj[], double xj[][3], double vj[][3]){ assert(is_open); nbody = _nbody; assert(nbody <= nbodymax); time_send -= get_wtime(); for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbody) / numGPU; } #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ int nj = joff[tid+1] - joff[tid]; for(int j=0; j<nj; j++){ int jj = j + joff[tid]; jpbuf[tid][j] = Jparticle(mj[jj], xj[jj], vj[jj]); } jpbuf[tid].htod(nj); } } time_send += get_wtime(); } void GPUNB_regf( int ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int lmax, int nnbmax, int *listbase){ assert(is_open); time_grav -= get_wtime(); numInter += ni * nbody; assert(0 < ni && ni <= NIMAX); // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ // hipSetDevice(device_id[tid]); for(int i=0; i<ni; i++){ ipbuf[tid][i] = Iparticle(h2[i], dtr[i], xi[i], vi[i]); } // set i-particles ipbuf[tid].htod(ni); // gravity kernel int niblock = 1 + (ni-1) / NTHREAD; dim3 grid(niblock, NJBLOCK, 1); dim3 threads(NTHREAD, 1, 1); int nj = joff[tid+1] - joff[tid]; hipLaunchKernelGGL(( gravity_kernel) , dim3(grid), dim3(threads) , 0, 0, nj, ipbuf[tid], jpbuf[tid], fpart[tid], nbpart[tid]); // CUDA_SAFE_THREAD_SYNC(); #if 0 dim3 rgrid(niblock, 1, 1); hipLaunchKernelGGL(( reduce_kernel) , dim3(rgrid), dim3(threads) , 0, 0, nj, joff[tid], fpart[tid], nbpart[tid], ftot[tid], nbtot[tid]); #else const int ni8 = 1 + (ni-1) / NYREDUCE; dim3 rgrid (ni8, 1, 1); dim3 rthreads(NXREDUCE, NYREDUCE, 1); hipLaunchKernelGGL(( force_reduce_kernel) , dim3(rgrid), dim3(rthreads) , 0, 0, ni, fpart[tid], ftot[tid]); #endif // CUDA_SAFE_THREAD_SYNC(); ftot [tid].dtoh(ni); // now make prefix sum int nbsum = 0; for(int i=0; i<ni; i++){ nboff[tid][i] = nbsum; const int nnb = ftot[tid][i].nnb; // assert(nnb >= 0); if(nnb >= 0) nbsum += nnb; } assert(nbsum <= NB_BUF_SIZE); nboff[tid].htod(ni); // debugging // for(int k=0; k<nbsum; k++) nblist[tid][k] = -1; // nblist[tid].htod(nbsum); hipLaunchKernelGGL(( gather_nb_kernel) , dim3(rgrid), dim3(rthreads), 0, 0, ni, nj, joff[tid], fpart[tid], ftot[tid], nboff[tid], nbpart[tid], nblist[tid]); // CUDA_SAFE_THREAD_SYNC(); nblist[tid].dtoh(nbsum); } } const double wt = get_wtime(); time_grav += wt; time_reduce -= wt; // reduction phase // omp_set_num_threads(numCPU); #pragma omp parallel for for(int i=0; i<ni; i++){ double ax=0.0, ay=0.0, az=0.0; double jx=0.0, jy=0.0, jz=0.0; double po=0.0; for(int id=0; id<numGPU; id++){ Force &fo = ftot[id][i]; ax += fo.acc.x; ay += fo.acc.y; az += fo.acc.z; jx += fo.jrk.x; jy += fo.jrk.y; jz += fo.jrk.z; po += fo.pot; } acc[i][0] = ax; acc[i][1] 
= ay; acc[i][2] = az; jrk[i][0] = jx; jrk[i][1] = jy; jrk[i][2] = jz; pot[i] = po; } #pragma omp parallel for for(int i=0; i<ni; i++){ bool overflow = false; int *nnbp = listbase + lmax * i; int *nblistp = nnbp + 1; int nnb = 0; for(int id=0; id<numGPU; id++){ const int nnb_part = ftot[id][i].nnb; if(nnb_part < 0){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_part=%d\n", i, id, nnb_part); } // assert(!overflow); nnb += nnb_part; if(nnb > nnbmax){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_tot =%d, nnbmax=%d\n", i, id, nnb, nnbmax); } // assert(!overflow); if(!overflow){ const int off = nboff[id][i]; for(int k=0; k<nnb_part; k++){ *nblistp++ = nblist[id][off + k]; } } } if(overflow){ // *nnbp = -1; *nnbp = nnb ? -abs(nnb) : -9999; }else{ *nnbp = nnb; } } time_reduce += get_wtime(); } extern "C" { void gpunb_devinit_(){ GPUNB_devinit(); } void gpunb_open_(int *nbmax){ GPUNB_open(*nbmax); } void gpunb_close_(){ GPUNB_close(); } void gpunb_send_( int *nj, double mj[], double xj[][3], double vj[][3]){ GPUNB_send(*nj, mj, xj, vj); } void gpunb_regf_( int *ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int *lmax, int *nbmax, int *list){ // list[][lmax] GPUNB_regf(*ni, h2, dtr, xi, vi, acc, jrk, pot, *lmax, *nbmax, list); } }
6acb31f85f0f36e4be41cbe8205082e95b996460.cu
#include <cstdio> #include <cmath> #include <cassert> #include <cstdlib> #include <omp.h> #ifdef WITH_CUDA5 # include <helper_cuda.h> # define CUDA_SAFE_CALL checkCudaErrors #else # include <cutil.h> #endif #include "cuda_pointer.h" #define NTHREAD 64 // 64 or 128 // #define NJBLOCK 14 // for GTX 470 #define NJBLOCK 28 // for GTX660Ti #define NIBLOCK 32 // 16 or 32 #define NIMAX (NTHREAD * NIBLOCK) // 2048 #define NXREDUCE 32 // must be >NJBLOCK #define NYREDUCE 8 #define NNB_PER_BLOCK 256 // NNB per block, must be power of 2 #define NB_BUF_SIZE (1<<21) // (1<<20) << RdV // #define NNB_MAX 384 // total NNB at reduced #define MAX_CPU 12 // (8) << RdV for SPARTAN #define MAX_GPU 4 // for clearity, for myself #define __out #define PROFILE #define NAN_CHECK(val) assert((val) == (val)); typedef unsigned short uint16; struct Jparticle{ float3 pos; float mass; float3 vel; float pad; Jparticle() {} Jparticle(double mj, double xj[3], double vj[3]){ pos.x = xj[0]; pos.y = xj[1]; pos.z = xj[2]; mass = mj; vel.x = vj[0]; vel.y = vj[1]; vel.z = vj[2]; NAN_CHECK(xj[0]); NAN_CHECK(xj[1]); NAN_CHECK(xj[2]); NAN_CHECK(mj); NAN_CHECK(vj[0]); NAN_CHECK(vj[1]); NAN_CHECK(vj[2]); } __device__ Jparticle(const float4 *buf){ float4 tmp1 = buf[0]; float4 tmp2 = buf[1]; pos.x = tmp1.x; pos.y = tmp1.y; pos.z = tmp1.z; mass = tmp1.w; vel.x = tmp2.x; vel.y = tmp2.y; vel.z = tmp2.z; } }; struct Iparticle{ float3 pos; float h2; float3 vel; float dtr; Iparticle() {} Iparticle(double h2i, double dtri, double xi[3], double vi[3]){ pos.x = xi[0]; pos.y = xi[1]; pos.z = xi[2]; h2 = h2i; vel.x = vi[0]; vel.y = vi[1]; vel.z = vi[2]; dtr = dtri; NAN_CHECK(xi[0]); NAN_CHECK(xi[1]); NAN_CHECK(xi[2]); NAN_CHECK(h2i); NAN_CHECK(vi[0]); NAN_CHECK(vi[1]); NAN_CHECK(vi[2]); } }; struct Force{ float3 acc; float pot; float3 jrk; int nnb; // 8 words __device__ void clear(){ acc.x = acc.y = acc.z = 0.f; jrk.x = jrk.y = jrk.z = 0.f; pot = 0.f; nnb = 0; } __device__ void operator+=(const Force &rhs){ acc.x += rhs.acc.x; acc.y += rhs.acc.y; acc.z += rhs.acc.z; pot += rhs.pot; jrk.x += rhs.jrk.x; jrk.y += rhs.jrk.y; jrk.z += rhs.jrk.z; if(nnb>=0 && rhs.nnb>=0){ nnb += rhs.nnb; }else{ nnb = -1; } } #if __CUDA_ARCH__ >= 300 __device__ void reduce_with(const int mask){ acc.x += __shfl_xor(acc.x, mask); acc.y += __shfl_xor(acc.y, mask); acc.z += __shfl_xor(acc.z, mask); pot += __shfl_xor(pot , mask); jrk.x += __shfl_xor(jrk.x, mask); jrk.y += __shfl_xor(jrk.y, mask); jrk.z += __shfl_xor(jrk.z, mask); int ntmp = __shfl_xor(nnb, mask); if(nnb>=0 && ntmp>=0){ nnb += ntmp; }else{ nnb = -1; } } #endif }; __device__ void dev_gravity( const int jidx, const Iparticle &ip, const Jparticle &jp, __out Force &fo, __out uint16 nblist[]){ float dx = jp.pos.x - ip.pos.x; float dy = jp.pos.y - ip.pos.y; float dz = jp.pos.z - ip.pos.z; float dvx = jp.vel.x - ip.vel.x; float dvy = jp.vel.y - ip.vel.y; float dvz = jp.vel.z - ip.vel.z; float r2 = dx*dx + dy*dy + dz*dz; #if 1 float dxp = dx + ip.dtr * dvx; float dyp = dy + ip.dtr * dvy; float dzp = dz + ip.dtr * dvz; float r2p = dxp*dxp + dyp*dyp + dzp*dzp; #else float r2p = r2; #endif float rv = dx*dvx + dy*dvy + dz*dvz; float rinv1 = rsqrtf(r2); if(min(r2, r2p) < jp.mass * ip.h2){ // fo.neib[fo.nnb++ % NBMAX] = j; nblist[fo.nnb & (NNB_PER_BLOCK-1)] = (uint16)jidx; fo.nnb++; rinv1 = 0.f; } float rinv2 = rinv1 * rinv1; float mrinv1 = jp.mass * rinv1; float mrinv3 = mrinv1 * rinv2; rv *= -3.f * rinv2; #ifdef POTENTIAL fo.pot += mrinv1; #endif fo.acc.x += mrinv3 * dx; fo.acc.y += mrinv3 * dy; fo.acc.z += mrinv3 * dz; 
// fo.acc.z += 1.0; fo.jrk.x += mrinv3 * (dvx + rv * dx); fo.jrk.y += mrinv3 * (dvy + rv * dy); fo.jrk.z += mrinv3 * (dvz + rv * dz); } __global__ void gravity_kernel( const int nbody, const Iparticle ipbuf[], const Jparticle jpbuf[], __out Force fobuf[][NJBLOCK], __out uint16 nbbuf[][NJBLOCK][NNB_PER_BLOCK]){ int ibid = blockIdx.x; int jbid = blockIdx.y; int tid = threadIdx.x; int iaddr = tid + blockDim.x * ibid; int jstart = (nbody * (jbid )) / NJBLOCK; int jend = (nbody * (jbid+1)) / NJBLOCK; Iparticle ip = ipbuf[iaddr]; Force fo; fo.clear(); uint16 *nblist = nbbuf[iaddr][jbid]; #if __CUDA_ARCH__ >= 300 // just some trial for(int j=jstart; j<jend; j+=32){ __shared__ Jparticle jpshare[32]; __syncthreads(); float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[tid] = src[tid]; __syncthreads(); if(jend-j < 32){ #pragma unroll 4 for(int jj=0; jj<jend-j; jj++){ const Jparticle jp = jpshare[jj]; // const Jparticle jp( (float4 *)jpshare + 2*jj); dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } }else{ #pragma unroll 8 for(int jj=0; jj<32; jj++){ const Jparticle jp = jpshare[jj]; // const Jparticle jp( (float4 *)jpshare + 2*jj); dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } } } #else for(int j=jstart; j<jend; j+=NTHREAD){ __shared__ Jparticle jpshare[NTHREAD]; __syncthreads(); float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[ tid] = src[ tid]; dst[NTHREAD+tid] = src[NTHREAD+tid]; __syncthreads(); if(jend-j < NTHREAD){ #pragma unroll 4 for(int jj=0; jj<jend-j; jj++){ Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } }else{ #pragma unroll 8 for(int jj=0; jj<NTHREAD; jj++){ Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } } } #endif if(fo.nnb > NNB_PER_BLOCK) fo.nnb = -1; fobuf[iaddr][jbid] = fo; } #if __CUDA_ARCH__ >= 300 __device__ void warp_reduce_int(int inp, int *out){ inp += __shfl_xor(inp, 1); inp += __shfl_xor(inp, 2); inp += __shfl_xor(inp, 4); inp += __shfl_xor(inp, 8); # if NXREDUCE==32 inp += __shfl_xor(inp, 16); # endif *out = inp; } __device__ void warp_reduce_float8(float4 inp1, float4 inp2, float *out){ const int tid = threadIdx.x; float4 tmp4L = (4&tid) ? inp2 : inp1; float4 tmp4R = (4&tid) ? inp1 : inp2; tmp4L.x += __shfl_xor(tmp4R.x, 4); tmp4L.y += __shfl_xor(tmp4R.y, 4); tmp4L.z += __shfl_xor(tmp4R.z, 4); tmp4L.w += __shfl_xor(tmp4R.w, 4); float4 tmp4; tmp4.x = (2&tid) ? tmp4L.z : tmp4L.x; tmp4.y = (2&tid) ? tmp4L.w : tmp4L.y; tmp4.z = (2&tid) ? tmp4L.x : tmp4L.z; tmp4.w = (2&tid) ? tmp4L.y : tmp4L.w; tmp4.x += __shfl_xor(tmp4.z, 2); tmp4.y += __shfl_xor(tmp4.w, 2); float2 tmp2; tmp2.x = (1&tid) ? tmp4.y : tmp4.x; tmp2.y = (1&tid) ? 
tmp4.x : tmp4.y; tmp2.x += __shfl_xor(tmp2.y, 1); tmp2.x += __shfl_xor(tmp2.x, 8); # if NXREDUCE==32 tmp2.x += __shfl_xor(tmp2.x, 16); # endif if(tid < 8){ out[tid] = tmp2.x; } } #endif __global__ void force_reduce_kernel( const int ni, const Force fpart[][NJBLOCK], __out Force ftot []){ const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; #if __CUDA_ARCH__ >= 300 Force f; if(xid < NJBLOCK){ f = fpart[iaddr][xid]; }else{ f.clear(); } # if 0 # pragma unroll for(int mask=1; mask<NXREDUCE; mask*=2){ f.reduce_with(mask); } if(iaddr < ni && xid == 0){ ftot[iaddr] = f; } # else if(iaddr < ni){ const float4 tmp1 = make_float4(f.acc.x, f.acc.y, f.acc.z, f.pot); const float4 tmp2 = make_float4(f.jrk.x, f.jrk.y, f.jrk.z, 0.0f); const int itmp = f.nnb; float *dst = (float *)(ftot + iaddr); int *idst = (int *)(dst + 7); warp_reduce_float8(tmp1, tmp2, dst); warp_reduce_int(itmp, idst); } # endif #else __shared__ Force fshare[NYREDUCE][NXREDUCE]; if(xid < NJBLOCK){ fshare[yid][xid] = fpart[iaddr][xid]; }else{ fshare[yid][xid].clear(); } Force *fs = fshare[yid]; #if NXREDUCE==32 if(xid < 16) fs[xid] += fs[xid + 16]; #endif if(xid < 8) fs[xid] += fs[xid + 8]; if(xid < 4) fs[xid] += fs[xid + 4]; if(xid < 2) fs[xid] += fs[xid + 2]; if(xid < 1) fs[xid] += fs[xid + 1]; if(iaddr < ni){ ftot[iaddr] = fs[0]; } #endif } __global__ void gather_nb_kernel( const int ni, const int nj, const int joff, const Force fpart[][NJBLOCK], const Force ftot [], const int nboff[], const uint16 nbpart[][NJBLOCK][NNB_PER_BLOCK], __out int nblist[]) { const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; if(iaddr >= ni) return; if(ftot[iaddr].nnb < 0) return; const int mynnb = (xid < NJBLOCK) ? fpart[iaddr][xid].nnb : 0; // now performe prefix sum #if __CUDA_ARCH__ >= 300 int ix = mynnb; #pragma unroll for(int ioff=1; ioff<NXREDUCE; ioff*=2){ int iy = __shfl_up(ix, ioff); if(xid>=ioff) ix += iy; } int iz = __shfl_up(ix, 1); const int off = (xid == 0) ? 0 : iz; #else __shared__ int ishare[NYREDUCE][NXREDUCE]; ishare[yid][xid] = mynnb; volatile int *ish = ishare[yid]; if(xid>=1) ish[xid] += ish[xid-1]; if(xid>=2) ish[xid] += ish[xid-2]; if(xid>=4) ish[xid] += ish[xid-4]; if(xid>=8) ish[xid] += ish[xid-8]; #if NXREDUCE==32 if(xid>=16) ish[xid] += ish[xid-16]; #endif const int off = (xid == 0) ? 
0 : ish[xid-1]; #endif int *nbdst = nblist + nboff[iaddr] + off; const int jstart = (nj * xid) / NJBLOCK; if(xid < NJBLOCK){ for(int k=0; k<mynnb; k++){ const int nbid = (joff + jstart) + int(nbpart[iaddr][xid][k]); // const int nbid = iaddr * 1000 + k; nbdst[k] = nbid; } } } // Host Part #ifdef PROFILE #include <sys/time.h> static double get_wtime(){ struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec + 1.e-6 * tv.tv_usec; } #else static double get_wtime(){ return 0.0; } #endif static double time_send, time_grav, time_reduce; static long long numInter; static cudaPointer <Jparticle> jpbuf[MAX_GPU]; static cudaPointer <Iparticle> ipbuf[MAX_GPU]; static cudaPointer <Force[NJBLOCK]> fpart[MAX_GPU]; static cudaPointer <Force> ftot [MAX_GPU]; static cudaPointer <uint16[NJBLOCK][NNB_PER_BLOCK]> nbpart[MAX_GPU]; static cudaPointer <int> nblist [MAX_GPU]; static cudaPointer <int> nboff [MAX_GPU]; static int numCPU, numGPU; static int joff[MAX_GPU + 1]; static int nbody, nbodymax; static int devid[MAX_GPU]; static bool is_open = false; static bool devinit = false; void GPUNB_devinit(){ if(devinit) return; assert(NXREDUCE >= NJBLOCK); assert(NXREDUCE <= 32); cudaGetDeviceCount(&numGPU); assert(numGPU <= MAX_GPU); char *gpu_list = getenv("GPU_LIST"); if(gpu_list){ // get GPU list from environment variable numGPU = 0; char *p = strtok(gpu_list, " "); while(p){ devid[numGPU++] = atoi(p); p = strtok(NULL, " "); assert(numGPU <= MAX_GPU); } }else{ // use all GPUs for(int i=0; i<numGPU; i++){ devid[i] = i; } } // numGPU = 1; #pragma omp parallel { int tid = omp_get_thread_num(); if(tid == 0) numCPU = omp_get_num_threads(); } assert(numCPU <= MAX_CPU); assert(numGPU <= numCPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ cudaSetDevice(devid[tid]); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Initializing NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); #if 1 for(int i=0; i<numGPU; i++){ cudaDeviceProp prop; cudaGetDeviceProperties(&prop, devid[i]); fprintf(stderr, " device %d: %s\n", devid[i], prop.name); } #endif fprintf(stderr, "***********************\n"); #endif devinit = true; } void GPUNB_open(int nbmax){ time_send = time_grav = time_reduce = 0.0; numInter = 0; nbodymax = nbmax; GPUNB_devinit(); if(is_open){ fprintf(stderr, "gpunb: it is already open\n"); return; } is_open = true; for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbmax) / numGPU; } // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ cudaSetDevice(devid[tid]); int nj = joff[tid+1] - joff[tid]; jpbuf [tid].allocate(nj + NTHREAD); ipbuf [tid].allocate(NIMAX); fpart [tid].allocate(NIMAX); ftot [tid].allocate(NIMAX); nbpart[tid].allocate(NIMAX); nblist[tid].allocate(NB_BUF_SIZE); // total ganged nblist nboff [tid].allocate(NIMAX+1); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Opened NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); for(int i=0; i<numGPU+1; i++){ fprintf(stderr, " %d", joff[i]); } fprintf(stderr, "\n"); fprintf(stderr, "nbmax = %d\n", nbmax); fprintf(stderr, "***********************\n"); #endif } void GPUNB_close(){ if(!is_open){ fprintf(stderr, "gpunb: it is already 
close\n"); return; } is_open = false; // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ jpbuf [tid].free(); ipbuf [tid].free(); fpart [tid].free(); ftot [tid].free(); nbpart[tid].free(); nblist[tid].free(); nboff [tid].free(); } } // omp_set_num_threads(numCPU); nbodymax = 0; #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Closed NBODY6/GPU library\n"); fprintf(stderr, "time send : %f sec\n", time_send); fprintf(stderr, "time grav : %f sec\n", time_grav); fprintf(stderr, "time reduce : %f sec\n", time_reduce); fprintf(stderr, "time regtot : %f sec\n", time_send + time_grav + time_reduce); fprintf(stderr, "%f Gflops (gravity part only)\n", 60.e-9 * numInter / time_grav); fprintf(stderr, "***********************\n"); #endif } void GPUNB_send( int _nbody, double mj[], double xj[][3], double vj[][3]){ assert(is_open); nbody = _nbody; assert(nbody <= nbodymax); time_send -= get_wtime(); for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbody) / numGPU; } #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ int nj = joff[tid+1] - joff[tid]; for(int j=0; j<nj; j++){ int jj = j + joff[tid]; jpbuf[tid][j] = Jparticle(mj[jj], xj[jj], vj[jj]); } jpbuf[tid].htod(nj); } } time_send += get_wtime(); } void GPUNB_regf( int ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int lmax, int nnbmax, int *listbase){ assert(is_open); time_grav -= get_wtime(); numInter += ni * nbody; assert(0 < ni && ni <= NIMAX); // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ // cudaSetDevice(device_id[tid]); for(int i=0; i<ni; i++){ ipbuf[tid][i] = Iparticle(h2[i], dtr[i], xi[i], vi[i]); } // set i-particles ipbuf[tid].htod(ni); // gravity kernel int niblock = 1 + (ni-1) / NTHREAD; dim3 grid(niblock, NJBLOCK, 1); dim3 threads(NTHREAD, 1, 1); int nj = joff[tid+1] - joff[tid]; gravity_kernel <<< grid, threads >>> (nj, ipbuf[tid], jpbuf[tid], fpart[tid], nbpart[tid]); // CUDA_SAFE_THREAD_SYNC(); #if 0 dim3 rgrid(niblock, 1, 1); reduce_kernel <<< rgrid, threads >>> (nj, joff[tid], fpart[tid], nbpart[tid], ftot[tid], nbtot[tid]); #else const int ni8 = 1 + (ni-1) / NYREDUCE; dim3 rgrid (ni8, 1, 1); dim3 rthreads(NXREDUCE, NYREDUCE, 1); force_reduce_kernel <<< rgrid, rthreads >>> (ni, fpart[tid], ftot[tid]); #endif // CUDA_SAFE_THREAD_SYNC(); ftot [tid].dtoh(ni); // now make prefix sum int nbsum = 0; for(int i=0; i<ni; i++){ nboff[tid][i] = nbsum; const int nnb = ftot[tid][i].nnb; // assert(nnb >= 0); if(nnb >= 0) nbsum += nnb; } assert(nbsum <= NB_BUF_SIZE); nboff[tid].htod(ni); // debugging // for(int k=0; k<nbsum; k++) nblist[tid][k] = -1; // nblist[tid].htod(nbsum); gather_nb_kernel <<< rgrid, rthreads>>> (ni, nj, joff[tid], fpart[tid], ftot[tid], nboff[tid], nbpart[tid], nblist[tid]); // CUDA_SAFE_THREAD_SYNC(); nblist[tid].dtoh(nbsum); } } const double wt = get_wtime(); time_grav += wt; time_reduce -= wt; // reduction phase // omp_set_num_threads(numCPU); #pragma omp parallel for for(int i=0; i<ni; i++){ double ax=0.0, ay=0.0, az=0.0; double jx=0.0, jy=0.0, jz=0.0; double po=0.0; for(int id=0; id<numGPU; id++){ Force &fo = ftot[id][i]; ax += fo.acc.x; ay += fo.acc.y; az += fo.acc.z; jx += fo.jrk.x; jy += fo.jrk.y; jz += fo.jrk.z; po += fo.pot; } acc[i][0] = ax; acc[i][1] = ay; acc[i][2] = az; jrk[i][0] = jx; jrk[i][1] = jy; jrk[i][2] = jz; pot[i] = po; } #pragma omp parallel for for(int i=0; i<ni; i++){ 
bool overflow = false; int *nnbp = listbase + lmax * i; int *nblistp = nnbp + 1; int nnb = 0; for(int id=0; id<numGPU; id++){ const int nnb_part = ftot[id][i].nnb; if(nnb_part < 0){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_part=%d\n", i, id, nnb_part); } // assert(!overflow); nnb += nnb_part; if(nnb > nnbmax){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_tot =%d, nnbmax=%d\n", i, id, nnb, nnbmax); } // assert(!overflow); if(!overflow){ const int off = nboff[id][i]; for(int k=0; k<nnb_part; k++){ *nblistp++ = nblist[id][off + k]; } } } if(overflow){ // *nnbp = -1; *nnbp = nnb ? -abs(nnb) : -9999; }else{ *nnbp = nnb; } } time_reduce += get_wtime(); } extern "C" { void gpunb_devinit_(){ GPUNB_devinit(); } void gpunb_open_(int *nbmax){ GPUNB_open(*nbmax); } void gpunb_close_(){ GPUNB_close(); } void gpunb_send_( int *nj, double mj[], double xj[][3], double vj[][3]){ GPUNB_send(*nj, mj, xj, vj); } void gpunb_regf_( int *ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int *lmax, int *nbmax, int *list){ // list[][lmax] GPUNB_regf(*ni, h2, dtr, xi, vi, acc, jrk, pot, *lmax, *nbmax, list); } }
94d508e2106d02ed8081ee9b0f7e34fc596a8de7.hip
// !!! This is a file automatically generated by hipify!!! // SPDX-FileCopyrightText: 2021 Benjamin Brock // // SPDX-License-Identifier: BSD-3-Clause #define __thrust_compiler_fence() __sync_synchronize() #include <cusp/io/matrix_market.h> #include <cusp/csr_matrix.h> #include <cusp/array2d.h> #include <cusp/multiply.h> #include <cusp/array2d.h> #include <cusp/print.h> #include <bcl/bcl.hpp> #include <bcl/backends/experimental/nvshmem/backend.hpp> #include <bcl/containers/experimental/cuda/CudaMatrix.hpp> #include <bcl/containers/experimental/cuda/launch_kernel.cuh> #include <thrust/sort.h> #include <bcl/containers/experimental/cuda/CudaSPMatrix.hpp> #include <bcl/containers/experimental/cuda/algorithms/algorithm.hpp> #include <unordered_map> #include <chrono> #include <essl.h> template <typename T, typename U> struct PairHash { std::size_t operator()(const std::pair<T, U>& value) const noexcept { return std::hash<T>{}(value.first) ^ std::hash<U>{}(value.second); } }; // Check if two floating point numbers are // within epsilon of each other. template <typename T> bool equal(T a, T b, T eps = 1.0e-5) { if (std::abs((a - b) / a) <= eps) { return true; } else if (std::abs(a - b) <= eps) { return true; } else if (a == b) { return true; } return false; } int main(int argc, char** argv) { BCL::init(16); BCL::cuda::init(); using T = float; using index_type = int; bool verify_result = true; std::string fname = std::string(argv[1]); auto matrix_shape = BCL::matrix_io::matrix_info(fname); size_t m = matrix_shape.shape[0]; size_t n = matrix_shape.shape[1]; assert(m == n); size_t k = m; BCL::print("Choosing blocks...\n"); auto blocks = BCL::block_matmul(m, n, k); BCL::print("Reading matrices...\n"); BCL::cuda::SPMatrix<T, index_type> a(fname, std::move(blocks[0])); BCL::cuda::SPMatrix<T, index_type> b(fname, std::move(blocks[1])); BCL::cuda::SPMatrix<T, index_type> c(m, n, std::move(blocks[2])); BCL::print("Info:\n"); if (BCL::rank() == 0) { printf("A:\n"); a.print_info(); printf("B:\n"); b.print_info(); printf("C:\n"); c.print_info(); } hipsparseStatus_t status = hipsparseCreate(&BCL::cuda::bcl_cusparse_handle_); BCL::cuda::throw_cusparse(status); // printf("A taking %lf GB, B %lf GB\n", 1.0e-9*a.my_mem(), 1.0e-9*b.my_mem()); assert(a.grid_shape()[1] == b.grid_shape()[0]); using allocator_type = BCL::cuda::bcl_allocator<T>; BCL::cuda::duration_issue = 0; BCL::cuda::duration_sync = 0; BCL::cuda::duration_compute = 0; BCL::cuda::duration_accumulate = 0; BCL::cuda::duration_barrier = 0; BCL::print("Beginning SpGEMM...\n"); BCL::barrier(); auto begin = std::chrono::high_resolution_clock::now(); BCL::cuda::gemm<T, index_type, allocator_type>(a, b, c); auto end = std::chrono::high_resolution_clock::now(); double duration = std::chrono::duration<double>(end - begin).count(); double max_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::max<double>{}); double max_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::max<double>{}); double max_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::max<double>{}); double max_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, BCL::max<double>{}); double max_barrier = BCL::allreduce(BCL::cuda::duration_barrier, BCL::max<double>{}); double min_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::min<double>{}); double min_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::min<double>{}); double min_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::min<double>{}); double min_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, 
BCL::min<double>{}); double min_barrier = BCL::allreduce(BCL::cuda::duration_barrier, BCL::min<double>{}); BCL::cuda::duration_issue = BCL::allreduce(BCL::cuda::duration_issue, std::plus<double>{}); BCL::cuda::duration_sync = BCL::allreduce(BCL::cuda::duration_sync, std::plus<double>{}); BCL::cuda::duration_compute = BCL::allreduce(BCL::cuda::duration_compute, std::plus<double>{}); BCL::cuda::duration_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, std::plus<double>{}); BCL::cuda::duration_barrier = BCL::allreduce(BCL::cuda::duration_barrier, std::plus<double>{}); BCL::barrier(); fflush(stdout); BCL::barrier(); fprintf(stderr, "RANK(%lu) A has %lu nnz, B has %lu nnz, C has %lu nnz\n", BCL::rank(), a.my_nnzs(), b.my_nnzs(), c.my_nnzs()); BCL::barrier(); fflush(stderr); BCL::barrier(); if (BCL::rank() == 0) { printf("duration_issue %lf (%lf -> %lf)\n", BCL::cuda::duration_issue / BCL::nprocs(), min_issue, max_issue); printf("duration_sync %lf (%lf -> %lf)\n", BCL::cuda::duration_sync / BCL::nprocs(), min_sync, max_sync); printf("duration_compute %lf (%lf -> %lf)\n", BCL::cuda::duration_compute / BCL::nprocs(), min_compute, max_compute); printf("duration_accumulate %lf (%lf -> %lf)\n", BCL::cuda::duration_accumulate / BCL::nprocs(), min_accumulate, max_accumulate); printf("duration_barrier %lf (%lf -> %lf)\n", BCL::cuda::duration_barrier / BCL::nprocs(), min_barrier, max_barrier); } BCL::barrier(); fflush(stdout); BCL::barrier(); BCL::print("Matrix multiply finished in %lf s\n", duration); if (BCL::rank() == 0 && verify_result) { BCL::CSRMatrix<T, index_type> mat(fname); auto local_a = BCL::cuda::to_gpu<T, index_type, allocator_type>(mat); auto s_c = spgemm_cusparse(local_a, local_a); fprintf(stderr, "Getting COO...\n"); auto local_c = c.get().get_coo(); local_c = BCL::cuda::remove_zeros(local_c); auto s_c_coo = BCL::cuda::to_cpu(s_c).get_coo(); fprintf(stderr, "local_computation (%lu nnz), distributed result (%lu nnz)\n", s_c_coo.size(), local_c.size()); if (s_c_coo.size() != local_c.size()) { fprintf(stderr, "ERROR: number of nonzeros does not match.\n"); } else { fprintf(stderr, "Nonzeros match %lu == %lu\n", s_c_coo.size(), local_c.size()); } T eps = 1.0e-5; for (size_t i = 0; i < s_c_coo.size(); i++) { auto idx_a = std::get<0>(s_c_coo[i]); auto idx_b = std::get<0>(local_c[i]); auto val_a = std::get<1>(s_c_coo[i]); auto val_b = std::get<1>(local_c[i]); assert(idx_a == idx_b); if (!equal(val_a, val_b, eps)) { fprintf(stderr, "(%lu, %lu) == (%lu, %lu)\n", idx_a.first, idx_a.second, idx_b.first, idx_b.second); fprintf(stderr, "%f ~= %f\n", val_a, val_b); fflush(stderr); } assert(equal(val_a, val_b, eps)); // printf("(%lu, %lu) == (%lu, %lu)\n", idx_a.first, idx_a.second, // idx_b.first, idx_b.second); // printf("%f ~= %f\n", val_a, val_b); } printf("OK!\n"); } BCL::finalize(); return 0; }
94d508e2106d02ed8081ee9b0f7e34fc596a8de7.cu
// SPDX-FileCopyrightText: 2021 Benjamin Brock // // SPDX-License-Identifier: BSD-3-Clause #define __thrust_compiler_fence() __sync_synchronize() #include <cusp/io/matrix_market.h> #include <cusp/csr_matrix.h> #include <cusp/array2d.h> #include <cusp/multiply.h> #include <cusp/array2d.h> #include <cusp/print.h> #include <bcl/bcl.hpp> #include <bcl/backends/experimental/nvshmem/backend.hpp> #include <bcl/containers/experimental/cuda/CudaMatrix.hpp> #include <bcl/containers/experimental/cuda/launch_kernel.cuh> #include <thrust/sort.h> #include <bcl/containers/experimental/cuda/CudaSPMatrix.hpp> #include <bcl/containers/experimental/cuda/algorithms/algorithm.hpp> #include <unordered_map> #include <chrono> #include <essl.h> template <typename T, typename U> struct PairHash { std::size_t operator()(const std::pair<T, U>& value) const noexcept { return std::hash<T>{}(value.first) ^ std::hash<U>{}(value.second); } }; // Check if two floating point numbers are // within epsilon of each other. template <typename T> bool equal(T a, T b, T eps = 1.0e-5) { if (std::abs((a - b) / a) <= eps) { return true; } else if (std::abs(a - b) <= eps) { return true; } else if (a == b) { return true; } return false; } int main(int argc, char** argv) { BCL::init(16); BCL::cuda::init(); using T = float; using index_type = int; bool verify_result = true; std::string fname = std::string(argv[1]); auto matrix_shape = BCL::matrix_io::matrix_info(fname); size_t m = matrix_shape.shape[0]; size_t n = matrix_shape.shape[1]; assert(m == n); size_t k = m; BCL::print("Choosing blocks...\n"); auto blocks = BCL::block_matmul(m, n, k); BCL::print("Reading matrices...\n"); BCL::cuda::SPMatrix<T, index_type> a(fname, std::move(blocks[0])); BCL::cuda::SPMatrix<T, index_type> b(fname, std::move(blocks[1])); BCL::cuda::SPMatrix<T, index_type> c(m, n, std::move(blocks[2])); BCL::print("Info:\n"); if (BCL::rank() == 0) { printf("A:\n"); a.print_info(); printf("B:\n"); b.print_info(); printf("C:\n"); c.print_info(); } cusparseStatus_t status = cusparseCreate(&BCL::cuda::bcl_cusparse_handle_); BCL::cuda::throw_cusparse(status); // printf("A taking %lf GB, B %lf GB\n", 1.0e-9*a.my_mem(), 1.0e-9*b.my_mem()); assert(a.grid_shape()[1] == b.grid_shape()[0]); using allocator_type = BCL::cuda::bcl_allocator<T>; BCL::cuda::duration_issue = 0; BCL::cuda::duration_sync = 0; BCL::cuda::duration_compute = 0; BCL::cuda::duration_accumulate = 0; BCL::cuda::duration_barrier = 0; BCL::print("Beginning SpGEMM...\n"); BCL::barrier(); auto begin = std::chrono::high_resolution_clock::now(); BCL::cuda::gemm<T, index_type, allocator_type>(a, b, c); auto end = std::chrono::high_resolution_clock::now(); double duration = std::chrono::duration<double>(end - begin).count(); double max_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::max<double>{}); double max_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::max<double>{}); double max_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::max<double>{}); double max_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, BCL::max<double>{}); double max_barrier = BCL::allreduce(BCL::cuda::duration_barrier, BCL::max<double>{}); double min_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::min<double>{}); double min_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::min<double>{}); double min_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::min<double>{}); double min_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, BCL::min<double>{}); double min_barrier = 
BCL::allreduce(BCL::cuda::duration_barrier, BCL::min<double>{}); BCL::cuda::duration_issue = BCL::allreduce(BCL::cuda::duration_issue, std::plus<double>{}); BCL::cuda::duration_sync = BCL::allreduce(BCL::cuda::duration_sync, std::plus<double>{}); BCL::cuda::duration_compute = BCL::allreduce(BCL::cuda::duration_compute, std::plus<double>{}); BCL::cuda::duration_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, std::plus<double>{}); BCL::cuda::duration_barrier = BCL::allreduce(BCL::cuda::duration_barrier, std::plus<double>{}); BCL::barrier(); fflush(stdout); BCL::barrier(); fprintf(stderr, "RANK(%lu) A has %lu nnz, B has %lu nnz, C has %lu nnz\n", BCL::rank(), a.my_nnzs(), b.my_nnzs(), c.my_nnzs()); BCL::barrier(); fflush(stderr); BCL::barrier(); if (BCL::rank() == 0) { printf("duration_issue %lf (%lf -> %lf)\n", BCL::cuda::duration_issue / BCL::nprocs(), min_issue, max_issue); printf("duration_sync %lf (%lf -> %lf)\n", BCL::cuda::duration_sync / BCL::nprocs(), min_sync, max_sync); printf("duration_compute %lf (%lf -> %lf)\n", BCL::cuda::duration_compute / BCL::nprocs(), min_compute, max_compute); printf("duration_accumulate %lf (%lf -> %lf)\n", BCL::cuda::duration_accumulate / BCL::nprocs(), min_accumulate, max_accumulate); printf("duration_barrier %lf (%lf -> %lf)\n", BCL::cuda::duration_barrier / BCL::nprocs(), min_barrier, max_barrier); } BCL::barrier(); fflush(stdout); BCL::barrier(); BCL::print("Matrix multiply finished in %lf s\n", duration); if (BCL::rank() == 0 && verify_result) { BCL::CSRMatrix<T, index_type> mat(fname); auto local_a = BCL::cuda::to_gpu<T, index_type, allocator_type>(mat); auto s_c = spgemm_cusparse(local_a, local_a); fprintf(stderr, "Getting COO...\n"); auto local_c = c.get().get_coo(); local_c = BCL::cuda::remove_zeros(local_c); auto s_c_coo = BCL::cuda::to_cpu(s_c).get_coo(); fprintf(stderr, "local_computation (%lu nnz), distributed result (%lu nnz)\n", s_c_coo.size(), local_c.size()); if (s_c_coo.size() != local_c.size()) { fprintf(stderr, "ERROR: number of nonzeros does not match.\n"); } else { fprintf(stderr, "Nonzeros match %lu == %lu\n", s_c_coo.size(), local_c.size()); } T eps = 1.0e-5; for (size_t i = 0; i < s_c_coo.size(); i++) { auto idx_a = std::get<0>(s_c_coo[i]); auto idx_b = std::get<0>(local_c[i]); auto val_a = std::get<1>(s_c_coo[i]); auto val_b = std::get<1>(local_c[i]); assert(idx_a == idx_b); if (!equal(val_a, val_b, eps)) { fprintf(stderr, "(%lu, %lu) == (%lu, %lu)\n", idx_a.first, idx_a.second, idx_b.first, idx_b.second); fprintf(stderr, "%f ~= %f\n", val_a, val_b); fflush(stderr); } assert(equal(val_a, val_b, eps)); // printf("(%lu, %lu) == (%lu, %lu)\n", idx_a.first, idx_a.second, // idx_b.first, idx_b.second); // printf("%f ~= %f\n", val_a, val_b); } printf("OK!\n"); } BCL::finalize(); return 0; }
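Note: the verification block above compares the distributed SpGEMM result against a local cuSPARSE product entry by entry, accepting either a small relative error or a small absolute error. A self-contained sketch of that comparison logic on two hypothetical COO streams (the index/value pairs below are invented for illustration):

#include <cassert>
#include <cmath>
#include <cstdio>
#include <utility>
#include <vector>

// same accept-relative-or-absolute tolerance as the equal() helper above
bool approx_equal(float a, float b, float eps = 1.0e-5f) {
    return std::abs((a - b) / a) <= eps || std::abs(a - b) <= eps || a == b;
}

int main() {
    using entry = std::pair<std::pair<size_t, size_t>, float>;  // ((row, col), value)
    std::vector<entry> reference   = {{{0, 1}, 2.0f},       {{1, 3}, 0.5f}};
    std::vector<entry> distributed = {{{0, 1}, 2.0000001f}, {{1, 3}, 0.5f}};

    assert(reference.size() == distributed.size());
    for (size_t i = 0; i < reference.size(); i++) {
        assert(reference[i].first == distributed[i].first);              // indices must match exactly
        assert(approx_equal(reference[i].second, distributed[i].second)); // values only to tolerance
    }
    printf("OK!\n");
    return 0;
}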
554ca26d15991a331d1601089f726df31521dbf3.hip
// !!! This is a file automatically generated by hipify!!! /* multiply.cu */ #include <hip/hip_runtime.h> #include "mycuda.h" #include <stdio.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <math.h> using namespace std; __global__ void __multiply__ (double* a, double* b, double* c, int matrix_a_height, int matrix_a_width_matrix_b_height, int matrix_b_width, int offset, int mat_result_length) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; //I tried many solutions, but this is the only one that worked. It's not elegent, but it will have to do. //The var j keeps track of where on the array we are. Because we can't ensure that the kernel will be called the //Correct number of times as there are elements in the result matrix //The offset variable is at the halfpoint of the result matrix size //This ensures that the multiplication work will be done halfway on the first node //And the second half will be done on the second node if(j < mat_result_length/2 && offset == 0) { //printf ("TWO: %d is j, offset is%d\n", j, offset); for(int k = 0; k < matrix_a_width_matrix_b_height; k++) { c[i * matrix_b_width + j] += a[i * matrix_a_width_matrix_b_height + k] * b[k * matrix_b_width + j]; } } else if (j > offset && j < mat_result_length/2 + offset) { //printf ("ONE: %d is j, offset is%d\n", j, offset); for(int k = 0; k < matrix_a_width_matrix_b_height; k++) { c[i * matrix_b_width + j] += a[i * matrix_a_width_matrix_b_height + k] * b[k * matrix_b_width + j]; } } } void MatrixMultiplyCuda(double* mat_a, double* mat_b, double* mat_result, int matrix_a_height, int matrix_a_width_matrix_b_height, int matrix_b_width, int host_id) { hipError_t cudaStatus; double* mat_a_device; double* mat_b_device; double* mat_result_device; //figure out ideal thread/block numbers //I'm targeting 256 threads, because we found that to be optimal from assignment 4 int thread_number = 256; int block_number = 1; int mat_result_length = matrix_a_height*matrix_b_width; if(mat_result_length < thread_number) { thread_number = mat_result_length; } else if (mat_result_length > thread_number) { //get the ceiling of the division block_number = (mat_result_length + thread_number - 1)/thread_number; } //offset is the halfway point in the result matrix int offset = host_id * (mat_result_length)/2; hipMalloc((void**)&mat_a_device, sizeof(double)*matrix_a_height*matrix_a_width_matrix_b_height); hipMalloc((void**)&mat_b_device, sizeof(double)*matrix_a_width_matrix_b_height*matrix_b_width); hipMalloc((void**)&mat_result_device, sizeof(double)*mat_result_length); hipMemcpy(mat_a_device, mat_a, sizeof(double)*matrix_a_height*matrix_a_width_matrix_b_height, hipMemcpyHostToDevice); hipMemcpy(mat_b_device, mat_b, sizeof(double)*matrix_a_width_matrix_b_height*matrix_b_width, hipMemcpyHostToDevice); hipMemcpy(mat_result_device, mat_result, sizeof(double)*mat_result_length, hipMemcpyHostToDevice); hipLaunchKernelGGL(( __multiply__) , dim3(block_number), dim3(thread_number), 0, 0, mat_a_device, mat_b_device, mat_result_device, matrix_a_height, matrix_a_width_matrix_b_height, matrix_b_width, offset, mat_result_length); hipMemcpy(mat_result, mat_result_device, sizeof(double)*mat_result_length, hipMemcpyDeviceToHost); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error 
code %d after launching addKernel!\n", cudaStatus); } hipFree(mat_a_device); hipFree(mat_b_device); hipFree(mat_result_device); cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); } /* result was already copied back from the GPU to the CPU by the hipMemcpy above */ }
554ca26d15991a331d1601089f726df31521dbf3.cu
/* multiply.cu */ #include <cuda.h> #include "mycuda.h" #include <stdio.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <math.h> using namespace std; __global__ void __multiply__ (double* a, double* b, double* c, int matrix_a_height, int matrix_a_width_matrix_b_height, int matrix_b_width, int offset, int mat_result_length) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; //I tried many solutions, but this is the only one that worked. It's not elegent, but it will have to do. //The var j keeps track of where on the array we are. Because we can't ensure that the kernel will be called the //Correct number of times as there are elements in the result matrix //The offset variable is at the halfpoint of the result matrix size //This ensures that the multiplication work will be done halfway on the first node //And the second half will be done on the second node if(j < mat_result_length/2 && offset == 0) { //printf ("TWO: %d is j, offset is%d\n", j, offset); for(int k = 0; k < matrix_a_width_matrix_b_height; k++) { c[i * matrix_b_width + j] += a[i * matrix_a_width_matrix_b_height + k] * b[k * matrix_b_width + j]; } } else if (j > offset && j < mat_result_length/2 + offset) { //printf ("ONE: %d is j, offset is%d\n", j, offset); for(int k = 0; k < matrix_a_width_matrix_b_height; k++) { c[i * matrix_b_width + j] += a[i * matrix_a_width_matrix_b_height + k] * b[k * matrix_b_width + j]; } } } void MatrixMultiplyCuda(double* mat_a, double* mat_b, double* mat_result, int matrix_a_height, int matrix_a_width_matrix_b_height, int matrix_b_width, int host_id) { cudaError_t cudaStatus; double* mat_a_device; double* mat_b_device; double* mat_result_device; //figure out ideal thread/block numbers //I'm targeting 256 threads, because we found that to be optimal from assignment 4 int thread_number = 256; int block_number = 1; int mat_result_length = matrix_a_height*matrix_b_width; if(mat_result_length < thread_number) { thread_number = mat_result_length; } else if (mat_result_length > thread_number) { //get the ceiling of the division block_number = (mat_result_length + thread_number - 1)/thread_number; } //offset is the halfway point in the result matrix int offset = host_id * (mat_result_length)/2; cudaMalloc((void**)&mat_a_device, sizeof(double)*matrix_a_height*matrix_a_width_matrix_b_height); cudaMalloc((void**)&mat_b_device, sizeof(double)*matrix_a_width_matrix_b_height*matrix_b_width); cudaMalloc((void**)&mat_result_device, sizeof(double)*mat_result_length); cudaMemcpy(mat_a_device, mat_a, sizeof(double)*matrix_a_height*matrix_a_width_matrix_b_height, cudaMemcpyHostToDevice); cudaMemcpy(mat_b_device, mat_b, sizeof(double)*matrix_a_width_matrix_b_height*matrix_b_width, cudaMemcpyHostToDevice); cudaMemcpy(mat_result_device, mat_result, sizeof(double)*mat_result_length, cudaMemcpyHostToDevice); __multiply__ <<<block_number, thread_number>>> (mat_a_device, mat_b_device, mat_result_device, matrix_a_height, matrix_a_width_matrix_b_height, matrix_b_width, offset, mat_result_length); cudaMemcpy(mat_result, mat_result_device, sizeof(double)*mat_result_length, cudaMemcpyDeviceToHost); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); } cudaFree(mat_a_device); 
cudaFree(mat_b_device); cudaFree(mat_result_device); cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); } /* result was already copied back from the GPU to the CPU by the cudaMemcpy above */ }
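Note: the comments in MatrixMultiplyCuda describe splitting the result between two nodes via the host_id-derived offset, but the calling harness is not part of this file. The sketch below is only one plausible way two ranks could drive it; it assumes an MPI launcher with exactly two ranks and assumes mycuda.h declares MatrixMultiplyCuda, neither of which is shown in the original source.

/* Hypothetical two-rank driver: each rank computes its half of C (selected by
 * host_id = rank); since untouched entries stay zero, an element-wise sum
 * combines the two partial results into one buffer on rank 0. */
#include <mpi.h>
#include <vector>
#include "mycuda.h"   // assumed to declare MatrixMultiplyCuda(...)

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const int m = 4, k = 4, n = 4;                       // small example sizes
    std::vector<double> a(m * k, 1.0), b(k * n, 2.0);
    std::vector<double> c_part(m * n, 0.0), c_full(m * n, 0.0);

    // each rank fills only its half of c_part, as selected by host_id
    MatrixMultiplyCuda(a.data(), b.data(), c_part.data(), m, k, n, rank);

    // sum the two partial buffers on rank 0
    MPI_Reduce(c_part.data(), c_full.data(), m * n, MPI_DOUBLE, MPI_SUM,
               0, MPI_COMM_WORLD);

    MPI_Finalize();
    return 0;
}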
16625cc9ea227eb4296f82050d64f53ae04f575d.hip
// !!! This is a file automatically generated by hipify!!! // --------------------------------------------------------------------------- // Unified Panoptic Segmentation Network // // Modifications Copyright (c) 2019 Uber Technologies, Inc. // --------------------------------------------------------------------------- /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ******************** * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ #include <array> #include <cstdio> #include <cstdint> #include <cstring> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hiprand/hiprand.h> #include <cfloat> #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename DType> __device__ DType deformable_im2col_bilinear(const DType *bottom_data, const int data_width, const int height, const int width, DType h, DType w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; DType lh = h - h_low; DType lw = w - w_low; DType hh = 1 - lh, hw = 1 - lw; DType v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; DType v2 = 0; if (h_low >=0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; DType v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; DType v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename DType> __device__ DType get_gradient_weight(DType argmax_h, DType argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; DType weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename DType> __device__ DType get_coordinate_weight(DType argmax_h, DType argmax_w, const int height, const int width, const DType *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; DType weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += 
(argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } /*! * \brief deformable_im2col gpu kernel. * DO NOT call this directly. Use wrapper function im2col() instead; */ template <typename DType> __global__ void deformable_im2col_gpu_kernel(const int n, const DType *data_im, const DType *data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, DType *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; DType* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const DType* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const DType* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const DType* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; DType val = static_cast<DType>(0); const DType h_im = h_in + i * dilation_h + offset_h; const DType w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const DType map_h = i * dilation_h + offset_h; //const DType map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } /*!\brief * cpu function of deformable_im2col algorithm * \param s device stream * \param data_im pointer of images (N, C, H, W, ...) in the image batch * \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape (#channels, N, output_im_height, output_im_width, ...) 
* \param kernel_shape kernel filter shape * \param pad pad shape * \param stride stride shape * \param dilation dilation shape * \param deformable_group #offset group that deformable convolution use * \param data_col column buffer pointer */ void deformable_im2col_gpu_kernel_launcher(hipStream_t stream, const float *data_im, const float *data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, float *data_col) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im, data_offset, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_im2col: %s\n", hipGetErrorString(err)); } } /*! * \brief deformable_col2im gpu kernel. * \brief DO NOT call this directly. Use wrapper function deformable_col2im() instead; */ template <typename DType> __global__ void deformable_col2im_gpu_kernel(const int n, const DType *data_col, const DType *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, DType *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const DType* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; const DType cur_inv_h_data = h_in + i * dilation_h + offset_h; const DType cur_inv_w_data = w_in + j * dilation_w + offset_w; const DType cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && 
abs(cur_inv_w_data - (cur_w + dx)) < 1 ) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; DType weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } /*!\brief * gpu function of deformable_col2im algorithm * \param s device stream * \param data_col start pointer of the column buffer to be filled * \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape * \param kernel_shape kernel filter shape * \param pad pad shape * \param stride stride shape * \param dilation dilation shape * \param deformable_group #offset group that deformable convolution use * \param grad_im pointer of images (N, C, H, W,...) in the image batch */ void deformable_col2im_gpu_kernel_launcher(hipStream_t stream, const float *data_col, const float *data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, float* grad_im) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col, data_offset, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_col2im: %s\n", hipGetErrorString(err)); } } /*! * \brief deformable_col2im_coord gpu kernel. * \brief DO NOT call this directly. 
Use wrapper function deformable_col2im_coord() instead; */ template <typename DType> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const DType *data_col, const DType *data_im, const DType *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, DType *grad_offset) { CUDA_KERNEL_LOOP(index, n) { DType val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const DType *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const DType *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const DType *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; DType inv_h = h_in + i * dilation_h + offset_h; DType inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const DType weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } /*!\brief * gpu function of deformable_col2im_coord algorithm * \param s device stream * \param data_col start pointer of the column buffer to be filled * \param data_im pointer of images (N, C, H, W, ...) in the image batch * \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape * \param kernel_shape kernel filter shape * \param pad pad shape * \param stride stride shape * \param dilation dilation shape * \param deformable_group #offset group that deformable convolution use * \param grad_offset pointer of the offsets (N, deformable_group*kernel_h*kernel_w*2, H, W,...) 
in the offset batch */ void deformable_col2im_coord_gpu_kernel_launcher(hipStream_t stream, const float *data_col, const float *data_im, const float *data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, float *grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; // index_t num_spatial_axes = kernel_shape.ndim(); // index_t num_kernels = col_shape[1] * col_shape[2] * col_shape[3] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group; // index_t channel_per_deformable_group = col_shape[0] / deformable_group; // num_axes should be smaller than block size // CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum); // using namespace mxnet_op; // switch (num_spatial_axes) { // case 2: // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) // deformable_col2im_coord_gpu_kernel<DType> << <cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum, // 0, mshadow::Stream<gpu>::GetStream(s) >> >( // num_kernels, data_col, data_im, data_offset, im_shape[1], im_shape[2], im_shape[3], // kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1], // dilation[0], dilation[1], channel_per_deformable_group, // col_shape[1], 2 * kernel_shape[0] * kernel_shape[1] * deformable_group, deformable_group, col_shape[2], col_shape[3], grad_offset, req); // MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_coord_gpu_kernel); // break; // default: // LOG(FATAL) << "col2im_nd_gpu does not support computation with " // << num_spatial_axes << " spatial axes"; hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col, data_im, data_offset, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset); } // template void // deformable_im2col_gpu_kernel_launcher(hipStream_t stream, // const float *data_im, const float *data_offset, const int channels, // const int height, const int width, const int ksize_h, const int ksize_w, // const int pad_h, const int pad_w, const int stride_h, const int stride_w, // const int dilation_h, const int dilation_w, const int parallel_imgs, // const int deformable_group, float *data_col); // template void // deformable_col2im_gpu_kernel_launcher(hipStream_t stream, // const float *data_col, const float *data_offset, const int channels, // const int height, const int width, const int ksize_h, // const int ksize_w, const int pad_h, const int pad_w, // const int stride_h, const int stride_w, // const int dilation_h, const int dilation_w, // const int parallel_imgs, const int deformable_group, // float* grad_im); // template void // deformable_col2im_coord_gpu_kernel_launcher(hipStream_t stream, const float *data_col, // const float *data_im, 
const float *data_offset, // const int channels, const int height, const int width, // const int ksize_h, const int ksize_w, const int pad_h, // const int pad_w, const int stride_h, const int stride_w, // const int dilation_h, const int dilation_w, const int parallel_imgs, // const int deformable_group, float *grad_offset);
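Note: the im2col/col2im launchers above derive the output spatial extent from the standard convolution-output formula before sizing their grids, and the deformable_im2col kernel writes one value per (channel, kernel position, image, output position). A small host-only sketch of that arithmetic, using made-up example dimensions:

#include <cstdio>

// output extent along one spatial axis, as computed inside the launchers above
int conv_out_size(int in, int pad, int ksize, int stride, int dilation) {
    return (in + 2 * pad - (dilation * (ksize - 1) + 1)) / stride + 1;
}

int main() {
    // hypothetical example: 3x64x64 input, 3x3 kernel, pad 1, stride 1, dilation 1
    const int channels = 3, height = 64, width = 64;
    const int ksize = 3, pad = 1, stride = 1, dilation = 1;
    const int parallel_imgs = 2;                     // images processed per call

    const int height_col = conv_out_size(height, pad, ksize, stride, dilation);
    const int width_col  = conv_out_size(width,  pad, ksize, stride, dilation);

    // column-buffer length implied by the kernel's indexing:
    // channels * ksize_h * ksize_w rows, parallel_imgs * height_col * width_col columns
    const long col_elems =
        (long)channels * ksize * ksize * parallel_imgs * height_col * width_col;

    printf("height_col=%d width_col=%d, data_col length=%ld floats\n",
           height_col, width_col, col_elems);
    return 0;
}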
16625cc9ea227eb4296f82050d64f53ae04f575d.cu
// --------------------------------------------------------------------------- // Unified Panoptic Segmentation Network // // Modifications Copyright (c) 2019 Uber Technologies, Inc. // --------------------------------------------------------------------------- /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ******************** * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ #include <array> #include <cstdio> #include <cstdint> #include <cstring> #include <cuda_runtime.h> #include <cublas_v2.h> #include <curand.h> #include <cfloat> #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename DType> __device__ DType deformable_im2col_bilinear(const DType *bottom_data, const int data_width, const int height, const int width, DType h, DType w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; DType lh = h - h_low; DType lw = w - w_low; DType hh = 1 - lh, hw = 1 - lw; DType v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; DType v2 = 0; if (h_low >=0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; DType v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; DType v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename DType> __device__ DType get_gradient_weight(DType argmax_h, DType argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; DType weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename DType> __device__ DType get_coordinate_weight(DType argmax_h, DType argmax_w, const int height, const int width, const DType *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; DType weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 
- argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } /*! * \brief deformable_im2col gpu kernel. * DO NOT call this directly. Use wrapper function im2col() instead; */ template <typename DType> __global__ void deformable_im2col_gpu_kernel(const int n, const DType *data_im, const DType *data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, DType *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; DType* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const DType* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const DType* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const DType* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; DType val = static_cast<DType>(0); const DType h_im = h_in + i * dilation_h + offset_h; const DType w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const DType map_h = i * dilation_h + offset_h; //const DType map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } /*!\brief * cpu function of deformable_im2col algorithm * \param s device stream * \param data_im pointer of images (N, C, H, W, ...) in the image batch * \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape (#channels, N, output_im_height, output_im_width, ...) 
* \param kernel_shape kernel filter shape * \param pad pad shape * \param stride stride shape * \param dilation dilation shape * \param deformable_group #offset group that deformable convolution use * \param data_col column buffer pointer */ void deformable_im2col_gpu_kernel_launcher(cudaStream_t stream, const float *data_im, const float *data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, float *data_col) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im, data_offset, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); } } /*! * \brief deformable_col2im gpu kernel. * \brief DO NOT call this directly. Use wrapper function deformable_col2im() instead; */ template <typename DType> __global__ void deformable_col2im_gpu_kernel(const int n, const DType *data_col, const DType *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, DType *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const DType* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; const DType cur_inv_h_data = h_in + i * dilation_h + offset_h; const DType cur_inv_w_data = w_in + j * dilation_w + offset_w; const DType cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - 
(cur_w + dx)) < 1 ) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; DType weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } /*!\brief * gpu function of deformable_col2im algorithm * \param s device stream * \param data_col start pointer of the column buffer to be filled * \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape * \param kernel_shape kernel filter shape * \param pad pad shape * \param stride stride shape * \param dilation dilation shape * \param deformable_group #offset group that deformable convolution use * \param grad_im pointer of images (N, C, H, W,...) in the image batch */ void deformable_col2im_gpu_kernel_launcher(cudaStream_t stream, const float *data_col, const float *data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, float* grad_im) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col, data_offset, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); } } /*! * \brief deformable_col2im_coord gpu kernel. * \brief DO NOT call this directly. 
Use wrapper function deformable_col2im_coord() instead; */ template <typename DType> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const DType *data_col, const DType *data_im, const DType *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, DType *grad_offset) { CUDA_KERNEL_LOOP(index, n) { DType val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const DType *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const DType *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const DType *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const DType offset_h = data_offset_ptr[data_offset_h_ptr]; const DType offset_w = data_offset_ptr[data_offset_w_ptr]; DType inv_h = h_in + i * dilation_h + offset_h; DType inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const DType weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } /*!\brief * gpu function of deformable_col2im_coord algorithm * \param s device stream * \param data_col start pointer of the column buffer to be filled * \param data_im pointer of images (N, C, H, W, ...) in the image batch * \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch * \param im_shape input image shape in dimensions (N, C, H, W,) * \param col_shape column buffer shape * \param kernel_shape kernel filter shape * \param pad pad shape * \param stride stride shape * \param dilation dilation shape * \param deformable_group #offset group that deformable convolution use * \param grad_offset pointer of the offsets (N, deformable_group*kernel_h*kernel_w*2, H, W,...) 
in the offset batch */ void deformable_col2im_coord_gpu_kernel_launcher(cudaStream_t stream, const float *data_col, const float *data_im, const float *data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, float *grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; // index_t num_spatial_axes = kernel_shape.ndim(); // index_t num_kernels = col_shape[1] * col_shape[2] * col_shape[3] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group; // index_t channel_per_deformable_group = col_shape[0] / deformable_group; // num_axes should be smaller than block size // CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum); // using namespace mxnet_op; // switch (num_spatial_axes) { // case 2: // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) // deformable_col2im_coord_gpu_kernel<DType> << <cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum, // 0, mshadow::Stream<gpu>::GetStream(s) >> >( // num_kernels, data_col, data_im, data_offset, im_shape[1], im_shape[2], im_shape[3], // kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1], // dilation[0], dilation[1], channel_per_deformable_group, // col_shape[1], 2 * kernel_shape[0] * kernel_shape[1] * deformable_group, deformable_group, col_shape[2], col_shape[3], grad_offset, req); // MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_coord_gpu_kernel); // break; // default: // LOG(FATAL) << "col2im_nd_gpu does not support computation with " // << num_spatial_axes << " spatial axes"; deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col, data_im, data_offset, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset); } // template void // deformable_im2col_gpu_kernel_launcher(cudaStream_t stream, // const float *data_im, const float *data_offset, const int channels, // const int height, const int width, const int ksize_h, const int ksize_w, // const int pad_h, const int pad_w, const int stride_h, const int stride_w, // const int dilation_h, const int dilation_w, const int parallel_imgs, // const int deformable_group, float *data_col); // template void // deformable_col2im_gpu_kernel_launcher(cudaStream_t stream, // const float *data_col, const float *data_offset, const int channels, // const int height, const int width, const int ksize_h, // const int ksize_w, const int pad_h, const int pad_w, // const int stride_h, const int stride_w, // const int dilation_h, const int dilation_w, // const int parallel_imgs, const int deformable_group, // float* grad_im); // template void // deformable_col2im_coord_gpu_kernel_launcher(cudaStream_t stream, const float *data_col, // const float *data_im, const float *data_offset, // 
const int channels, const int height, const int width, // const int ksize_h, const int ksize_w, const int pad_h, // const int pad_w, const int stride_h, const int stride_w, // const int dilation_h, const int dilation_w, const int parallel_imgs, // const int deformable_group, float *grad_offset);
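Editor-added illustration for the deformable-convolution file above: a host-side sizing sketch that only repeats the height_col/width_col formula used by the launchers and shows how large the column buffer must be before deformable_im2col_gpu_kernel_launcher is called. The helper name run_im2col_example and the buffer name columns are assumptions made for this example; they do not appear in the original file.

#include <cuda_runtime.h>
#include <cstddef>

// Declaration copied from the launcher defined in the file above.
void deformable_im2col_gpu_kernel_launcher(cudaStream_t stream,
    const float *data_im, const float *data_offset, const int channels,
    const int height, const int width, const int ksize_h, const int ksize_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w, const int parallel_imgs,
    const int deformable_group, float *data_col);

void run_im2col_example(cudaStream_t stream,
                        const float *d_im, const float *d_offset,
                        int channels, int height, int width,
                        int k, int pad, int stride, int dilation,
                        int parallel_imgs, int deformable_group)
{
    // Output spatial size, same formula as inside the launcher.
    const int height_col = (height + 2 * pad - (dilation * (k - 1) + 1)) / stride + 1;
    const int width_col  = (width  + 2 * pad - (dilation * (k - 1) + 1)) / stride + 1;

    // The kernel writes one value per (input channel, kernel tap, image, output pixel),
    // i.e. a buffer of shape (channels*k*k, parallel_imgs, height_col, width_col).
    float *columns = nullptr;
    const std::size_t col_elems =
        (std::size_t)channels * k * k * parallel_imgs * height_col * width_col;
    cudaMalloc(&columns, col_elems * sizeof(float));

    deformable_im2col_gpu_kernel_launcher(stream, d_im, d_offset, channels,
        height, width, k, k, pad, pad, stride, stride, dilation, dilation,
        parallel_imgs, deformable_group, columns);

    cudaFree(columns);
}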
57a41d8581fb165cbdaa6c599f211b8cd2b089ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "image_blur.h" #include "helpers.h" #include <iostream> #include <cmath> __global__ void blur_cuda(unsigned char* input_image, unsigned char* output_image, int width, int height); void image_blur_cuda(unsigned char* Input_Image, int Height, int Width, int Channels) { unsigned char* Dev_Input_Image = NULL; unsigned char* Dev_Output_Image = NULL; getError(hipMalloc((void**)&Dev_Input_Image, Width * Height * 3 * sizeof(unsigned char))); getError(hipMemcpy(Dev_Input_Image, Input_Image, Width * Height * 3 * sizeof(unsigned char), hipMemcpyHostToDevice)); getError(hipMalloc((void**)&Dev_Output_Image, Width * Height * 3 * sizeof(unsigned char))); dim3 blockDims(512, 1, 1); dim3 gridDims((unsigned int)ceil((double)(Width * Height * 3 / blockDims.x)), 1, 1); blur_cuda << <gridDims, blockDims >> > (Dev_Input_Image, Dev_Output_Image,Width,Height); //copy processed data back to cpu from gpu getError(hipMemcpy(Input_Image, Dev_Output_Image, Width * Height * 3 * sizeof(unsigned char), hipMemcpyDeviceToHost)); getError(hipFree(Dev_Input_Image)); getError(hipFree(Dev_Output_Image)); //free gpu mempry } __global__ void blur_cuda(unsigned char* input_image, unsigned char* output_image, int Width, int Height) { const unsigned int offset = blockIdx.x * blockDim.x + threadIdx.x; int x = offset % Width; int y = (offset - x) / Width; int fsize = 5; // Filter size if (offset < Width * Height) { float output_red = 0; float output_green = 0; float output_blue = 0; int hits = 0; for (int ox = -fsize; ox < fsize + 1; ++ox) { for (int oy = -fsize; oy < fsize + 1; ++oy) { if ((x + ox) > -1 && (x + ox) < Width && (y + oy) > -1 && (y + oy) < Height) { const int currentoffset = (offset + ox + oy * Width) * 3; output_red += input_image[currentoffset]; output_green += input_image[currentoffset + 1]; output_blue += input_image[currentoffset + 2]; hits++; } } } output_image[offset * 3] = output_red / hits; output_image[offset * 3 + 1] = output_green / hits; output_image[offset * 3 + 2] = output_blue / hits; } }
57a41d8581fb165cbdaa6c599f211b8cd2b089ff.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "image_blur.h" #include "helpers.h" #include <iostream> #include <cmath> __global__ void blur_cuda(unsigned char* input_image, unsigned char* output_image, int width, int height); void image_blur_cuda(unsigned char* Input_Image, int Height, int Width, int Channels) { unsigned char* Dev_Input_Image = NULL; unsigned char* Dev_Output_Image = NULL; getError(cudaMalloc((void**)&Dev_Input_Image, Width * Height * 3 * sizeof(unsigned char))); getError(cudaMemcpy(Dev_Input_Image, Input_Image, Width * Height * 3 * sizeof(unsigned char), cudaMemcpyHostToDevice)); getError(cudaMalloc((void**)&Dev_Output_Image, Width * Height * 3 * sizeof(unsigned char))); dim3 blockDims(512, 1, 1); dim3 gridDims((unsigned int)ceil((double)(Width * Height * 3 / blockDims.x)), 1, 1); blur_cuda << <gridDims, blockDims >> > (Dev_Input_Image, Dev_Output_Image,Width,Height); //copy processed data back to cpu from gpu getError(cudaMemcpy(Input_Image, Dev_Output_Image, Width * Height * 3 * sizeof(unsigned char), cudaMemcpyDeviceToHost)); getError(cudaFree(Dev_Input_Image)); getError(cudaFree(Dev_Output_Image)); //free gpu mempry } __global__ void blur_cuda(unsigned char* input_image, unsigned char* output_image, int Width, int Height) { const unsigned int offset = blockIdx.x * blockDim.x + threadIdx.x; int x = offset % Width; int y = (offset - x) / Width; int fsize = 5; // Filter size if (offset < Width * Height) { float output_red = 0; float output_green = 0; float output_blue = 0; int hits = 0; for (int ox = -fsize; ox < fsize + 1; ++ox) { for (int oy = -fsize; oy < fsize + 1; ++oy) { if ((x + ox) > -1 && (x + ox) < Width && (y + oy) > -1 && (y + oy) < Height) { const int currentoffset = (offset + ox + oy * Width) * 3; output_red += input_image[currentoffset]; output_green += input_image[currentoffset + 1]; output_blue += input_image[currentoffset + 2]; hits++; } } } output_image[offset * 3] = output_red / hits; output_image[offset * 3 + 1] = output_green / hits; output_image[offset * 3 + 2] = output_blue / hits; } }
4dcb35612a7f1f741a03ef5624468648765e8906.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CUAPI.h" #include "CUFLU.h" #ifdef GRAVITY #include "CUPOT.h" #endif #ifdef GPU // fluid solver prototypes in different models #if ( MODEL == HYDRO ) #if ( FLU_SCHEME == RTVD ) __global__ void CUFLU_FluidSolver_RTVD( real g_Fluid_In [] [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out[] [NCOMP_TOTAL][ PS2*PS2*PS2 ], real g_Flux [][9][NCOMP_TOTAL][ PS2*PS2 ], const double g_Corner[][3], const real g_Pot_USG[][ USG_NXT_F*USG_NXT_F*USG_NXT_F ], const real dt, const real _dh, const real Gamma, const bool StoreFlux, const bool XYZ, const real MinDens, const real MinPres ); #elif ( FLU_SCHEME == WAF ) __global__ void CUFLU_FluidSolver_WAF( real g_Fluid_In [] [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out[] [NCOMP_TOTAL][ PS2*PS2*PS2 ], real g_Flux [][9][NCOMP_TOTAL][ PS2*PS2 ], const double g_Corner[][3], const real g_Pot_USG[][ USG_NXT_F*USG_NXT_F*USG_NXT_F ], const real dt, const real _dh, const real Gamma, const bool StoreFlux, const bool XYZ, const WAF_Limiter_t WAF_Limiter, const real MinDens, const real MinPres ); #elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP ) __global__ void CUFLU_FluidSolver_MHM( const real g_Fluid_In[] [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out [] [NCOMP_TOTAL][ PS2*PS2*PS2 ], char g_DE_Out [] [ PS2*PS2*PS2 ], real g_Flux [][9][NCOMP_TOTAL][ PS2*PS2 ], const double g_Corner[][3], const real g_Pot_USG[] [ USG_NXT_F*USG_NXT_F*USG_NXT_F ], real g_PriVar [][NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Slope_PPM_x[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_Slope_PPM_y[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_Slope_PPM_z[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_FC_Var_xL [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_xR [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_yL [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_yR [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_zL [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_zR [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Flux_x [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], real g_FC_Flux_y [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], real g_FC_Flux_z [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], const real dt, const real _dh, const real Gamma, const bool StoreFlux, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const real EP_Coeff, const double Time, const OptGravityType_t GravityType, const real MinDens, const real MinPres, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool JeansMinPres, const real JeansMinPres_Coeff ); #if ( NCOMP_PASSIVE > 0 ) int CUFLU_FluidSolver_SetConstMem_NormIdx( int NormIdx_h[] ); #endif #elif ( FLU_SCHEME == CTU ) __global__ void CUFLU_FluidSolver_CTU( const real g_Fluid_In[] [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out [] [NCOMP_TOTAL][ PS2*PS2*PS2 ], char g_DE_Out [] [ PS2*PS2*PS2 ], real g_Flux [][9][NCOMP_TOTAL][ PS2*PS2 ], const double g_Corner[][3], const real g_Pot_USG[] [ USG_NXT_F*USG_NXT_F*USG_NXT_F ], real g_PriVar [][NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Slope_PPM_x[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_Slope_PPM_y[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_Slope_PPM_z[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_FC_Var_xL [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_xR 
[][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_yL [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_yR [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_zL [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_zR [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Flux_x [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], real g_FC_Flux_y [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], real g_FC_Flux_z [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], const real dt, const real _dh, const real Gamma, const bool StoreFlux, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const real EP_Coeff, const double Time, const OptGravityType_t GravityType, const real MinDens, const real MinPres, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool JeansMinPres, const real JeansMinPres_Coeff ); #if ( NCOMP_PASSIVE > 0 ) int CUFLU_FluidSolver_SetConstMem_NormIdx( int NormIdx_h[] ); #endif #endif // FLU_SCHEME __global__ void CUFLU_dtSolver_HydroCFL( real g_dt_Array[], const real g_Flu_Array[][NCOMP_FLUID][ CUBE(PS1) ], const double g_Corner_Array[][3], const real dh, const real Safety, const real Gamma, const real MinPres ); #ifdef GRAVITY __global__ void CUPOT_dtSolver_HydroGravity( real g_dt_Array[], const real g_Pot_Array[][ CUBE(GRA_NXT) ], const double g_Corner_Array[][3], const real dh, const real Safety, const bool P5_Gradient, const OptGravityType_t GravityType, const double ExtAcc_Time ); #endif #elif ( MODEL == MHD ) #warning : WAIT MHD !!! #elif ( MODEL == ELBDM ) __global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out[][FLU_NOUT][ PS2*PS2*PS2 ], real g_Flux [][9][NFLUX_TOTAL][ PS2*PS2 ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const bool XYZ, const real MinDens ); #else #error : ERROR : unsupported MODEL !! #endif // MODEL #ifdef GRAVITY // Poisson solver prototypes #if ( POT_SCHEME == SOR ) #ifdef USE_PSOLVER_10TO14 __global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #else __global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #endif // #ifdef USE_PSOLVER_10TO14 ... else ... 
#elif ( POT_SCHEME == MG ) __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ); #endif // POT_SCHEME // Gravity solver prototypes in different models #if ( MODEL == HYDRO ) __global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array_New[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real g_Pot_Array_USG[][ USG_NXT_G*USG_NXT_G*USG_NXT_G ], const real g_Flu_Array_USG[][GRA_NIN-1][ PS1*PS1*PS1 ], char g_DE_Array[][ PS1*PS1*PS1 ], const real Gra_Const, const bool P5_Gradient, const OptGravityType_t GravityType, const double TimeNew, const double TimeOld, const real dt, const real dh, const real MinEint ); #elif ( MODEL == MHD ) #warning :: WAIT MHD !!! #elif ( MODEL == ELBDM ) __global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real EtaDt, const real dh, const real Lambda, const bool ExtPot, const double Time ); #else #error : ERROR : unsupported MODEL !! #endif // MODEL int CUPOT_PoissonSolver_SetConstMem(); #endif // GRAVITY //------------------------------------------------------------------------------------------------------- // Function : CUAPI_Set_Default_GPU_Parameter // Description : Set several GPU parameters to the default values if they are not set in the input file // // Parameter : GPU_NStream : Number of streams for the asynchronous memory copy in GPU // Flu_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the fluid solver // Pot_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Poisson solver // Che_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Grackle solver // dt_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the dt solver //------------------------------------------------------------------------------------------------------- void CUAPI_Set_Default_GPU_Parameter( int &GPU_NStream, int &Flu_GPU_NPGroup, int &Pot_GPU_NPGroup, int &Che_GPU_NPGroup, int &dt_GPU_NPGroup ) { if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ ); // get the device ID int GetDeviceID = 999; CUDA_CHECK_ERROR( hipGetDevice( &GetDeviceID ) ); // load the device properties hipDeviceProp_t DeviceProp; CUDA_CHECK_ERROR( hipGetDeviceProperties( &DeviceProp, GetDeviceID ) ); // set the default GPU parameters // (1) GPU_NSTREAM if ( GPU_NStream <= 0 ) { if ( DeviceProp.deviceOverlap ) { # if ( MODEL == HYDRO ) # if ( GPU_ARCH == FERMI ) GPU_NStream = 8; # elif ( GPU_ARCH == KEPLER ) GPU_NStream = 32; # elif ( GPU_ARCH == MAXWELL ) GPU_NStream = 32; # elif ( GPU_ARCH == PASCAL ) GPU_NStream = 32; # elif ( GPU_ARCH == VOLTA ) GPU_NStream = 32; # else # error : UNKNOWN GPU_ARCH !! # endif # elif ( MODEL == MHD ) # warning :: WAIT MHD !!! # elif ( MODEL == ELBDM ) # if ( GPU_ARCH == FERMI ) GPU_NStream = 8; # elif ( GPU_ARCH == KEPLER ) GPU_NStream = 32; # elif ( GPU_ARCH == MAXWELL ) GPU_NStream = 32; # elif ( GPU_ARCH == PASCAL ) GPU_NStream = 32; # elif ( GPU_ARCH == VOLTA ) GPU_NStream = 32; # else # error : ERROR : UNKNOWN GPU_ARCH !! # endif # else # error : ERROR : UNKNOWN MODEL !! 
# endif // MODEL } // if ( DeviceProp.deviceOverlap ) else GPU_NStream = 1; if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "GPU_NSTREAM", GPU_NSTREAM ); } // if ( GPU_NStream <= 0 ) // (2) XXX_GPU_NPGROUP // (2-1) FLU_GPU_NPGROUP if ( Flu_GPU_NPGroup <= 0 ) { # if ( MODEL == HYDRO ) # if ( GPU_ARCH == FERMI ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif # elif ( MODEL == MHD ) # warning :: WAIT MHD !!! # elif ( MODEL == ELBDM ) # if ( GPU_ARCH == FERMI ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif # else # error : ERROR : UNKNOWN MODEL !! # endif // MODEL if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "FLU_GPU_NPGROUP", Flu_GPU_NPGroup ); } // if ( Flu_GPU_NPGroup <= 0 ) // (2-2) POT_GPU_NPGROUP # ifdef GRAVITY if ( Pot_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "POT_GPU_NPGROUP", Pot_GPU_NPGroup ); } // if ( Pot_GPU_NPGroup <= 0 ) # endif // (2-3) CHE_GPU_NPGROUP # ifdef SUPPORT_GRACKLE if ( Che_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! 
# endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "CHE_GPU_NPGROUP", Che_GPU_NPGroup ); } // if ( Che_GPU_NPGroup <= 0 ) # endif // (2-4) DT_GPU_NPGROUP if ( dt_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) dt_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; // not optimized yet # elif ( GPU_ARCH == KEPLER ) dt_GPU_NPGroup = 32*DeviceProp.multiProcessorCount; // not optimized yet # elif ( GPU_ARCH == MAXWELL ) dt_GPU_NPGroup = 32*DeviceProp.multiProcessorCount; // not optimized yet # elif ( GPU_ARCH == PASCAL ) dt_GPU_NPGroup = 32*DeviceProp.multiProcessorCount; // not optimized yet # else # error : UNKNOWN GPU_ARCH !! # endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d\n", "DT_GPU_NPGROUP", dt_GPU_NPGroup ); } // if ( dt_GPU_NPGroup <= 0 ) // (3) cache preference // (3-1) fluid solver # if ( MODEL == HYDRO ) # if ( FLU_SCHEME == RTVD ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_RTVD, hipFuncCachePreferShared ) ); # elif ( FLU_SCHEME == WAF ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_WAF, hipFuncCachePreferShared ) ); # elif ( FLU_SCHEME == MHM ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_MHM, hipFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == MHM_RP ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_MHM, hipFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == CTU ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_FluidSolver_CTU, hipFuncCachePreferL1 ) ); # endif CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_dtSolver_HydroCFL, hipFuncCachePreferShared ) ); # ifdef GRAVITY CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_dtSolver_HydroGravity, hipFuncCachePreferShared ) ); # endif # elif ( MODEL == MHD ) # warning :: WAIT MHD !!! # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUFLU_ELBDMSolver, hipFuncCachePreferShared ) ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL # ifdef GRAVITY // (3-2) Poisson solver # if ( POT_SCHEME == SOR ) # ifdef USE_PSOLVER_10TO14 CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_10to14cube, hipFuncCachePreferShared ) ); # else CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_16to18cube, hipFuncCachePreferShared ) ); # endif # elif ( POT_SCHEME == MG ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_PoissonSolver_MG, hipFuncCachePreferShared ) ); # endif // POT_SCHEME // (3-3) gravity solver # if ( MODEL == HYDRO ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_HydroGravitySolver, hipFuncCachePreferShared ) ); # elif ( MODEL == MHD ) # warning : WAIT MHD !!! # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( hipFuncSetCacheConfig( CUPOT_ELBDMGravitySolver, hipFuncCachePreferL1 ) ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL # endif // GRAVITY // (4) set the constant variables // --> note that the auxiliary arrays for the external acceleration and potential are set by CUAPI_Init_ExternalAccPot() # if ( NCOMP_PASSIVE > 0 ) if ( OPT__NORMALIZE_PASSIVE ) { # if ( MODEL == HYDRO && ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) ) if ( CUFLU_FluidSolver_SetConstMem_NormIdx(PassiveNorm_VarIdx) != 0 ) Aux_Error( ERROR_INFO, "CUFLU_FluidSolver_SetConstMem_NormIdx failed ...\n" ); # elif ( MODEL == MHD ) # warning : WAIT MHD !!! 
# endif // MODEL } # endif // #if ( NCOMP_PASSIVE > 0 ) # ifdef GRAVITY if ( CUPOT_PoissonSolver_SetConstMem() != 0 ) Aux_Error( ERROR_INFO, "CUPOT_PoissonSolver_SetConstMem failed ...\n" ); # endif // #ifdef GRAVITY if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ... done\n", __FUNCTION__ ); } // FUNCTION : CUAPI_Set_Default_GPU_Parameter #endif // #ifdef GPU
4dcb35612a7f1f741a03ef5624468648765e8906.cu
#include "CUAPI.h" #include "CUFLU.h" #ifdef GRAVITY #include "CUPOT.h" #endif #ifdef GPU // fluid solver prototypes in different models #if ( MODEL == HYDRO ) #if ( FLU_SCHEME == RTVD ) __global__ void CUFLU_FluidSolver_RTVD( real g_Fluid_In [] [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out[] [NCOMP_TOTAL][ PS2*PS2*PS2 ], real g_Flux [][9][NCOMP_TOTAL][ PS2*PS2 ], const double g_Corner[][3], const real g_Pot_USG[][ USG_NXT_F*USG_NXT_F*USG_NXT_F ], const real dt, const real _dh, const real Gamma, const bool StoreFlux, const bool XYZ, const real MinDens, const real MinPres ); #elif ( FLU_SCHEME == WAF ) __global__ void CUFLU_FluidSolver_WAF( real g_Fluid_In [] [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out[] [NCOMP_TOTAL][ PS2*PS2*PS2 ], real g_Flux [][9][NCOMP_TOTAL][ PS2*PS2 ], const double g_Corner[][3], const real g_Pot_USG[][ USG_NXT_F*USG_NXT_F*USG_NXT_F ], const real dt, const real _dh, const real Gamma, const bool StoreFlux, const bool XYZ, const WAF_Limiter_t WAF_Limiter, const real MinDens, const real MinPres ); #elif ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP ) __global__ void CUFLU_FluidSolver_MHM( const real g_Fluid_In[] [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out [] [NCOMP_TOTAL][ PS2*PS2*PS2 ], char g_DE_Out [] [ PS2*PS2*PS2 ], real g_Flux [][9][NCOMP_TOTAL][ PS2*PS2 ], const double g_Corner[][3], const real g_Pot_USG[] [ USG_NXT_F*USG_NXT_F*USG_NXT_F ], real g_PriVar [][NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Slope_PPM_x[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_Slope_PPM_y[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_Slope_PPM_z[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_FC_Var_xL [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_xR [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_yL [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_yR [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_zL [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_zR [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Flux_x [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], real g_FC_Flux_y [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], real g_FC_Flux_z [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], const real dt, const real _dh, const real Gamma, const bool StoreFlux, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const real EP_Coeff, const double Time, const OptGravityType_t GravityType, const real MinDens, const real MinPres, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool JeansMinPres, const real JeansMinPres_Coeff ); #if ( NCOMP_PASSIVE > 0 ) int CUFLU_FluidSolver_SetConstMem_NormIdx( int NormIdx_h[] ); #endif #elif ( FLU_SCHEME == CTU ) __global__ void CUFLU_FluidSolver_CTU( const real g_Fluid_In[] [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out [] [NCOMP_TOTAL][ PS2*PS2*PS2 ], char g_DE_Out [] [ PS2*PS2*PS2 ], real g_Flux [][9][NCOMP_TOTAL][ PS2*PS2 ], const double g_Corner[][3], const real g_Pot_USG[] [ USG_NXT_F*USG_NXT_F*USG_NXT_F ], real g_PriVar [][NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Slope_PPM_x[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_Slope_PPM_y[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_Slope_PPM_z[][NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM], real g_FC_Var_xL [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_xR [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_yL [][NCOMP_TOTAL][ 
N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_yR [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_zL [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Var_zR [][NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ], real g_FC_Flux_x [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], real g_FC_Flux_y [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], real g_FC_Flux_z [][NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ], const real dt, const real _dh, const real Gamma, const bool StoreFlux, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const real EP_Coeff, const double Time, const OptGravityType_t GravityType, const real MinDens, const real MinPres, const real DualEnergySwitch, const bool NormPassive, const int NNorm, const bool JeansMinPres, const real JeansMinPres_Coeff ); #if ( NCOMP_PASSIVE > 0 ) int CUFLU_FluidSolver_SetConstMem_NormIdx( int NormIdx_h[] ); #endif #endif // FLU_SCHEME __global__ void CUFLU_dtSolver_HydroCFL( real g_dt_Array[], const real g_Flu_Array[][NCOMP_FLUID][ CUBE(PS1) ], const double g_Corner_Array[][3], const real dh, const real Safety, const real Gamma, const real MinPres ); #ifdef GRAVITY __global__ void CUPOT_dtSolver_HydroGravity( real g_dt_Array[], const real g_Pot_Array[][ CUBE(GRA_NXT) ], const double g_Corner_Array[][3], const real dh, const real Safety, const bool P5_Gradient, const OptGravityType_t GravityType, const double ExtAcc_Time ); #endif #elif ( MODEL == MHD ) #warning : WAIT MHD !!! #elif ( MODEL == ELBDM ) __global__ void CUFLU_ELBDMSolver( real g_Fluid_In [][FLU_NIN ][ FLU_NXT*FLU_NXT*FLU_NXT ], real g_Fluid_Out[][FLU_NOUT][ PS2*PS2*PS2 ], real g_Flux [][9][NFLUX_TOTAL][ PS2*PS2 ], const real dt, const real _dh, const real Eta, const bool StoreFlux, const real Taylor3_Coeff, const bool XYZ, const real MinDens ); #else #error : ERROR : unsupported MODEL !! #endif // MODEL #ifdef GRAVITY // Poisson solver prototypes #if ( POT_SCHEME == SOR ) #ifdef USE_PSOLVER_10TO14 __global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #else __global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const int Min_Iter, const int Max_Iter, const real Omega_6, const real Const, const IntScheme_t IntScheme ); #endif // #ifdef USE_PSOLVER_10TO14 ... else ... 
#elif ( POT_SCHEME == MG ) __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], const real dh_Min, const int Max_Iter, const int NPre_Smooth, const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, const IntScheme_t IntScheme ); #endif // POT_SCHEME // Gravity solver prototypes in different models #if ( MODEL == HYDRO ) __global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array_New[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real g_Pot_Array_USG[][ USG_NXT_G*USG_NXT_G*USG_NXT_G ], const real g_Flu_Array_USG[][GRA_NIN-1][ PS1*PS1*PS1 ], char g_DE_Array[][ PS1*PS1*PS1 ], const real Gra_Const, const bool P5_Gradient, const OptGravityType_t GravityType, const double TimeNew, const double TimeOld, const real dt, const real dh, const real MinEint ); #elif ( MODEL == MHD ) #warning :: WAIT MHD !!! #elif ( MODEL == ELBDM ) __global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ], const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ], const double g_Corner_Array[][3], const real EtaDt, const real dh, const real Lambda, const bool ExtPot, const double Time ); #else #error : ERROR : unsupported MODEL !! #endif // MODEL int CUPOT_PoissonSolver_SetConstMem(); #endif // GRAVITY //------------------------------------------------------------------------------------------------------- // Function : CUAPI_Set_Default_GPU_Parameter // Description : Set several GPU parameters to the default values if they are not set in the input file // // Parameter : GPU_NStream : Number of streams for the asynchronous memory copy in GPU // Flu_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the fluid solver // Pot_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Poisson solver // Che_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the Grackle solver // dt_GPU_NPGroup : Number of patch groups sent into GPU simultaneously for the dt solver //------------------------------------------------------------------------------------------------------- void CUAPI_Set_Default_GPU_Parameter( int &GPU_NStream, int &Flu_GPU_NPGroup, int &Pot_GPU_NPGroup, int &Che_GPU_NPGroup, int &dt_GPU_NPGroup ) { if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ ); // get the device ID int GetDeviceID = 999; CUDA_CHECK_ERROR( cudaGetDevice( &GetDeviceID ) ); // load the device properties cudaDeviceProp DeviceProp; CUDA_CHECK_ERROR( cudaGetDeviceProperties( &DeviceProp, GetDeviceID ) ); // set the default GPU parameters // (1) GPU_NSTREAM if ( GPU_NStream <= 0 ) { if ( DeviceProp.deviceOverlap ) { # if ( MODEL == HYDRO ) # if ( GPU_ARCH == FERMI ) GPU_NStream = 8; # elif ( GPU_ARCH == KEPLER ) GPU_NStream = 32; # elif ( GPU_ARCH == MAXWELL ) GPU_NStream = 32; # elif ( GPU_ARCH == PASCAL ) GPU_NStream = 32; # elif ( GPU_ARCH == VOLTA ) GPU_NStream = 32; # else # error : UNKNOWN GPU_ARCH !! # endif # elif ( MODEL == MHD ) # warning :: WAIT MHD !!! # elif ( MODEL == ELBDM ) # if ( GPU_ARCH == FERMI ) GPU_NStream = 8; # elif ( GPU_ARCH == KEPLER ) GPU_NStream = 32; # elif ( GPU_ARCH == MAXWELL ) GPU_NStream = 32; # elif ( GPU_ARCH == PASCAL ) GPU_NStream = 32; # elif ( GPU_ARCH == VOLTA ) GPU_NStream = 32; # else # error : ERROR : UNKNOWN GPU_ARCH !! # endif # else # error : ERROR : UNKNOWN MODEL !! 
# endif // MODEL } // if ( DeviceProp.deviceOverlap ) else GPU_NStream = 1; if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "GPU_NSTREAM", GPU_NSTREAM ); } // if ( GPU_NStream <= 0 ) // (2) XXX_GPU_NPGROUP // (2-1) FLU_GPU_NPGROUP if ( Flu_GPU_NPGroup <= 0 ) { # if ( MODEL == HYDRO ) # if ( GPU_ARCH == FERMI ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif # elif ( MODEL == MHD ) # warning :: WAIT MHD !!! # elif ( MODEL == ELBDM ) # if ( GPU_ARCH == FERMI ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Flu_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif # else # error : ERROR : UNKNOWN MODEL !! # endif // MODEL if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "FLU_GPU_NPGROUP", Flu_GPU_NPGroup ); } // if ( Flu_GPU_NPGroup <= 0 ) // (2-2) POT_GPU_NPGROUP # ifdef GRAVITY if ( Pot_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Pot_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! # endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "POT_GPU_NPGROUP", Pot_GPU_NPGroup ); } // if ( Pot_GPU_NPGroup <= 0 ) # endif // (2-3) CHE_GPU_NPGROUP # ifdef SUPPORT_GRACKLE if ( Che_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == KEPLER ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == MAXWELL ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == PASCAL ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # elif ( GPU_ARCH == VOLTA ) Che_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; # else # error : UNKNOWN GPU_ARCH !! 
# endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d" " --> might be further fine-tuned\n", "CHE_GPU_NPGROUP", Che_GPU_NPGroup ); } // if ( Che_GPU_NPGroup <= 0 ) # endif // (2-4) DT_GPU_NPGROUP if ( dt_GPU_NPGroup <= 0 ) { # if ( GPU_ARCH == FERMI ) dt_GPU_NPGroup = 1*GPU_NStream*DeviceProp.multiProcessorCount; // not optimized yet # elif ( GPU_ARCH == KEPLER ) dt_GPU_NPGroup = 32*DeviceProp.multiProcessorCount; // not optimized yet # elif ( GPU_ARCH == MAXWELL ) dt_GPU_NPGroup = 32*DeviceProp.multiProcessorCount; // not optimized yet # elif ( GPU_ARCH == PASCAL ) dt_GPU_NPGroup = 32*DeviceProp.multiProcessorCount; // not optimized yet # else # error : UNKNOWN GPU_ARCH !! # endif if ( MPI_Rank == 0 ) Aux_Message( stdout, "NOTE : parameter \"%s\" is set to the default value = %d\n", "DT_GPU_NPGROUP", dt_GPU_NPGroup ); } // if ( dt_GPU_NPGroup <= 0 ) // (3) cache preference // (3-1) fluid solver # if ( MODEL == HYDRO ) # if ( FLU_SCHEME == RTVD ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_RTVD, cudaFuncCachePreferShared ) ); # elif ( FLU_SCHEME == WAF ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_WAF, cudaFuncCachePreferShared ) ); # elif ( FLU_SCHEME == MHM ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_MHM, cudaFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == MHM_RP ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_MHM, cudaFuncCachePreferL1 ) ); # elif ( FLU_SCHEME == CTU ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_FluidSolver_CTU, cudaFuncCachePreferL1 ) ); # endif CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_dtSolver_HydroCFL, cudaFuncCachePreferShared ) ); # ifdef GRAVITY CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_dtSolver_HydroGravity, cudaFuncCachePreferShared ) ); # endif # elif ( MODEL == MHD ) # warning :: WAIT MHD !!! # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUFLU_ELBDMSolver, cudaFuncCachePreferShared ) ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL # ifdef GRAVITY // (3-2) Poisson solver # if ( POT_SCHEME == SOR ) # ifdef USE_PSOLVER_10TO14 CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_10to14cube, cudaFuncCachePreferShared ) ); # else CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_PoissonSolver_SOR_16to18cube, cudaFuncCachePreferShared ) ); # endif # elif ( POT_SCHEME == MG ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_PoissonSolver_MG, cudaFuncCachePreferShared ) ); # endif // POT_SCHEME // (3-3) gravity solver # if ( MODEL == HYDRO ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_HydroGravitySolver, cudaFuncCachePreferShared ) ); # elif ( MODEL == MHD ) # warning : WAIT MHD !!! # elif ( MODEL == ELBDM ) CUDA_CHECK_ERROR( cudaFuncSetCacheConfig( CUPOT_ELBDMGravitySolver, cudaFuncCachePreferL1 ) ); # else # error : ERROR : unsupported MODEL !! # endif // MODEL # endif // GRAVITY // (4) set the constant variables // --> note that the auxiliary arrays for the external acceleration and potential are set by CUAPI_Init_ExternalAccPot() # if ( NCOMP_PASSIVE > 0 ) if ( OPT__NORMALIZE_PASSIVE ) { # if ( MODEL == HYDRO && ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) ) if ( CUFLU_FluidSolver_SetConstMem_NormIdx(PassiveNorm_VarIdx) != 0 ) Aux_Error( ERROR_INFO, "CUFLU_FluidSolver_SetConstMem_NormIdx failed ...\n" ); # elif ( MODEL == MHD ) # warning : WAIT MHD !!! 
# endif // MODEL } # endif // #if ( NCOMP_PASSIVE > 0 ) # ifdef GRAVITY if ( CUPOT_PoissonSolver_SetConstMem() != 0 ) Aux_Error( ERROR_INFO, "CUPOT_PoissonSolver_SetConstMem failed ...\n" ); # endif // #ifdef GRAVITY if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ... done\n", __FUNCTION__ ); } // FUNCTION : CUAPI_Set_Default_GPU_Parameter #endif // #ifdef GPU
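Editor-added, standalone sketch of how the two device properties that drive the defaults in the file above are obtained: deviceOverlap gates GPU_NSTREAM and multiProcessorCount scales the *_GPU_NPGROUP values. It is illustrative only and not part of either file in the pair.

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    int dev = 0;
    cudaGetDevice(&dev);

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, dev);

    // deviceOverlap decides whether more than one stream can pay off;
    // multiProcessorCount scales the default number of patch groups per solver.
    std::printf("multiProcessorCount : %d\n", prop.multiProcessorCount);
    std::printf("deviceOverlap       : %d\n", prop.deviceOverlap);
    return 0;
}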
249daa2280ba27b9352a80bacae583ac7e98026d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "yolo_layer.h" using namespace Yolo; namespace { // Write values into buffer template <typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } // Read values from buffer template <typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } // namespace namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int yolo_width, int yolo_height, int num_anchors, float* anchors, int num_classes, int input_width, int input_height, float scale_x_y, int new_coords) { mYoloWidth = yolo_width; mYoloHeight = yolo_height; mNumAnchors = num_anchors; memcpy(mAnchorsHost, anchors, num_anchors * 2 * sizeof(float)); mNumClasses = num_classes; mInputWidth = input_width; mInputHeight = input_height; mScaleXY = scale_x_y; mNewCoords = new_coords; CHECK(hipMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(hipMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), hipMemcpyHostToDevice)); } YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mThreadCount); read(d, mYoloWidth); read(d, mYoloHeight); read(d, mNumAnchors); memcpy(mAnchorsHost, d, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); read(d, mNumClasses); read(d, mInputWidth); read(d, mInputHeight); read(d, mScaleXY); read(d, mNewCoords); CHECK(hipMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(hipMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), hipMemcpyHostToDevice)); assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) const { char* d = static_cast<char*>(buffer), *a = d; write(d, mThreadCount); write(d, mYoloWidth); write(d, mYoloHeight); write(d, mNumAnchors); memcpy(d, mAnchorsHost, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); write(d, mNumClasses); write(d, mInputWidth); write(d, mInputHeight); write(d, mScaleXY); write(d, mNewCoords); assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() const { return sizeof(mThreadCount) + \ sizeof(mYoloWidth) + sizeof(mYoloHeight) + \ sizeof(mNumAnchors) + MAX_ANCHORS * 2 * sizeof(float) + \ sizeof(mNumClasses) + \ sizeof(mInputWidth) + sizeof(mInputHeight) + \ sizeof(mScaleXY) + sizeof(mNewCoords); } int YoloLayerPlugin::initialize() { return 0; } void YoloLayerPlugin::terminate() { CHECK(hipFree(mAnchors)); } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { assert(index == 0); assert(nbInputDims == 1); assert(inputs[0].d[0] == (mNumClasses + 5) * mNumAnchors); assert(inputs[0].d[1] == mYoloHeight); assert(inputs[0].d[2] == mYoloWidth); // output detection results to the channel dimension int totalsize = mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float); return Dims3(totalsize, 1, 1); } void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin::getPluginNamespace() const { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. 
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) { } // Detach the plugin object from its execution context. void YoloLayerPlugin::detachFromContext() { } const char* YoloLayerPlugin::getPluginType() const { return "YoloLayer_TRT"; } const char* YoloLayerPlugin::getPluginVersion() const { return "1"; } void YoloLayerPlugin::destroy() { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin::clone() const { YoloLayerPlugin *p = new YoloLayerPlugin(mYoloWidth, mYoloHeight, mNumAnchors, (float*) mAnchorsHost, mNumClasses, mInputWidth, mInputHeight, mScaleXY, mNewCoords); p->setPluginNamespace(mPluginNamespace); return p; } inline __device__ float sigmoidGPU(float x) { return 1.0f / (1.0f + __expf(-x)); } inline __device__ float scale_sigmoidGPU(float x, float s) { return s * sigmoidGPU(x) - (s - 1.0f) * 0.5f; } // CalDetection(): This kernel processes 1 yolo layer calculation. It // distributes calculations so that 1 GPU thread would be responsible // for each grid/anchor combination. // NOTE: The output (x, y, w, h) are between 0.0 and 1.0 // (relative to orginal image width and height). __global__ void CalDetection(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_logit = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_logit) { max_cls_logit = l; class_id = i - 5; } } float max_cls_prob = sigmoidGPU(max_cls_logit); float box_prob = sigmoidGPU(*(cur_input + 4 * total_grids)); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale_sigmoidGPU(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale_sigmoidGPU(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = __expf(*(cur_input + 2 * total_grids)) * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = __expf(*(cur_input + 3 * total_grids)) * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } inline __device__ 
float scale(float x, float s) { return s * x - (s - 1.0f) * 0.5f; } inline __device__ float square(float x) { return x * x; } __global__ void CalDetection_NewCoords(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_prob = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_prob) { max_cls_prob = l; class_id = i - 5; } } float box_prob = *(cur_input + 4 * total_grids); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = square(*(cur_input + 2 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = square(*(cur_input + 3 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, hipStream_t stream, int batchSize) { int num_elements = batchSize * mNumAnchors * mYoloWidth * mYoloHeight; //CHECK(hipMemset(output, 0, num_elements * sizeof(Detection))); if (mNewCoords) { hipLaunchKernelGGL(( CalDetection_NewCoords), dim3((num_elements + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, stream, inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } else { hipLaunchKernelGGL(( CalDetection), dim3((num_elements + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, stream, inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } } int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream) { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { assert(!strcmp(name, getPluginName())); const PluginField* fields = fc->fields; int yolo_width, yolo_height, num_anchors = 0; float anchors[MAX_ANCHORS * 2]; int num_classes, 
input_multiplier, new_coords = 0; float scale_x_y = 1.0; for (int i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "yoloWidth")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_width = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "yoloHeight")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_height = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numAnchors")) { assert(fields[i].type == PluginFieldType::kINT32); num_anchors = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numClasses")) { assert(fields[i].type == PluginFieldType::kINT32); num_classes = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "inputMultiplier")) { assert(fields[i].type == PluginFieldType::kINT32); input_multiplier = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "anchors")){ assert(num_anchors > 0 && num_anchors <= MAX_ANCHORS); assert(fields[i].type == PluginFieldType::kFLOAT32); memcpy(anchors, static_cast<const float*>(fields[i].data), num_anchors * 2 * sizeof(float)); } else if (!strcmp(attrName, "scaleXY")) { assert(fields[i].type == PluginFieldType::kFLOAT32); scale_x_y = *(static_cast<const float*>(fields[i].data)); } else if (!strcmp(attrName, "newCoords")) { assert(fields[i].type == PluginFieldType::kINT32); new_coords = *(static_cast<const int*>(fields[i].data)); } else { std::cerr << "Unknown attribute: " << attrName << std::endl; assert(0); } } assert(yolo_width > 0 && yolo_height > 0); assert(anchors[0] > 0.0f && anchors[1] > 0.0f); assert(num_classes > 0); assert(input_multiplier == 8 || input_multiplier == 16 || input_multiplier == 32 || input_multiplier == 64 || input_multiplier == 128); assert(scale_x_y >= 1.0); YoloLayerPlugin* obj = new YoloLayerPlugin(yolo_width, yolo_height, num_anchors, anchors, num_classes, yolo_width * input_multiplier, yolo_height * input_multiplier, scale_x_y, new_coords); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; REGISTER_TENSORRT_PLUGIN(YoloPluginCreator); } // namespace nvinfer1
249daa2280ba27b9352a80bacae583ac7e98026d.cu
#include "yolo_layer.h" using namespace Yolo; namespace { // Write values into buffer template <typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } // Read values from buffer template <typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } // namespace namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int yolo_width, int yolo_height, int num_anchors, float* anchors, int num_classes, int input_width, int input_height, float scale_x_y, int new_coords) { mYoloWidth = yolo_width; mYoloHeight = yolo_height; mNumAnchors = num_anchors; memcpy(mAnchorsHost, anchors, num_anchors * 2 * sizeof(float)); mNumClasses = num_classes; mInputWidth = input_width; mInputHeight = input_height; mScaleXY = scale_x_y; mNewCoords = new_coords; CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice)); } YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mThreadCount); read(d, mYoloWidth); read(d, mYoloHeight); read(d, mNumAnchors); memcpy(mAnchorsHost, d, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); read(d, mNumClasses); read(d, mInputWidth); read(d, mInputHeight); read(d, mScaleXY); read(d, mNewCoords); CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice)); assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) const { char* d = static_cast<char*>(buffer), *a = d; write(d, mThreadCount); write(d, mYoloWidth); write(d, mYoloHeight); write(d, mNumAnchors); memcpy(d, mAnchorsHost, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); write(d, mNumClasses); write(d, mInputWidth); write(d, mInputHeight); write(d, mScaleXY); write(d, mNewCoords); assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() const { return sizeof(mThreadCount) + \ sizeof(mYoloWidth) + sizeof(mYoloHeight) + \ sizeof(mNumAnchors) + MAX_ANCHORS * 2 * sizeof(float) + \ sizeof(mNumClasses) + \ sizeof(mInputWidth) + sizeof(mInputHeight) + \ sizeof(mScaleXY) + sizeof(mNewCoords); } int YoloLayerPlugin::initialize() { return 0; } void YoloLayerPlugin::terminate() { CHECK(cudaFree(mAnchors)); } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { assert(index == 0); assert(nbInputDims == 1); assert(inputs[0].d[0] == (mNumClasses + 5) * mNumAnchors); assert(inputs[0].d[1] == mYoloHeight); assert(inputs[0].d[2] == mYoloWidth); // output detection results to the channel dimension int totalsize = mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float); return Dims3(totalsize, 1, 1); } void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin::getPluginNamespace() const { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. 
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) { } // Detach the plugin object from its execution context. void YoloLayerPlugin::detachFromContext() { } const char* YoloLayerPlugin::getPluginType() const { return "YoloLayer_TRT"; } const char* YoloLayerPlugin::getPluginVersion() const { return "1"; } void YoloLayerPlugin::destroy() { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin::clone() const { YoloLayerPlugin *p = new YoloLayerPlugin(mYoloWidth, mYoloHeight, mNumAnchors, (float*) mAnchorsHost, mNumClasses, mInputWidth, mInputHeight, mScaleXY, mNewCoords); p->setPluginNamespace(mPluginNamespace); return p; } inline __device__ float sigmoidGPU(float x) { return 1.0f / (1.0f + __expf(-x)); } inline __device__ float scale_sigmoidGPU(float x, float s) { return s * sigmoidGPU(x) - (s - 1.0f) * 0.5f; } // CalDetection(): This kernel processes 1 yolo layer calculation. It // distributes calculations so that 1 GPU thread would be responsible // for each grid/anchor combination. // NOTE: The output (x, y, w, h) are between 0.0 and 1.0 // (relative to orginal image width and height). __global__ void CalDetection(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_logit = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_logit) { max_cls_logit = l; class_id = i - 5; } } float max_cls_prob = sigmoidGPU(max_cls_logit); float box_prob = sigmoidGPU(*(cur_input + 4 * total_grids)); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale_sigmoidGPU(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale_sigmoidGPU(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = __expf(*(cur_input + 2 * total_grids)) * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = __expf(*(cur_input + 3 * total_grids)) * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } inline __device__ 
float scale(float x, float s) { return s * x - (s - 1.0f) * 0.5f; } inline __device__ float square(float x) { return x * x; } __global__ void CalDetection_NewCoords(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_prob = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_prob) { max_cls_prob = l; class_id = i - 5; } } float box_prob = *(cur_input + 4 * total_grids); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = square(*(cur_input + 2 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = square(*(cur_input + 3 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, cudaStream_t stream, int batchSize) { int num_elements = batchSize * mNumAnchors * mYoloWidth * mYoloHeight; //CHECK(cudaMemset(output, 0, num_elements * sizeof(Detection))); if (mNewCoords) { CalDetection_NewCoords<<<(num_elements + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>> (inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } else { CalDetection<<<(num_elements + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>> (inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } } int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { assert(!strcmp(name, getPluginName())); const PluginField* fields = fc->fields; int yolo_width, yolo_height, num_anchors = 0; float anchors[MAX_ANCHORS * 2]; int num_classes, input_multiplier, new_coords = 0; float scale_x_y = 1.0; for (int 
i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "yoloWidth")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_width = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "yoloHeight")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_height = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numAnchors")) { assert(fields[i].type == PluginFieldType::kINT32); num_anchors = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numClasses")) { assert(fields[i].type == PluginFieldType::kINT32); num_classes = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "inputMultiplier")) { assert(fields[i].type == PluginFieldType::kINT32); input_multiplier = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "anchors")){ assert(num_anchors > 0 && num_anchors <= MAX_ANCHORS); assert(fields[i].type == PluginFieldType::kFLOAT32); memcpy(anchors, static_cast<const float*>(fields[i].data), num_anchors * 2 * sizeof(float)); } else if (!strcmp(attrName, "scaleXY")) { assert(fields[i].type == PluginFieldType::kFLOAT32); scale_x_y = *(static_cast<const float*>(fields[i].data)); } else if (!strcmp(attrName, "newCoords")) { assert(fields[i].type == PluginFieldType::kINT32); new_coords = *(static_cast<const int*>(fields[i].data)); } else { std::cerr << "Unknown attribute: " << attrName << std::endl; assert(0); } } assert(yolo_width > 0 && yolo_height > 0); assert(anchors[0] > 0.0f && anchors[1] > 0.0f); assert(num_classes > 0); assert(input_multiplier == 8 || input_multiplier == 16 || input_multiplier == 32 || input_multiplier == 64 || input_multiplier == 128); assert(scale_x_y >= 1.0); YoloLayerPlugin* obj = new YoloLayerPlugin(yolo_width, yolo_height, num_anchors, anchors, num_classes, yolo_width * input_multiplier, yolo_height * input_multiplier, scale_x_y, new_coords); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; REGISTER_TENSORRT_PLUGIN(YoloPluginCreator); } // namespace nvinfer1
62e11e5a99ea544de2392f11749783703d4037f5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <raft/label/classlabels.hpp> #include <hipcub/hipcub.hpp> #include <cuml/common/logger.hpp> #include <raft/cudart_utils.h> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <raft/sparse/convert/csr.hpp> #include <raft/sparse/op/sort.hpp> #include <thrust/execution_policy.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <cuml/cluster/hdbscan.hpp> namespace ML { namespace HDBSCAN { namespace Common { struct TupleComp { template <typename one, typename two> __host__ __device__ bool operator()(const one& t1, const two& t2) { // sort first by each parent, if (thrust::get<0>(t1) < thrust::get<0>(t2)) return true; if (thrust::get<0>(t1) > thrust::get<0>(t2)) return false; // within each parent, sort by each child, if (thrust::get<1>(t1) < thrust::get<1>(t2)) return true; if (thrust::get<1>(t1) > thrust::get<1>(t2)) return false; // then sort by value in descending order return thrust::get<2>(t1) < thrust::get<2>(t2); } }; template <typename value_idx, typename value_t> CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(const raft::handle_t& handle_, size_t n_leaves_) : handle(handle_), n_leaves(n_leaves_), parents(0, handle.get_stream()), children(0, handle.get_stream()), lambdas(0, handle.get_stream()), sizes(0, handle.get_stream()) { } template <typename value_idx, typename value_t> CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(const raft::handle_t& handle_, size_t n_leaves_, int n_edges_, value_idx* parents_, value_idx* children_, value_t* lambdas_, value_idx* sizes_) : handle(handle_), n_leaves(n_leaves_), n_edges(n_edges_), parents(n_edges_, handle.get_stream()), children(n_edges_, handle.get_stream()), lambdas(n_edges_, handle.get_stream()), sizes(n_edges_, handle.get_stream()) { raft::copy(parents.begin(), parents_, n_edges_, handle.get_stream()); raft::copy(children.begin(), children_, n_edges_, handle.get_stream()); raft::copy(lambdas.begin(), lambdas_, n_edges_, handle.get_stream()); raft::copy(sizes.begin(), sizes_, n_edges_, handle.get_stream()); auto parents_ptr = thrust::device_pointer_cast(parents.data()); auto parents_min_max = thrust::minmax_element( thrust::hip::par.on(handle.get_stream()), parents_ptr, parents_ptr + n_edges); auto min_cluster = *parents_min_max.first; auto max_cluster = *parents_min_max.second; n_clusters = max_cluster - min_cluster + 1; auto sort_keys = thrust::make_zip_iterator(thrust::make_tuple(parents.begin(), children.begin(), sizes.begin())); auto sort_values = thrust::make_zip_iterator(thrust::make_tuple(lambdas.begin())); thrust::sort_by_key(thrust::hip::par.on(handle.get_stream()), sort_keys, sort_keys + n_edges, sort_values, TupleComp()); } template <typename value_idx, typename value_t> CondensedHierarchy<value_idx, value_t>::CondensedHierarchy( const raft::handle_t& handle_, size_t n_leaves_, int n_edges_, int n_clusters_, 
rmm::device_uvector<value_idx>&& parents_, rmm::device_uvector<value_idx>&& children_, rmm::device_uvector<value_t>&& lambdas_, rmm::device_uvector<value_idx>&& sizes_) : handle(handle_), n_leaves(n_leaves_), n_edges(n_edges_), n_clusters(n_clusters_), parents(std::move(parents_)), children(std::move(children_)), lambdas(std::move(lambdas_)), sizes(std::move(sizes_)) { } /** * Populates the condensed hierarchy object with the output * from Condense::condense_hierarchy * @param full_parents * @param full_children * @param full_lambdas * @param full_sizes */ template <typename value_idx, typename value_t> void CondensedHierarchy<value_idx, value_t>::condense(value_idx* full_parents, value_idx* full_children, value_t* full_lambdas, value_idx* full_sizes, value_idx size) { auto stream = handle.get_stream(); if (size == -1) size = 4 * (n_leaves - 1) + 2; n_edges = thrust::transform_reduce( thrust::hip::par.on(stream), full_sizes, full_sizes + size, [=] __device__(value_idx a) { return a != -1; }, 0, thrust::plus<value_idx>()); parents.resize(n_edges, stream); children.resize(n_edges, stream); lambdas.resize(n_edges, stream); sizes.resize(n_edges, stream); auto in = thrust::make_zip_iterator( thrust::make_tuple(full_parents, full_children, full_lambdas, full_sizes)); auto out = thrust::make_zip_iterator( thrust::make_tuple(parents.data(), children.data(), lambdas.data(), sizes.data())); thrust::copy_if(thrust::hip::par.on(stream), in, in + size, out, [=] __device__(thrust::tuple<value_idx, value_idx, value_t, value_idx> tup) { return thrust::get<3>(tup) != -1; }); // TODO: Avoid the copies here by updating kernel rmm::device_uvector<value_idx> parent_child(n_edges * 2, stream); raft::copy_async(parent_child.begin(), children.begin(), n_edges, stream); raft::copy_async(parent_child.begin() + n_edges, parents.begin(), n_edges, stream); // find n_clusters auto parents_ptr = thrust::device_pointer_cast(parents.data()); auto max_parent = *(thrust::max_element(thrust::hip::par.on(stream), parents_ptr, parents_ptr + n_edges)); // now invert labels auto invert_op = [max_parent, n_leaves = n_leaves] __device__(auto& x) { return x >= n_leaves ? max_parent - x + n_leaves : x; }; thrust::transform(thrust::hip::par.on(stream), parent_child.begin(), parent_child.end(), parent_child.begin(), invert_op); raft::label::make_monotonic( parent_child.data(), parent_child.data(), parent_child.size(), stream, true); raft::copy_async(children.begin(), parent_child.begin(), n_edges, stream); raft::copy_async(parents.begin(), parent_child.begin() + n_edges, n_edges, stream); auto parents_min_max = thrust::minmax_element(thrust::hip::par.on(stream), parents_ptr, parents_ptr + n_edges); auto min_cluster = *parents_min_max.first; auto max_cluster = *parents_min_max.second; n_clusters = max_cluster - min_cluster + 1; auto sort_keys = thrust::make_zip_iterator(thrust::make_tuple(parents.begin(), children.begin(), sizes.begin())); auto sort_values = thrust::make_zip_iterator(thrust::make_tuple(lambdas.begin())); thrust::sort_by_key( thrust::hip::par.on(stream), sort_keys, sort_keys + n_edges, sort_values, TupleComp()); } }; // namespace Common }; // namespace HDBSCAN }; // namespace ML
62e11e5a99ea544de2392f11749783703d4037f5.cu
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <raft/label/classlabels.hpp> #include <cub/cub.cuh> #include <cuml/common/logger.hpp> #include <raft/cudart_utils.h> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <raft/sparse/convert/csr.hpp> #include <raft/sparse/op/sort.hpp> #include <thrust/execution_policy.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <cuml/cluster/hdbscan.hpp> namespace ML { namespace HDBSCAN { namespace Common { struct TupleComp { template <typename one, typename two> __host__ __device__ bool operator()(const one& t1, const two& t2) { // sort first by each parent, if (thrust::get<0>(t1) < thrust::get<0>(t2)) return true; if (thrust::get<0>(t1) > thrust::get<0>(t2)) return false; // within each parent, sort by each child, if (thrust::get<1>(t1) < thrust::get<1>(t2)) return true; if (thrust::get<1>(t1) > thrust::get<1>(t2)) return false; // then sort by value in descending order return thrust::get<2>(t1) < thrust::get<2>(t2); } }; template <typename value_idx, typename value_t> CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(const raft::handle_t& handle_, size_t n_leaves_) : handle(handle_), n_leaves(n_leaves_), parents(0, handle.get_stream()), children(0, handle.get_stream()), lambdas(0, handle.get_stream()), sizes(0, handle.get_stream()) { } template <typename value_idx, typename value_t> CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(const raft::handle_t& handle_, size_t n_leaves_, int n_edges_, value_idx* parents_, value_idx* children_, value_t* lambdas_, value_idx* sizes_) : handle(handle_), n_leaves(n_leaves_), n_edges(n_edges_), parents(n_edges_, handle.get_stream()), children(n_edges_, handle.get_stream()), lambdas(n_edges_, handle.get_stream()), sizes(n_edges_, handle.get_stream()) { raft::copy(parents.begin(), parents_, n_edges_, handle.get_stream()); raft::copy(children.begin(), children_, n_edges_, handle.get_stream()); raft::copy(lambdas.begin(), lambdas_, n_edges_, handle.get_stream()); raft::copy(sizes.begin(), sizes_, n_edges_, handle.get_stream()); auto parents_ptr = thrust::device_pointer_cast(parents.data()); auto parents_min_max = thrust::minmax_element( thrust::cuda::par.on(handle.get_stream()), parents_ptr, parents_ptr + n_edges); auto min_cluster = *parents_min_max.first; auto max_cluster = *parents_min_max.second; n_clusters = max_cluster - min_cluster + 1; auto sort_keys = thrust::make_zip_iterator(thrust::make_tuple(parents.begin(), children.begin(), sizes.begin())); auto sort_values = thrust::make_zip_iterator(thrust::make_tuple(lambdas.begin())); thrust::sort_by_key(thrust::cuda::par.on(handle.get_stream()), sort_keys, sort_keys + n_edges, sort_values, TupleComp()); } template <typename value_idx, typename value_t> CondensedHierarchy<value_idx, value_t>::CondensedHierarchy( const raft::handle_t& handle_, size_t n_leaves_, int n_edges_, int n_clusters_, rmm::device_uvector<value_idx>&& parents_, 
rmm::device_uvector<value_idx>&& children_, rmm::device_uvector<value_t>&& lambdas_, rmm::device_uvector<value_idx>&& sizes_) : handle(handle_), n_leaves(n_leaves_), n_edges(n_edges_), n_clusters(n_clusters_), parents(std::move(parents_)), children(std::move(children_)), lambdas(std::move(lambdas_)), sizes(std::move(sizes_)) { } /** * Populates the condensed hierarchy object with the output * from Condense::condense_hierarchy * @param full_parents * @param full_children * @param full_lambdas * @param full_sizes */ template <typename value_idx, typename value_t> void CondensedHierarchy<value_idx, value_t>::condense(value_idx* full_parents, value_idx* full_children, value_t* full_lambdas, value_idx* full_sizes, value_idx size) { auto stream = handle.get_stream(); if (size == -1) size = 4 * (n_leaves - 1) + 2; n_edges = thrust::transform_reduce( thrust::cuda::par.on(stream), full_sizes, full_sizes + size, [=] __device__(value_idx a) { return a != -1; }, 0, thrust::plus<value_idx>()); parents.resize(n_edges, stream); children.resize(n_edges, stream); lambdas.resize(n_edges, stream); sizes.resize(n_edges, stream); auto in = thrust::make_zip_iterator( thrust::make_tuple(full_parents, full_children, full_lambdas, full_sizes)); auto out = thrust::make_zip_iterator( thrust::make_tuple(parents.data(), children.data(), lambdas.data(), sizes.data())); thrust::copy_if(thrust::cuda::par.on(stream), in, in + size, out, [=] __device__(thrust::tuple<value_idx, value_idx, value_t, value_idx> tup) { return thrust::get<3>(tup) != -1; }); // TODO: Avoid the copies here by updating kernel rmm::device_uvector<value_idx> parent_child(n_edges * 2, stream); raft::copy_async(parent_child.begin(), children.begin(), n_edges, stream); raft::copy_async(parent_child.begin() + n_edges, parents.begin(), n_edges, stream); // find n_clusters auto parents_ptr = thrust::device_pointer_cast(parents.data()); auto max_parent = *(thrust::max_element(thrust::cuda::par.on(stream), parents_ptr, parents_ptr + n_edges)); // now invert labels auto invert_op = [max_parent, n_leaves = n_leaves] __device__(auto& x) { return x >= n_leaves ? max_parent - x + n_leaves : x; }; thrust::transform(thrust::cuda::par.on(stream), parent_child.begin(), parent_child.end(), parent_child.begin(), invert_op); raft::label::make_monotonic( parent_child.data(), parent_child.data(), parent_child.size(), stream, true); raft::copy_async(children.begin(), parent_child.begin(), n_edges, stream); raft::copy_async(parents.begin(), parent_child.begin() + n_edges, n_edges, stream); auto parents_min_max = thrust::minmax_element(thrust::cuda::par.on(stream), parents_ptr, parents_ptr + n_edges); auto min_cluster = *parents_min_max.first; auto max_cluster = *parents_min_max.second; n_clusters = max_cluster - min_cluster + 1; auto sort_keys = thrust::make_zip_iterator(thrust::make_tuple(parents.begin(), children.begin(), sizes.begin())); auto sort_values = thrust::make_zip_iterator(thrust::make_tuple(lambdas.begin())); thrust::sort_by_key( thrust::cuda::par.on(stream), sort_keys, sort_keys + n_edges, sort_values, TupleComp()); } }; // namespace Common }; // namespace HDBSCAN }; // namespace ML
5d27a15c851e680f53594f8731123177856c0afa.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2020-2023, XGBoost contributors */ #include <algorithm> #include <memory> #include <type_traits> #include "../common/hist_util.cuh" #include "batch_utils.h" // for RegenGHist #include "device_adapter_hip.cuh" #include "ellpack_page.cuh" #include "gradient_index.h" #include "iterative_dmatrix.h" #include "proxy_dmatrix.cuh" #include "proxy_dmatrix.h" #include "simple_batch_iterator.h" #include "sparse_page_source.h" namespace xgboost::data { void IterativeDMatrix::InitFromCUDA(Context const* ctx, BatchParam const& p, DataIterHandle iter_handle, float missing, std::shared_ptr<DMatrix> ref) { // A handle passed to external iterator. DMatrixProxy* proxy = MakeProxy(proxy_); CHECK(proxy); // The external iterator auto iter = DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{iter_handle, reset_, next_}; dh::XGBCachingDeviceAllocator<char> alloc; auto num_rows = [&]() { return cuda_impl::Dispatch(proxy, [](auto const& value) { return value.NumRows(); }); }; auto num_cols = [&]() { return cuda_impl::Dispatch(proxy, [](auto const& value) { return value.NumCols(); }); }; size_t row_stride = 0; size_t nnz = 0; // Sketch for all batches. std::vector<common::SketchContainer> sketch_containers; size_t batches = 0; size_t accumulated_rows = 0; bst_feature_t cols = 0; int32_t current_device; dh::safe_cuda(hipGetDevice(&current_device)); auto get_device = [&]() -> int32_t { std::int32_t d = (ctx->gpu_id == Context::kCpuId) ? current_device : ctx->gpu_id; CHECK_NE(d, Context::kCpuId); return d; }; /** * Generate quantiles */ common::HistogramCuts cuts; do { // We use do while here as the first batch is fetched in ctor // ctx_.gpu_id = proxy->DeviceIdx(); CHECK_LT(ctx->gpu_id, common::AllVisibleGPUs()); dh::safe_cuda(hipSetDevice(get_device())); if (cols == 0) { cols = num_cols(); collective::Allreduce<collective::Operation::kMax>(&cols, 1); this->info_.num_col_ = cols; } else { CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns."; } if (!ref) { sketch_containers.emplace_back(proxy->Info().feature_types, p.max_bin, cols, num_rows(), get_device()); auto* p_sketch = &sketch_containers.back(); proxy->Info().weights_.SetDevice(get_device()); cuda_impl::Dispatch(proxy, [&](auto const& value) { common::AdapterDeviceSketch(value, p.max_bin, proxy->Info(), missing, p_sketch); }); } auto batch_rows = num_rows(); accumulated_rows += batch_rows; dh::device_vector<size_t> row_counts(batch_rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); row_stride = ::max(row_stride, cuda_impl::Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); })); nnz += thrust::reduce(thrust::hip::par(alloc), row_counts.begin(), row_counts.end()); batches++; } while (iter.Next()); iter.Reset(); auto n_features = cols; CHECK_GE(n_features, 1) << "Data must has at least 1 column."; dh::safe_cuda(hipSetDevice(get_device())); if (!ref) { HostDeviceVector<FeatureType> ft; common::SketchContainer final_sketch( sketch_containers.empty() ? 
ft : sketch_containers.front().FeatureTypes(), p.max_bin, cols, accumulated_rows, get_device()); for (auto const& sketch : sketch_containers) { final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data()); final_sketch.FixError(); } sketch_containers.clear(); sketch_containers.shrink_to_fit(); final_sketch.MakeCuts(&cuts, this->info_.IsColumnSplit()); } else { GetCutsFromRef(ctx, ref, Info().num_col_, p, &cuts); } this->info_.num_row_ = accumulated_rows; this->info_.num_nonzero_ = nnz; auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows, get_device]() { if (!ellpack_) { // Should be put inside the while loop to protect against empty batch. In // that case device id is invalid. ellpack_.reset(new EllpackPage); *(ellpack_->Impl()) = EllpackPageImpl(get_device(), cuts, this->IsDense(), row_stride, accumulated_rows); } }; /** * Generate gradient index. */ size_t offset = 0; iter.Reset(); size_t n_batches_for_verification = 0; while (iter.Next()) { init_page(); dh::safe_cuda(hipSetDevice(get_device())); auto rows = num_rows(); dh::device_vector<size_t> row_counts(rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); cuda_impl::Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); }); auto is_dense = this->IsDense(); proxy->Info().feature_types.SetDevice(get_device()); auto d_feature_types = proxy->Info().feature_types.ConstDeviceSpan(); auto new_impl = cuda_impl::Dispatch(proxy, [&](auto const& value) { return EllpackPageImpl(value, missing, get_device(), is_dense, row_counts_span, d_feature_types, row_stride, rows, cuts); }); size_t num_elements = ellpack_->Impl()->Copy(get_device(), &new_impl, offset); offset += num_elements; proxy->Info().num_row_ = num_rows(); proxy->Info().num_col_ = cols; if (batches != 1) { this->info_.Extend(std::move(proxy->Info()), false, true); } n_batches_for_verification++; } CHECK_EQ(batches, n_batches_for_verification) << "Different number of batches returned between 2 iterations"; if (batches == 1) { this->info_ = std::move(proxy->Info()); this->info_.num_nonzero_ = nnz; CHECK_EQ(proxy->Info().labels.Size(), 0); } iter.Reset(); // Synchronise worker columns info_.SynchronizeNumberOfColumns(); } BatchSet<EllpackPage> IterativeDMatrix::GetEllpackBatches(Context const* ctx, BatchParam const& param) { if (param.Initialized()) { CheckParam(param); CHECK(!detail::RegenGHist(param, batch_)) << error::InconsistentMaxBin(); } if (!ellpack_ && !ghist_) { LOG(FATAL) << "`QuantileDMatrix` not initialized."; } if (!ellpack_) { ellpack_.reset(new EllpackPage()); if (ctx->IsCUDA()) { this->Info().feature_types.SetDevice(ctx->gpu_id); *ellpack_->Impl() = EllpackPageImpl(ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } else if (fmat_ctx_.IsCUDA()) { this->Info().feature_types.SetDevice(fmat_ctx_.gpu_id); *ellpack_->Impl() = EllpackPageImpl(&fmat_ctx_, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } else { // Can happen when QDM is initialized on CPU, but a GPU version is queried by a different QDM // for cut reference. 
auto cuda_ctx = ctx->MakeCUDA(); this->Info().feature_types.SetDevice(cuda_ctx.gpu_id); *ellpack_->Impl() = EllpackPageImpl(&cuda_ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } } CHECK(ellpack_); auto begin_iter = BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(ellpack_)); return BatchSet<EllpackPage>(begin_iter); } void GetCutsFromEllpack(EllpackPage const& page, common::HistogramCuts* cuts) { *cuts = page.Impl()->Cuts(); } } // namespace xgboost::data
5d27a15c851e680f53594f8731123177856c0afa.cu
/** * Copyright 2020-2023, XGBoost contributors */ #include <algorithm> #include <memory> #include <type_traits> #include "../common/hist_util.cuh" #include "batch_utils.h" // for RegenGHist #include "device_adapter.cuh" #include "ellpack_page.cuh" #include "gradient_index.h" #include "iterative_dmatrix.h" #include "proxy_dmatrix.cuh" #include "proxy_dmatrix.h" #include "simple_batch_iterator.h" #include "sparse_page_source.h" namespace xgboost::data { void IterativeDMatrix::InitFromCUDA(Context const* ctx, BatchParam const& p, DataIterHandle iter_handle, float missing, std::shared_ptr<DMatrix> ref) { // A handle passed to external iterator. DMatrixProxy* proxy = MakeProxy(proxy_); CHECK(proxy); // The external iterator auto iter = DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{iter_handle, reset_, next_}; dh::XGBCachingDeviceAllocator<char> alloc; auto num_rows = [&]() { return cuda_impl::Dispatch(proxy, [](auto const& value) { return value.NumRows(); }); }; auto num_cols = [&]() { return cuda_impl::Dispatch(proxy, [](auto const& value) { return value.NumCols(); }); }; size_t row_stride = 0; size_t nnz = 0; // Sketch for all batches. std::vector<common::SketchContainer> sketch_containers; size_t batches = 0; size_t accumulated_rows = 0; bst_feature_t cols = 0; int32_t current_device; dh::safe_cuda(cudaGetDevice(&current_device)); auto get_device = [&]() -> int32_t { std::int32_t d = (ctx->gpu_id == Context::kCpuId) ? current_device : ctx->gpu_id; CHECK_NE(d, Context::kCpuId); return d; }; /** * Generate quantiles */ common::HistogramCuts cuts; do { // We use do while here as the first batch is fetched in ctor // ctx_.gpu_id = proxy->DeviceIdx(); CHECK_LT(ctx->gpu_id, common::AllVisibleGPUs()); dh::safe_cuda(cudaSetDevice(get_device())); if (cols == 0) { cols = num_cols(); collective::Allreduce<collective::Operation::kMax>(&cols, 1); this->info_.num_col_ = cols; } else { CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns."; } if (!ref) { sketch_containers.emplace_back(proxy->Info().feature_types, p.max_bin, cols, num_rows(), get_device()); auto* p_sketch = &sketch_containers.back(); proxy->Info().weights_.SetDevice(get_device()); cuda_impl::Dispatch(proxy, [&](auto const& value) { common::AdapterDeviceSketch(value, p.max_bin, proxy->Info(), missing, p_sketch); }); } auto batch_rows = num_rows(); accumulated_rows += batch_rows; dh::device_vector<size_t> row_counts(batch_rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); row_stride = std::max(row_stride, cuda_impl::Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); })); nnz += thrust::reduce(thrust::cuda::par(alloc), row_counts.begin(), row_counts.end()); batches++; } while (iter.Next()); iter.Reset(); auto n_features = cols; CHECK_GE(n_features, 1) << "Data must has at least 1 column."; dh::safe_cuda(cudaSetDevice(get_device())); if (!ref) { HostDeviceVector<FeatureType> ft; common::SketchContainer final_sketch( sketch_containers.empty() ? 
ft : sketch_containers.front().FeatureTypes(), p.max_bin, cols, accumulated_rows, get_device()); for (auto const& sketch : sketch_containers) { final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data()); final_sketch.FixError(); } sketch_containers.clear(); sketch_containers.shrink_to_fit(); final_sketch.MakeCuts(&cuts, this->info_.IsColumnSplit()); } else { GetCutsFromRef(ctx, ref, Info().num_col_, p, &cuts); } this->info_.num_row_ = accumulated_rows; this->info_.num_nonzero_ = nnz; auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows, get_device]() { if (!ellpack_) { // Should be put inside the while loop to protect against empty batch. In // that case device id is invalid. ellpack_.reset(new EllpackPage); *(ellpack_->Impl()) = EllpackPageImpl(get_device(), cuts, this->IsDense(), row_stride, accumulated_rows); } }; /** * Generate gradient index. */ size_t offset = 0; iter.Reset(); size_t n_batches_for_verification = 0; while (iter.Next()) { init_page(); dh::safe_cuda(cudaSetDevice(get_device())); auto rows = num_rows(); dh::device_vector<size_t> row_counts(rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); cuda_impl::Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); }); auto is_dense = this->IsDense(); proxy->Info().feature_types.SetDevice(get_device()); auto d_feature_types = proxy->Info().feature_types.ConstDeviceSpan(); auto new_impl = cuda_impl::Dispatch(proxy, [&](auto const& value) { return EllpackPageImpl(value, missing, get_device(), is_dense, row_counts_span, d_feature_types, row_stride, rows, cuts); }); size_t num_elements = ellpack_->Impl()->Copy(get_device(), &new_impl, offset); offset += num_elements; proxy->Info().num_row_ = num_rows(); proxy->Info().num_col_ = cols; if (batches != 1) { this->info_.Extend(std::move(proxy->Info()), false, true); } n_batches_for_verification++; } CHECK_EQ(batches, n_batches_for_verification) << "Different number of batches returned between 2 iterations"; if (batches == 1) { this->info_ = std::move(proxy->Info()); this->info_.num_nonzero_ = nnz; CHECK_EQ(proxy->Info().labels.Size(), 0); } iter.Reset(); // Synchronise worker columns info_.SynchronizeNumberOfColumns(); } BatchSet<EllpackPage> IterativeDMatrix::GetEllpackBatches(Context const* ctx, BatchParam const& param) { if (param.Initialized()) { CheckParam(param); CHECK(!detail::RegenGHist(param, batch_)) << error::InconsistentMaxBin(); } if (!ellpack_ && !ghist_) { LOG(FATAL) << "`QuantileDMatrix` not initialized."; } if (!ellpack_) { ellpack_.reset(new EllpackPage()); if (ctx->IsCUDA()) { this->Info().feature_types.SetDevice(ctx->gpu_id); *ellpack_->Impl() = EllpackPageImpl(ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } else if (fmat_ctx_.IsCUDA()) { this->Info().feature_types.SetDevice(fmat_ctx_.gpu_id); *ellpack_->Impl() = EllpackPageImpl(&fmat_ctx_, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } else { // Can happen when QDM is initialized on CPU, but a GPU version is queried by a different QDM // for cut reference. 
auto cuda_ctx = ctx->MakeCUDA(); this->Info().feature_types.SetDevice(cuda_ctx.gpu_id); *ellpack_->Impl() = EllpackPageImpl(&cuda_ctx, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } } CHECK(ellpack_); auto begin_iter = BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(ellpack_)); return BatchSet<EllpackPage>(begin_iter); } void GetCutsFromEllpack(EllpackPage const& page, common::HistogramCuts* cuts) { *cuts = page.Impl()->Cuts(); } } // namespace xgboost::data
e964b79bd2f0cd6f0c0ec3ba8f70448a01652f1e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;

const int vsize = 512;
const int blocksize = 256;

__global__ void vsum(float *x, float *y, float *z)
{
    // General form: int i = blockIdx.x * blockDim.x + threadIdx.x;
    // here the compile-time constant blocksize matches blockDim.x for this launch.
    int i = blockIdx.x * blocksize + threadIdx.x;
    if (i < vsize)
        z[i] = x[i] + y[i];
}

int main()
{
    float *A = (float*)malloc(vsize * sizeof(float));
    float *B = (float*)malloc(vsize * sizeof(float));
    float *C = (float*)malloc(vsize * sizeof(float));

    // Fill indices 0..vsize-1 (the original loop ran from 1 to vsize and wrote one past the end).
    for (int i = 0; i < vsize; i++)
    {
        A[i] = (float)i;
        B[i] = (float)i;
    }

    float *dA, *dB, *dC;
    hipMalloc(&dA, vsize * sizeof(float));
    hipMalloc(&dB, vsize * sizeof(float));
    hipMalloc(&dC, vsize * sizeof(float));

    hipMemcpy(dA, A, vsize * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dB, B, vsize * sizeof(float), hipMemcpyHostToDevice);

    // Launch with the device pointers (the original passed the host pointers A, B, C).
    hipLaunchKernelGGL(vsum, dim3(vsize / blocksize), dim3(blocksize), 0, 0, dA, dB, dC);
    hipDeviceSynchronize();

    // Copy the result back to the host before printing (missing in the original).
    hipMemcpy(C, dC, vsize * sizeof(float), hipMemcpyDeviceToHost);

    for (int i = 0; i < vsize; i += 16)
    {
        for (int j = 0; j < 16; j++)
            cout << C[i + j] << " ";
        cout << endl;
    }

    hipFree(dA);
    hipFree(dB);
    hipFree(dC);
    free(A);
    free(B);
    free(C);
    return 0;
}
e964b79bd2f0cd6f0c0ec3ba8f70448a01652f1e.cu
#include <iostream>
using namespace std;

const int vsize = 512;
const int blocksize = 256;

__global__ void vsum(float *x, float *y, float *z)
{
    // General form: int i = blockIdx.x * blockDim.x + threadIdx.x;
    // here the compile-time constant blocksize matches blockDim.x for this launch.
    int i = blockIdx.x * blocksize + threadIdx.x;
    if (i < vsize)
        z[i] = x[i] + y[i];
}

int main()
{
    float *A = (float*)malloc(vsize * sizeof(float));
    float *B = (float*)malloc(vsize * sizeof(float));
    float *C = (float*)malloc(vsize * sizeof(float));

    // Fill indices 0..vsize-1 (the original loop ran from 1 to vsize and wrote one past the end).
    for (int i = 0; i < vsize; i++)
    {
        A[i] = (float)i;
        B[i] = (float)i;
    }

    float *dA, *dB, *dC;
    cudaMalloc(&dA, vsize * sizeof(float));
    cudaMalloc(&dB, vsize * sizeof(float));
    cudaMalloc(&dC, vsize * sizeof(float));

    cudaMemcpy(dA, A, vsize * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, vsize * sizeof(float), cudaMemcpyHostToDevice);

    // Launch with the device pointers (the original passed the host pointers A, B, C).
    vsum<<<vsize / blocksize, blocksize>>>(dA, dB, dC);
    cudaDeviceSynchronize();

    // Copy the result back to the host before printing (missing in the original).
    cudaMemcpy(C, dC, vsize * sizeof(float), cudaMemcpyDeviceToHost);

    for (int i = 0; i < vsize; i += 16)
    {
        for (int j = 0; j < 16; j++)
            cout << C[i + j] << " ";
        cout << endl;
    }

    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    free(A);
    free(B);
    free(C);
    return 0;
}
e9ef626f7f884dd16dc7ec9dcf8d94178fb6878b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_complex.h" #include "math_functions.h" #include "math_constants.h": #include <hipfft.h> #include <stdio.h> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } __device__ __forceinline__ hipComplex expf(hipComplex z) { hipComplex res; float t = expf(z.x); sincosf(z.y, &res.y, &res.x); res.x *= t; res.y *= t; return res; } __global__ void calculate(hipComplex *fths, int *xo, int *yo, int *uo, float *zo2, float dfxs, float lambda, float k0, int Ts, float *fxs, float * y0seg, float* x0seg, int S_Bx, int S_By, int N_Bx, int N_By) { // int index = blockIdx.y*(blockDim.x*gridDim.x) + blockIdx.x*blockDim.x + threadIdx.x; // int index = threadIdx.x*blockIdx.z + blockIdx.y*blockDim.z + blockIdx.x*blockDim.x; // int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; // int threadId = blockId * blockDim.x + threadIdx.x; // int pt_indx = threadIdx.x*blockIdx.z; float yp = yo[threadIdx.x] - y0seg[blockIdx.y]; float xp = xo[threadIdx.x] - x0seg[blockIdx.x]; float rp = sqrt(zo2[threadIdx.x ] + xp*xp + yp*yp); float inv_rp = 1 / rp; float fxp = xp*inv_rp / lambda; float fyp = yp*inv_rp / lambda; int iifx = round(fxp / dfxs) + S_Bx / 2 + 1; int iify = round(fyp / dfxs) + S_By / 2 + 1; if (iifx <= 0 || iifx > S_Bx || iify <= 0 || iify > S_Bx){ iifx = S_Bx / 2 + 1; iify = S_Bx / 2 + 1; } hipComplex c0; hipComplex arg; arg.x = (k0*rp - 2 * CUDART_PI_F*(fxs[iifx] + fxs[iify])*(Ts / 2)*inv_rp); c0 = expf(arg); // c0.x = uo[blockDim.x] * c0.x; // c0.y = uo[blockDim.x] * c0.y; //fths[threadId] = c0; // Nep[threadId] = iifx; // Nip[threadId] = iify; fths[iifx + blockIdx.x*S_Bx + iify*S_Bx*N_Bx + blockIdx.x* S_Bx*N_Bx*S_By].x += c0.x; fths[iifx + blockIdx.x*S_Bx + iify*S_Bx*N_Bx + blockIdx.x* S_Bx*N_Bx*S_By].y += c0.y; } hipfftResult preparePlan2D(hipfftHandle* plan, int nRows, int nCols, int batch){ int n[2] = { nRows, nCols }; hipfftResult result = hipfftPlanMany(plan, 2, //rank n, //dimensions = {nRows, nCols} 0, //inembed batch, //istride 1, //idist 0, //onembed batch, //ostride 1, //odist HIPFFT_C2C, //hipfftType batch /*batch*/); if (result != 0){ // std::cout << "preparePlan2D error, result: " << result << "/n"; return result; } return result; } hipfftResult execute2D(hipfftHandle* plan, hipfftComplex* idata, hipfftComplex* odata, int direction){ hipfftResult result = hipfftExecC2C(*plan, idata, odata, direction); if (result != 0){ // cout << "execute2D error, result: " << result << "/n"; return result; } return result; } __global__ void copy2bitmap(hipComplex *H, int *bitmap_H) { } __global__ void asemble(hipComplex *fths, int *xo, int *yo, int *uo, float *zo2, float *dfxs, int *Nxs, float *lambda, int *Ts, float * fxs, float* y0seg, float* x0seg, int* Nep, int* Nip) { //int index = blockIdx.y*(blockDim.x*gridDim.x) + blockIdx.x*blockDim.x + threadIdx.x; // int index = threadIdx.x*blockIdx.z + blockIdx.y*blockDim.z + blockIdx.x*blockDim.x; int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * blockDim.x + threadIdx.x; int pt_indx = threadIdx.x*blockIdx.z; // fths2[] = fths2[] + fths[threadId] // fths[threadId] = c0; // Nep[threadId] = iifx; // Nip[threadId] = iify; } void CPAS_CGH_3DPS_2d(int Np, int* xo, int* yo, int* zo, int* uo, int Nx, int 
Ny, int dx, float lambda, int S_Bx, int S_By, hipComplex* fths_p) { hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); double k0 = 2 * CUDART_PI_F / lambda; int x_size = (Nx / 2) + ((Nx / 2) - 1) + 1; int y_size = (Ny / 2) + ((Ny / 2) - 1) + 1; float *x = (float*) malloc(x_size * sizeof(float)); float *y = (float*) malloc(y_size * sizeof(float)); for (int t = 0; t < x_size; t++){ x[t] = (-Nx / 2 + t)*dx; } for (int t = 0; t < y_size; t++){ y[t] = (-Ny / 2 + t)*dx; } int N_Bx = Nx / S_Bx; // doda obsug nie cakowitych dziele int N_By = Ny / S_By; int Ts = S_Bx*dx; float dfxs = 1 / (float)Ts; int fxs_size = (S_Bx / 2) + ((S_Bx / 2) - 1) + 1; float *fxs = (float*)malloc(fxs_size * sizeof(float)); for (int t = 0; t < fxs_size; t++){ fxs[t] = (float)(-S_Bx / 2 + t)*dfxs; } float * x0seg = (float*)malloc((N_Bx)* sizeof(float)); for (int t = 0; t < N_By; t++){ x0seg[t] = (x[0] + (t*Ts) + Ts / 2); } float * y0seg = (float*)malloc((N_By)* sizeof(float)); for (int t = 0; t < N_By; t++){ y0seg[t] = (y[0] + (t*Ts) + Ts / 2); } /* float * nseg_bx = (float*)malloc((Nosx)* sizeof(float)); for (int t = 0; t < Nosx; t++){ nseg_bx[t] = (1 + (t*Nxs)); } float * nseg_by = (float*)malloc((Nosy)* sizeof(float)); for (int t = 0; t < Nosy; t++){ nseg_by[t] = (1 + (t*Nxs)); } float *h = (float*)calloc(Nx, sizeof(float)); */ float *z02; z02 = (float*)malloc((Np)* sizeof(float)); for (int t = 0; t < Np; t++) z02[t] = zo[t] * zo[t]; // hipMalloc(&fths_p, sizeof(hipComplex)*N_Bx*N_By*S_Bx*S_By); // hipMemset(fths_p, 0, sizeof(hipComplex)*N_Bx*N_By*S_Bx*S_By); int *d_xo; int *d_yo; float *d_z02; hipMalloc((void**)&d_xo, sizeof(int)*Np); hipMalloc((void**)&d_yo, sizeof(int)*Np); hipMalloc((void**)&d_z02, sizeof(float)*Np); hipMemcpy(d_xo, xo, Np*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_yo, yo, Np*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_z02, z02, Np*sizeof(float), hipMemcpyHostToDevice); float *d_fxs; float *d_y0seg; float *d_x0seg; hipMalloc((void**)&d_x0seg, sizeof(float)*N_Bx); hipMalloc((void**)&d_y0seg, sizeof(float)*N_By); hipMalloc((void**)&d_fxs, sizeof(float)*fxs_size); hipMemcpy(d_fxs, fxs, fxs_size*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_x0seg, x0seg, N_Bx*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y0seg, y0seg, N_By*sizeof(float), hipMemcpyHostToDevice); dim3 grid; grid.x = N_Bx;//y grid.y = N_By;//x dim3 block; block.x = Np; //z block.y = 1; hipEventRecord(start, 0); calculate << < grid, block >> >(fths_p, d_xo, d_yo, uo, d_z02, dfxs, lambda, k0, Ts, d_fxs, d_y0seg, d_x0seg, S_Bx, S_Bx, N_Bx, N_By); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("Time for the kernel: %f ms\n", time); /* hipComplex *host; host = (hipComplex*)malloc(sizeof(hipComplex)*Nosx*Nosy*Np); hipMemcpy(host, fths, sizeof(hipComplex)*Nosx*Nosy*Np, hipMemcpyDeviceToHost); */ } int main() { hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); /*START CUDA CALC PART - DEKLARACJE*/ int Nx = 1024; int Ny = 1024; int dx = 8; float lambda = 0.5; /*START CUDA FFT 2D PART - DEKLARACJE*/ int S_Bx = 8; int S_By = 8; hipfftComplex* h_out; //dane wynikowe CPU hipfftComplex* holo; //dane wyjciowe GPU int batch = Nx/S_Bx * Ny/S_By; //N_Bx*N_By hipfftHandle forwardPlan; preparePlan2D(&forwardPlan, S_Bx, S_By, batch); h_out = (hipfftComplex*)malloc(sizeof(hipfftComplex)*S_Bx*S_By*batch); //allokacja pamici na wynik (CPU) hipMalloc(&holo, sizeof(hipfftComplex) *S_Bx*S_By*batch); //allokacja pamici na dane 
wyjciowe (GPU) hipMemset(holo, 0, sizeof(hipfftComplex)*S_Bx*S_By*batch); //Wypenianie zaalokowanej pamici zerami (GPU) /*END CUDA FFT 2D PART - DEKLARACJE*/ /*Kod kernela*/ int Np = 1000; int *xo; int *yo; int *zo; int *uo; xo = (int*)malloc((Np)* sizeof(int)); yo = (int*)malloc((Np)* sizeof(int)); zo = (int*)malloc((Np)* sizeof(int)); uo = (int*)malloc((Np)* sizeof(int)); for (int tt = 0; tt < Np; tt++) { xo[tt] = tt; yo[tt] = Np - tt; zo[tt] = yo[tt] * xo[tt]; } hipComplex *fths_p; hipfftComplex* fhs; // hipMalloc(&fhs, sizeof(hipfftComplex)*S_Bx*S_By*batch); //allokacja pamici na dane wejciowe (GPU) hipMalloc(&fths_p, sizeof(hipComplex)*Nx*Ny); hipMemset(fths_p, 0, sizeof(hipComplex)*Nx*Ny); hipEventRecord(start, 0); /*START CUDA CALC PART */ CPAS_CGH_3DPS_2d(Np, xo, yo, zo, uo, Nx, Ny, dx, lambda, S_Bx, S_By, fths_p); /*START CUDA CALC PART */ /*START CUDA FFT PART */ execute2D(&forwardPlan, fths_p, holo, HIPFFT_FORWARD); hipEventRecord(stop, 0); hipEventSynchronize(stop); /*Wyswietlanie modulu/fazy*/ hipMemcpy(h_out, holo, sizeof(hipfftComplex)*S_Bx*S_By*batch, hipMemcpyDeviceToHost); /*END CUDA FFT PART */ // Retrieve result from device and store it in host array hipEventElapsedTime(&time, start, stop); printf("Time for the kernel: %f ms\n", time); printf("Time for the kernel: %f ms\n", time); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. /* cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } */ return 0; }
e9ef626f7f884dd16dc7ec9dcf8d94178fb6878b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuComplex.h" #include "math_functions.h" #include "math_constants.h": #include <cufft.h> #include <stdio.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } __device__ __forceinline__ cuComplex expf(cuComplex z) { cuComplex res; float t = expf(z.x); sincosf(z.y, &res.y, &res.x); res.x *= t; res.y *= t; return res; } __global__ void calculate(cuComplex *fths, int *xo, int *yo, int *uo, float *zo2, float dfxs, float lambda, float k0, int Ts, float *fxs, float * y0seg, float* x0seg, int S_Bx, int S_By, int N_Bx, int N_By) { // int index = blockIdx.y*(blockDim.x*gridDim.x) + blockIdx.x*blockDim.x + threadIdx.x; // int index = threadIdx.x*blockIdx.z + blockIdx.y*blockDim.z + blockIdx.x*blockDim.x; // int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; // int threadId = blockId * blockDim.x + threadIdx.x; // int pt_indx = threadIdx.x*blockIdx.z; float yp = yo[threadIdx.x] - y0seg[blockIdx.y]; float xp = xo[threadIdx.x] - x0seg[blockIdx.x]; float rp = sqrt(zo2[threadIdx.x ] + xp*xp + yp*yp); float inv_rp = 1 / rp; float fxp = xp*inv_rp / lambda; float fyp = yp*inv_rp / lambda; int iifx = round(fxp / dfxs) + S_Bx / 2 + 1; int iify = round(fyp / dfxs) + S_By / 2 + 1; if (iifx <= 0 || iifx > S_Bx || iify <= 0 || iify > S_Bx){ iifx = S_Bx / 2 + 1; iify = S_Bx / 2 + 1; } cuComplex c0; cuComplex arg; arg.x = (k0*rp - 2 * CUDART_PI_F*(fxs[iifx] + fxs[iify])*(Ts / 2)*inv_rp); c0 = expf(arg); // c0.x = uo[blockDim.x] * c0.x; // c0.y = uo[blockDim.x] * c0.y; //fths[threadId] = c0; // Nep[threadId] = iifx; // Nip[threadId] = iify; fths[iifx + blockIdx.x*S_Bx + iify*S_Bx*N_Bx + blockIdx.x* S_Bx*N_Bx*S_By].x += c0.x; fths[iifx + blockIdx.x*S_Bx + iify*S_Bx*N_Bx + blockIdx.x* S_Bx*N_Bx*S_By].y += c0.y; } cufftResult preparePlan2D(cufftHandle* plan, int nRows, int nCols, int batch){ int n[2] = { nRows, nCols }; cufftResult result = cufftPlanMany(plan, 2, //rank n, //dimensions = {nRows, nCols} 0, //inembed batch, //istride 1, //idist 0, //onembed batch, //ostride 1, //odist CUFFT_C2C, //cufftType batch /*batch*/); if (result != 0){ // std::cout << "preparePlan2D error, result: " << result << "/n"; return result; } return result; } cufftResult execute2D(cufftHandle* plan, cufftComplex* idata, cufftComplex* odata, int direction){ cufftResult result = cufftExecC2C(*plan, idata, odata, direction); if (result != 0){ // cout << "execute2D error, result: " << result << "/n"; return result; } return result; } __global__ void copy2bitmap(cuComplex *H, int *bitmap_H) { } __global__ void asemble(cuComplex *fths, int *xo, int *yo, int *uo, float *zo2, float *dfxs, int *Nxs, float *lambda, int *Ts, float * fxs, float* y0seg, float* x0seg, int* Nep, int* Nip) { //int index = blockIdx.y*(blockDim.x*gridDim.x) + blockIdx.x*blockDim.x + threadIdx.x; // int index = threadIdx.x*blockIdx.z + blockIdx.y*blockDim.z + blockIdx.x*blockDim.x; int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = blockId * blockDim.x + threadIdx.x; int pt_indx = threadIdx.x*blockIdx.z; // fths2[] = fths2[] + fths[threadId] // fths[threadId] = c0; // Nep[threadId] = iifx; // Nip[threadId] = iify; } void CPAS_CGH_3DPS_2d(int Np, int* xo, int* yo, int* zo, int* uo, int Nx, int Ny, int dx, float lambda, int S_Bx, int S_By, cuComplex* fths_p) { cudaEvent_t start, 
stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); double k0 = 2 * CUDART_PI_F / lambda; int x_size = (Nx / 2) + ((Nx / 2) - 1) + 1; int y_size = (Ny / 2) + ((Ny / 2) - 1) + 1; float *x = (float*) malloc(x_size * sizeof(float)); float *y = (float*) malloc(y_size * sizeof(float)); for (int t = 0; t < x_size; t++){ x[t] = (-Nx / 2 + t)*dx; } for (int t = 0; t < y_size; t++){ y[t] = (-Ny / 2 + t)*dx; } int N_Bx = Nx / S_Bx; // dodać obsługę nie całkowitych dzieleń int N_By = Ny / S_By; int Ts = S_Bx*dx; float dfxs = 1 / (float)Ts; int fxs_size = (S_Bx / 2) + ((S_Bx / 2) - 1) + 1; float *fxs = (float*)malloc(fxs_size * sizeof(float)); for (int t = 0; t < fxs_size; t++){ fxs[t] = (float)(-S_Bx / 2 + t)*dfxs; } float * x0seg = (float*)malloc((N_Bx)* sizeof(float)); for (int t = 0; t < N_By; t++){ x0seg[t] = (x[0] + (t*Ts) + Ts / 2); } float * y0seg = (float*)malloc((N_By)* sizeof(float)); for (int t = 0; t < N_By; t++){ y0seg[t] = (y[0] + (t*Ts) + Ts / 2); } /* float * nseg_bx = (float*)malloc((Nosx)* sizeof(float)); for (int t = 0; t < Nosx; t++){ nseg_bx[t] = (1 + (t*Nxs)); } float * nseg_by = (float*)malloc((Nosy)* sizeof(float)); for (int t = 0; t < Nosy; t++){ nseg_by[t] = (1 + (t*Nxs)); } float *h = (float*)calloc(Nx, sizeof(float)); */ float *z02; z02 = (float*)malloc((Np)* sizeof(float)); for (int t = 0; t < Np; t++) z02[t] = zo[t] * zo[t]; // cudaMalloc(&fths_p, sizeof(cuComplex)*N_Bx*N_By*S_Bx*S_By); // cudaMemset(fths_p, 0, sizeof(cuComplex)*N_Bx*N_By*S_Bx*S_By); int *d_xo; int *d_yo; float *d_z02; cudaMalloc((void**)&d_xo, sizeof(int)*Np); cudaMalloc((void**)&d_yo, sizeof(int)*Np); cudaMalloc((void**)&d_z02, sizeof(float)*Np); cudaMemcpy(d_xo, xo, Np*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_yo, yo, Np*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_z02, z02, Np*sizeof(float), cudaMemcpyHostToDevice); float *d_fxs; float *d_y0seg; float *d_x0seg; cudaMalloc((void**)&d_x0seg, sizeof(float)*N_Bx); cudaMalloc((void**)&d_y0seg, sizeof(float)*N_By); cudaMalloc((void**)&d_fxs, sizeof(float)*fxs_size); cudaMemcpy(d_fxs, fxs, fxs_size*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_x0seg, x0seg, N_Bx*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y0seg, y0seg, N_By*sizeof(float), cudaMemcpyHostToDevice); dim3 grid; grid.x = N_Bx;//y grid.y = N_By;//x dim3 block; block.x = Np; //z block.y = 1; cudaEventRecord(start, 0); calculate << < grid, block >> >(fths_p, d_xo, d_yo, uo, d_z02, dfxs, lambda, k0, Ts, d_fxs, d_y0seg, d_x0seg, S_Bx, S_Bx, N_Bx, N_By); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("Time for the kernel: %f ms\n", time); /* cuComplex *host; host = (cuComplex*)malloc(sizeof(cuComplex)*Nosx*Nosy*Np); cudaMemcpy(host, fths, sizeof(cuComplex)*Nosx*Nosy*Np, cudaMemcpyDeviceToHost); */ } int main() { cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); /*START CUDA CALC PART - DEKLARACJE*/ int Nx = 1024; int Ny = 1024; int dx = 8; float lambda = 0.5; /*START CUDA FFT 2D PART - DEKLARACJE*/ int S_Bx = 8; int S_By = 8; cufftComplex* h_out; //dane wynikowe CPU cufftComplex* holo; //dane wyjściowe GPU int batch = Nx/S_Bx * Ny/S_By; //N_Bx*N_By cufftHandle forwardPlan; preparePlan2D(&forwardPlan, S_Bx, S_By, batch); h_out = (cufftComplex*)malloc(sizeof(cufftComplex)*S_Bx*S_By*batch); //allokacja pamięci na wynik (CPU) cudaMalloc(&holo, sizeof(cufftComplex) *S_Bx*S_By*batch); //allokacja pamięci na dane wyjściowe (GPU) cudaMemset(holo, 0, 
sizeof(cufftComplex)*S_Bx*S_By*batch); //Wypełnianie zaalokowanej pamięci zerami (GPU) /*END CUDA FFT 2D PART - DEKLARACJE*/ /*Kod kernela*/ int Np = 1000; int *xo; int *yo; int *zo; int *uo; xo = (int*)malloc((Np)* sizeof(int)); yo = (int*)malloc((Np)* sizeof(int)); zo = (int*)malloc((Np)* sizeof(int)); uo = (int*)malloc((Np)* sizeof(int)); for (int tt = 0; tt < Np; tt++) { xo[tt] = tt; yo[tt] = Np - tt; zo[tt] = yo[tt] * xo[tt]; } cuComplex *fths_p; cufftComplex* fhs; // cudaMalloc(&fhs, sizeof(cufftComplex)*S_Bx*S_By*batch); //allokacja pamięci na dane wejściowe (GPU) cudaMalloc(&fths_p, sizeof(cuComplex)*Nx*Ny); cudaMemset(fths_p, 0, sizeof(cuComplex)*Nx*Ny); cudaEventRecord(start, 0); /*START CUDA CALC PART */ CPAS_CGH_3DPS_2d(Np, xo, yo, zo, uo, Nx, Ny, dx, lambda, S_Bx, S_By, fths_p); /*START CUDA CALC PART */ /*START CUDA FFT PART */ execute2D(&forwardPlan, fths_p, holo, CUFFT_FORWARD); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); /*Wyswietlanie modulu/fazy*/ cudaMemcpy(h_out, holo, sizeof(cufftComplex)*S_Bx*S_By*batch, cudaMemcpyDeviceToHost); /*END CUDA FFT PART */ // Retrieve result from device and store it in host array cudaEventElapsedTime(&time, start, stop); printf("Time for the kernel: %f ms\n", time); printf("Time for the kernel: %f ms\n", time); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. /* cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } */ return 0; }
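A note on the batched FFT layout: preparePlan2D above builds its 2D C2C plan with istride/ostride equal to the batch count and idist/odist of 1, i.e. an interleaved-segment layout. For comparison, below is a minimal sketch of the more common contiguous layout, where each segment occupies one dense block and segments are stored back to back; the helper name and the contiguous-storage assumption are illustrative and not taken from the file above.

// Sketch only: assumes the batch segments of size nRows x nCols are stored
// contiguously, one after another, in the input/output buffers.
#include <cufft.h>

cufftResult prepareContiguousPlan2D(cufftHandle* plan, int nRows, int nCols, int batch) {
    int n[2] = { nRows, nCols };
    return cufftPlanMany(plan, 2, n,
                         NULL, 1, nRows * nCols,   // inembed, istride, idist
                         NULL, 1, nRows * nCols,   // onembed, ostride, odist
                         CUFFT_C2C, batch);
}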
7e856e5831c9c6ef3b9d91df7746ff84f142ba51.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> __global__ void map1(int m, double *xs, double *weightih, double *hidden, int d, int n_hidden){ // m is the no. of samples and d is the number of features in xs(input data) int index = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.x * blockDim.x + threadIdx.x; //int k = blockIdx.x * blockDim.x + threadIdx.x; if (index<m){ for (int j = 0; j < n_hidden; j++){ double accum = weightih[j]; for (int i=0; i<d; i++){ accum += xs[index*d + i] * weightih[i*d + j]; } hidden[index*d + j] = 1.0/ (1.0 + exp(-accum)); } /*for (int j=0; j<n_hidden; j++){ double accum = matmul(xs, weightih, result, index, d); hidden[index*d + j] = 1.0/ (1.0 + exp(-accum); } */ } } __global__ void map2(int m, double *xs, double *ys, double *hidden, double *weightho, double *output, double *deltao, int d, int n_hidden, int n_output){ //double error = 0.0; int index = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.x * blockDim.x + threadIdx.x; //int k = blockIdx.x * blockDim.x + threadIdx.x; if (index<m){ for (int k = 0; k < n_output; k++){ double accum = weightho[k]; for (int j=0; j<n_hidden; j++){ accum += hidden[index*d + j] * weightho[j*d + k]; } output[index*d + k] = 1.0/ (1.0 + exp(-accum)); //error[0] += (ys[index*d + k] - output[index*d + k]); deltao[k] = (ys[index*d + k] - output[index*d + k]) * output[index*d + k] * (1 - output[index*d + k]); } } } __global__ void map3(int m, double *xs, double *ys, double *hidden, double *weightho, double *deltao, double *deltah, int d, int n_hidden, int n_output){ int index = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.x * blockDim.x + threadIdx.x; //int k = blockIdx.x * blockDim.x + threadIdx.x; if (index<m){ for (int j = 0; j < n_hidden; j++){ double accum = 0.0; for (int k = 0; k < n_output; k++){ accum += weightho[j * d + k] * deltao[k]; } deltah[j] = accum * hidden[index*d + j] * (1 - hidden[index*d + j]); } } } __global__ void map4(int m, double *xs, double *ys, double eta, double *deltah, double *deltaweightih, double *weightih, int d, int n_hidden, int n_output){ int index = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.x * blockDim.x + threadIdx.x; //int i = blockIdx.x * blockDim.x + threadIdx.x; if (index<m){ for (int j = 0; j < n_hidden; j++){ deltaweightih[j] = eta * deltah[j]; for (int i = 0; i < d; i++){ deltaweightih[i * d + j] += eta * xs[index * d + i] * deltah[j]; weightih[i * d + j] += deltaweightih[i * d + j]; } } } } __global__ void map5(int m, double *xs, double *ys, double eta, double *hidden, double *deltao, double *deltah, double *deltaweightho, double *weightho, int d, int n_hidden, int n_output){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index<m){ for (int k = 0; k < n_output; k++){ deltaweightho[k] = eta * deltao[k]; for (int j = 0; j < n_hidden; j++){ deltaweightho[j * d + k] += eta * hidden[index * d + j] * deltao[k]; weightho[j * d + k] += deltaweightho[j * d + k]; } } } } #define num_iterations 50 #define eta 0.5 // eta denotes the learning rate. 
# include<time.h> int main(){ clock_t start, end; double time_used; //Initialize number of samples and features int n_patterns = 2500; int n_inputs = 20; int n_hidden = 10; int n_outputs = 1; //Allocate host memory variables size_t size1 = n_patterns * n_inputs * sizeof(double); size_t size2 = n_patterns * n_hidden * sizeof(double); size_t size3 = n_patterns * sizeof(double); size_t size4 = n_inputs * sizeof(double); size_t size5 = n_patterns * n_hidden * sizeof(double); size_t size6 = n_patterns * n_outputs * sizeof(double); size_t size7 = n_inputs * n_hidden * sizeof(double); size_t size8 = n_hidden * n_outputs * sizeof(double); size_t size9 = n_outputs * sizeof(double); size_t size10 = n_hidden * sizeof(double); double *input; double *hidden; double *weightih; double *deltaweightih; double *weightho; double *deltaweightho; double *output; double *target; double *deltao; double *deltah; input = (double*)malloc(size1); hidden = (double*)malloc(size5); weightih = (double*)malloc(size7); deltaweightih = (double*)malloc(size7); weightho = (double*)malloc(size8); deltaweightho = (double*)malloc(size8); output = (double*)malloc(size6); target = (double*)malloc(size6); deltao = (double*)malloc(size9); deltah = (double*)malloc(size10); //Read input data from file FILE *fp, *fp1; fp = fopen ("input", "r"); if (!fp){ printf ("Unable to open file!"); return 1; } for (int i=0; i<n_patterns; i++){ for (int j=0; j<n_inputs; j++){ fscanf(fp, "%lf", &input[i*(n_inputs) + j]); } fscanf(fp, "%lf", &target[i]); } fclose(fp); for(int j = 0 ; j < n_hidden ; j++ ) { /* initialize WeightIH and DeltaWeightIH */ for(int i = 0 ; i < n_inputs ; i++ ) { deltaweightih[i * n_inputs + j]= 0.0 ; weightih[i * n_inputs + j] = 2.0 * ( rand()%n_patterns - 0.5 ) * 0.02 ; } } /*for(int k = 0 ; k < n_outputs ; k ++ ) { // initialize WeightHO and DeltaWeightHO for(int j = 0 ; j < n_hidden ; j++ ) { deltaweightho[j * n_hidden + k] = 0.0 ; weightho[j * n_hidden + k] = 2.0 * ( rand()%n_patterns - 0.5 ) * 0.01 ; } } */ weightho[0] = 25.510000; weightho[1] = 48.070000; weightho[2] = 38.850000; weightho[3] = 15.250000; weightho[4] = 42.250000; weightho[5] = 40.750000; weightho[6] = 22.110000; weightho[7] = 36.790000; weightho[8] = 8.070000; weightho[9] = 46.35000; deltaweightho[0] = 0; deltaweightho[1] = 0; deltaweightho[2] = 0; deltaweightho[3] = 0; deltaweightho[4] = 0; deltaweightho[5] = 0; deltaweightho[6] = 0; deltaweightho[7] = 0; deltaweightho[8] = 0; deltaweightho[9] = 0; for (int i=0; i<10; i++){ printf("%lf \n", weightho[i]); } /*for (int i=0; i<n_patterns; i++){ for (int j=0; j<n_hidden; j++){ hidden[i*(n_hidden) + j] = 1.0; } } */ /*//Initialize weights for (int i=0; i<d; i++){ params[i] = 0.0; } //Initialize nodes in each layer in the neural network float *out_input = (float *)malloc(sizeof(float) * (n_inputs + 1)); float *out_hidden = (float *)malloc(sizeof(float) * n_hidden); float *out_output = (float *)malloc(sizeof(float) * n_outputs); buildLayer(out_input, n_inputs + 1, 1.0f); buildLayer(out_hidden, n_hidden, 1.0f); buildLayer(out_output, n_outputs, 1.0f); // Initialize changes layer float *changes_input_hidden = buildWeightsLayer(n_inputs + 1, n_hidden, 0.0f); float *changes_hidden_output = buildWeightsLayer(n_hidden, n_outputs, 0.0f); // Initialize weight matrix float *w_input_hidden = buildWeightsLayer(n_inputs + 1, n_hidden, -1.0f); float *w_hidden_output = buildWeightsLayer(n_hidden, n_outputs, -1.0f); // Print first 10 rows of input data for (int i=0; i<20; i+=2) { printf("%lf %lf => %lf \n", xs[i], xs[i+1], 
ys[i/2]); } */ //Allocate variables in device memory double *input_d; double *hidden_d; double *weightih_d; double *deltaweightih_d; double *weightho_d; double *deltaweightho_d; double *output_d; double *target_d; double *deltao_d; double *deltah_d; double *error; hipMalloc (&input_d , size1); hipMalloc (&hidden_d , size5); hipMalloc (&weightih_d , size7); hipMalloc (&deltaweightih_d , size7); hipMalloc (&weightho_d , size8); hipMalloc (&deltaweightho_d , size8); hipMalloc (&output_d , size6); hipMalloc (&target_d , size6); hipMalloc (&deltao_d , size9); hipMalloc (&deltah_d , size10); hipMalloc (&error, sizeof(double)); //Copy vectors from host memory to device memory hipMemcpy(input_d, input, size1, hipMemcpyHostToDevice); //hipMemcpy(output_d, output, size5, hipMemcpyHostToDevice); //hipMemcpy(hidden_d, hidden, size5, hipMemcpyHostToDevice); hipMemcpy(weightih_d, weightih, size7, hipMemcpyHostToDevice); hipMemcpy(deltaweightih_d, deltaweightih, size7, hipMemcpyHostToDevice); hipMemcpy(weightho_d, weightho, size8, hipMemcpyHostToDevice); hipMemcpy(deltaweightho_d, deltaweightho, size8, hipMemcpyHostToDevice); //hipMemcpy(output_d, deltaweightho, size8, hipMemcpyHostToDevice); hipMemcpy(target_d, target, size6, hipMemcpyHostToDevice); //hipMemcpy(deltao_d, deltao, size8, hipMemcpyHostToDevice); //hipMemcpy(deltah_d, deltah, size8, hipMemcpyHostToDevice); //clock_t start, end; //double time_used; start = clock(); for (int i=0; i<num_iterations; i++){ hipMemset((void*)error, 0, sizeof(double)); printf("HI1"); hipLaunchKernelGGL(( map1), dim3(2000),dim3(512), 0, 0, n_patterns, input_d, weightih_d, hidden_d, n_inputs, n_hidden); printf("HI2"); hipLaunchKernelGGL(( map2), dim3(2000),dim3(512), 0, 0, n_patterns, input_d, target_d, hidden_d, weightho_d, output_d, deltao_d, n_inputs, n_hidden, n_outputs); //hipMemcpy (output, output_d, size6, hipMemcpyDeviceToHost); /*for (int j=0; j<10; j++){ printf("%lf \n", weightho[j]); }*/ printf("HI3"); hipLaunchKernelGGL(( map3), dim3(2000),dim3(512), 0, 0, n_patterns, input_d, target_d, hidden_d, weightho_d, deltao_d, deltah_d, n_inputs, n_hidden, n_outputs); printf("HI4"); hipLaunchKernelGGL(( map4), dim3(2000),dim3(512), 0, 0, n_patterns, input_d, target_d, eta, deltah_d, deltaweightih_d, weightih_d, n_inputs, n_hidden, n_outputs); printf("HI5"); hipLaunchKernelGGL(( map5), dim3(2000),dim3(512), 0, 0, n_patterns, input_d, target_d, eta, hidden_d, deltao_d, deltah_d, deltaweightho_d, weightho_d, n_inputs, n_hidden, n_outputs); printf("HI6"); hipMemcpy (weightih, weightih_d, size7, hipMemcpyDeviceToHost); printf("HI7"); hipMemcpy (weightho, weightho_d, size8, hipMemcpyDeviceToHost); printf("HI8"); } end = clock(); time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("Time taken for copy in : %f \n", time_used); hipMemcpy (output, output_d, size6, hipMemcpyDeviceToHost); for (int i=0; i<10; i++){ printf("%lf \n", weightih[i]); } for (int i=0; i<10; i++){ printf("%lf \n", weightho[i]); } fp1 = fopen("nnet.out","w"); for (int i=0; i<2500;i++){ fprintf(fp1, "%lf \n", output[i]); } }
7e856e5831c9c6ef3b9d91df7746ff84f142ba51.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <curand.h> #include <curand_kernel.h> __global__ void map1(int m, double *xs, double *weightih, double *hidden, int d, int n_hidden){ // m is the no. of samples and d is the number of features in xs(input data) int index = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.x * blockDim.x + threadIdx.x; //int k = blockIdx.x * blockDim.x + threadIdx.x; if (index<m){ for (int j = 0; j < n_hidden; j++){ double accum = weightih[j]; for (int i=0; i<d; i++){ accum += xs[index*d + i] * weightih[i*d + j]; } hidden[index*d + j] = 1.0/ (1.0 + exp(-accum)); } /*for (int j=0; j<n_hidden; j++){ double accum = matmul(xs, weightih, result, index, d); hidden[index*d + j] = 1.0/ (1.0 + exp(-accum); } */ } } __global__ void map2(int m, double *xs, double *ys, double *hidden, double *weightho, double *output, double *deltao, int d, int n_hidden, int n_output){ //double error = 0.0; int index = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.x * blockDim.x + threadIdx.x; //int k = blockIdx.x * blockDim.x + threadIdx.x; if (index<m){ for (int k = 0; k < n_output; k++){ double accum = weightho[k]; for (int j=0; j<n_hidden; j++){ accum += hidden[index*d + j] * weightho[j*d + k]; } output[index*d + k] = 1.0/ (1.0 + exp(-accum)); //error[0] += (ys[index*d + k] - output[index*d + k]); deltao[k] = (ys[index*d + k] - output[index*d + k]) * output[index*d + k] * (1 - output[index*d + k]); } } } __global__ void map3(int m, double *xs, double *ys, double *hidden, double *weightho, double *deltao, double *deltah, int d, int n_hidden, int n_output){ int index = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.x * blockDim.x + threadIdx.x; //int k = blockIdx.x * blockDim.x + threadIdx.x; if (index<m){ for (int j = 0; j < n_hidden; j++){ double accum = 0.0; for (int k = 0; k < n_output; k++){ accum += weightho[j * d + k] * deltao[k]; } deltah[j] = accum * hidden[index*d + j] * (1 - hidden[index*d + j]); } } } __global__ void map4(int m, double *xs, double *ys, double eta, double *deltah, double *deltaweightih, double *weightih, int d, int n_hidden, int n_output){ int index = blockIdx.x * blockDim.x + threadIdx.x; //int j = blockIdx.x * blockDim.x + threadIdx.x; //int i = blockIdx.x * blockDim.x + threadIdx.x; if (index<m){ for (int j = 0; j < n_hidden; j++){ deltaweightih[j] = eta * deltah[j]; for (int i = 0; i < d; i++){ deltaweightih[i * d + j] += eta * xs[index * d + i] * deltah[j]; weightih[i * d + j] += deltaweightih[i * d + j]; } } } } __global__ void map5(int m, double *xs, double *ys, double eta, double *hidden, double *deltao, double *deltah, double *deltaweightho, double *weightho, int d, int n_hidden, int n_output){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index<m){ for (int k = 0; k < n_output; k++){ deltaweightho[k] = eta * deltao[k]; for (int j = 0; j < n_hidden; j++){ deltaweightho[j * d + k] += eta * hidden[index * d + j] * deltao[k]; weightho[j * d + k] += deltaweightho[j * d + k]; } } } } #define num_iterations 50 #define eta 0.5 // eta denotes the learning rate. 
# include<time.h> int main(){ clock_t start, end; double time_used; //Initialize number of samples and features int n_patterns = 2500; int n_inputs = 20; int n_hidden = 10; int n_outputs = 1; //Allocate host memory variables size_t size1 = n_patterns * n_inputs * sizeof(double); size_t size2 = n_patterns * n_hidden * sizeof(double); size_t size3 = n_patterns * sizeof(double); size_t size4 = n_inputs * sizeof(double); size_t size5 = n_patterns * n_hidden * sizeof(double); size_t size6 = n_patterns * n_outputs * sizeof(double); size_t size7 = n_inputs * n_hidden * sizeof(double); size_t size8 = n_hidden * n_outputs * sizeof(double); size_t size9 = n_outputs * sizeof(double); size_t size10 = n_hidden * sizeof(double); double *input; double *hidden; double *weightih; double *deltaweightih; double *weightho; double *deltaweightho; double *output; double *target; double *deltao; double *deltah; input = (double*)malloc(size1); hidden = (double*)malloc(size5); weightih = (double*)malloc(size7); deltaweightih = (double*)malloc(size7); weightho = (double*)malloc(size8); deltaweightho = (double*)malloc(size8); output = (double*)malloc(size6); target = (double*)malloc(size6); deltao = (double*)malloc(size9); deltah = (double*)malloc(size10); //Read input data from file FILE *fp, *fp1; fp = fopen ("input", "r"); if (!fp){ printf ("Unable to open file!"); return 1; } for (int i=0; i<n_patterns; i++){ for (int j=0; j<n_inputs; j++){ fscanf(fp, "%lf", &input[i*(n_inputs) + j]); } fscanf(fp, "%lf", &target[i]); } fclose(fp); for(int j = 0 ; j < n_hidden ; j++ ) { /* initialize WeightIH and DeltaWeightIH */ for(int i = 0 ; i < n_inputs ; i++ ) { deltaweightih[i * n_inputs + j]= 0.0 ; weightih[i * n_inputs + j] = 2.0 * ( rand()%n_patterns - 0.5 ) * 0.02 ; } } /*for(int k = 0 ; k < n_outputs ; k ++ ) { // initialize WeightHO and DeltaWeightHO for(int j = 0 ; j < n_hidden ; j++ ) { deltaweightho[j * n_hidden + k] = 0.0 ; weightho[j * n_hidden + k] = 2.0 * ( rand()%n_patterns - 0.5 ) * 0.01 ; } } */ weightho[0] = 25.510000; weightho[1] = 48.070000; weightho[2] = 38.850000; weightho[3] = 15.250000; weightho[4] = 42.250000; weightho[5] = 40.750000; weightho[6] = 22.110000; weightho[7] = 36.790000; weightho[8] = 8.070000; weightho[9] = 46.35000; deltaweightho[0] = 0; deltaweightho[1] = 0; deltaweightho[2] = 0; deltaweightho[3] = 0; deltaweightho[4] = 0; deltaweightho[5] = 0; deltaweightho[6] = 0; deltaweightho[7] = 0; deltaweightho[8] = 0; deltaweightho[9] = 0; for (int i=0; i<10; i++){ printf("%lf \n", weightho[i]); } /*for (int i=0; i<n_patterns; i++){ for (int j=0; j<n_hidden; j++){ hidden[i*(n_hidden) + j] = 1.0; } } */ /*//Initialize weights for (int i=0; i<d; i++){ params[i] = 0.0; } //Initialize nodes in each layer in the neural network float *out_input = (float *)malloc(sizeof(float) * (n_inputs + 1)); float *out_hidden = (float *)malloc(sizeof(float) * n_hidden); float *out_output = (float *)malloc(sizeof(float) * n_outputs); buildLayer(out_input, n_inputs + 1, 1.0f); buildLayer(out_hidden, n_hidden, 1.0f); buildLayer(out_output, n_outputs, 1.0f); // Initialize changes layer float *changes_input_hidden = buildWeightsLayer(n_inputs + 1, n_hidden, 0.0f); float *changes_hidden_output = buildWeightsLayer(n_hidden, n_outputs, 0.0f); // Initialize weight matrix float *w_input_hidden = buildWeightsLayer(n_inputs + 1, n_hidden, -1.0f); float *w_hidden_output = buildWeightsLayer(n_hidden, n_outputs, -1.0f); // Print first 10 rows of input data for (int i=0; i<20; i+=2) { printf("%lf %lf => %lf \n", xs[i], xs[i+1], 
ys[i/2]); } */ //Allocate variables in device memory double *input_d; double *hidden_d; double *weightih_d; double *deltaweightih_d; double *weightho_d; double *deltaweightho_d; double *output_d; double *target_d; double *deltao_d; double *deltah_d; double *error; cudaMalloc (&input_d , size1); cudaMalloc (&hidden_d , size5); cudaMalloc (&weightih_d , size7); cudaMalloc (&deltaweightih_d , size7); cudaMalloc (&weightho_d , size8); cudaMalloc (&deltaweightho_d , size8); cudaMalloc (&output_d , size6); cudaMalloc (&target_d , size6); cudaMalloc (&deltao_d , size9); cudaMalloc (&deltah_d , size10); cudaMalloc (&error, sizeof(double)); //Copy vectors from host memory to device memory cudaMemcpy(input_d, input, size1, cudaMemcpyHostToDevice); //cudaMemcpy(output_d, output, size5, cudaMemcpyHostToDevice); //cudaMemcpy(hidden_d, hidden, size5, cudaMemcpyHostToDevice); cudaMemcpy(weightih_d, weightih, size7, cudaMemcpyHostToDevice); cudaMemcpy(deltaweightih_d, deltaweightih, size7, cudaMemcpyHostToDevice); cudaMemcpy(weightho_d, weightho, size8, cudaMemcpyHostToDevice); cudaMemcpy(deltaweightho_d, deltaweightho, size8, cudaMemcpyHostToDevice); //cudaMemcpy(output_d, deltaweightho, size8, cudaMemcpyHostToDevice); cudaMemcpy(target_d, target, size6, cudaMemcpyHostToDevice); //cudaMemcpy(deltao_d, deltao, size8, cudaMemcpyHostToDevice); //cudaMemcpy(deltah_d, deltah, size8, cudaMemcpyHostToDevice); //clock_t start, end; //double time_used; start = clock(); for (int i=0; i<num_iterations; i++){ cudaMemset((void*)error, 0, sizeof(double)); printf("HI1"); map1<<<2000,512>>>(n_patterns, input_d, weightih_d, hidden_d, n_inputs, n_hidden); printf("HI2"); map2<<<2000,512>>>(n_patterns, input_d, target_d, hidden_d, weightho_d, output_d, deltao_d, n_inputs, n_hidden, n_outputs); //cudaMemcpy (output, output_d, size6, cudaMemcpyDeviceToHost); /*for (int j=0; j<10; j++){ printf("%lf \n", weightho[j]); }*/ printf("HI3"); map3<<<2000,512>>>(n_patterns, input_d, target_d, hidden_d, weightho_d, deltao_d, deltah_d, n_inputs, n_hidden, n_outputs); printf("HI4"); map4<<<2000,512>>>(n_patterns, input_d, target_d, eta, deltah_d, deltaweightih_d, weightih_d, n_inputs, n_hidden, n_outputs); printf("HI5"); map5<<<2000,512>>>(n_patterns, input_d, target_d, eta, hidden_d, deltao_d, deltah_d, deltaweightho_d, weightho_d, n_inputs, n_hidden, n_outputs); printf("HI6"); cudaMemcpy (weightih, weightih_d, size7, cudaMemcpyDeviceToHost); printf("HI7"); cudaMemcpy (weightho, weightho_d, size8, cudaMemcpyDeviceToHost); printf("HI8"); } end = clock(); time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("Time taken for copy in : %f \n", time_used); cudaMemcpy (output, output_d, size6, cudaMemcpyDeviceToHost); for (int i=0; i<10; i++){ printf("%lf \n", weightih[i]); } for (int i=0; i<10; i++){ printf("%lf \n", weightho[i]); } fp1 = fopen("nnet.out","w"); for (int i=0; i<2500;i++){ fprintf(fp1, "%lf \n", output[i]); } }
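The training loop above launches map1..map5 back to back and only prints progress markers ("HI1", "HI2", ...); no launch is ever checked for errors. A minimal sketch of a launch-check helper that could sit after each launch is shown below; the macro name is illustrative and not part of the original file.

// Sketch only: checks the most recent kernel launch for an error.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CHECK_LAUNCH(msg) do {                                          \
    cudaError_t err = cudaGetLastError();                               \
    if (err != cudaSuccess) {                                           \
        fprintf(stderr, "%s: %s\n", (msg), cudaGetErrorString(err));    \
        exit(EXIT_FAILURE);                                             \
    }                                                                   \
} while (0)

// Possible usage after a launch such as map1<<<2000,512>>>(...):
//   CHECK_LAUNCH("map1");
//   cudaDeviceSynchronize();  // optionally also surface asynchronous errors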
229f3476a10a40cac273c0aad729fc19e9fd97cc.hip
// !!! This is a file automatically generated by hipify!!! #include <UnitTest++.h> #include "../CrossingDistanceHelper.hh" #include "../RayTraceHelper.hh" #include "MonteRay_CylindricalGrid.hh" namespace MonteRay_CylindricalGrid_crossingDistance_tests{ using namespace MonteRay; SUITE( MonteRay_CylindricalGrid_crossingDistance_Tests) { using GridBins_t = MonteRay_GridBins; using Position_t = MonteRay::Vector3D<gpuRayFloat_t>; const gpuFloatType_t s2 = std::sqrt(2.0); enum coord {R=0,Z=1,Theta=2,DIM=3}; inline void checkDistances( const char *file, int line, const std::vector<unsigned>& expectedIndex, const std::vector<gpuFloatType_t>& expectedDistance, const singleDimRayTraceMap_t& distances ) { char const* const errorFormat = "%s(%d): error: Failure \n"; if( expectedIndex.size() != expectedDistance.size() ) { printf(errorFormat, file, line); } CHECK_EQUAL( expectedIndex.size(), expectedDistance.size() ); if( expectedIndex.size() != distances.size() ) { printf(errorFormat, file, line); } CHECK_EQUAL( expectedIndex.size(), distances.size() ); for( auto i=0; i<distances.size(); ++i ) { if( expectedIndex[i] != distances.id(i) ) { printf("%s(%d): error: Failure in cell id #%d \n", file, line, i); } CHECK_EQUAL( expectedIndex [i], distances.id(i) ); if( std::abs( expectedDistance[i] - distances.dist(i) ) > 1.0e-5 ) { printf("%s(%d): error: Failure in distance #%d \n", file, line, i); } CHECK_CLOSE( expectedDistance[i], distances.dist(i), 1e-5 ); } } #define checkDistances(expectedIndex, expectedDistance, distances) { checkDistances(__FILE__, __LINE__, expectedIndex, expectedDistance, distances); } using distances_t = singleDimRayTraceMap_t; using CylindricalGrid = MonteRay_CylindricalGrid; using GridBins = MonteRay_GridBins; class CylindricalGridTester{ public: std::unique_ptr<CylindricalGrid> pCyl; CylindricalGridTester(){ std::vector<gpuRayFloat_t> Rverts = { 1.0, 2.0, 3.0, 5.0 }; std::vector<gpuRayFloat_t> Zverts = { 0.0, 1.0, 2.0, 3.0, 5.0 }; pCyl = std::make_unique<CylindricalGrid>(2, GridBins{Rverts, GridBins::RADIAL}, GridBins{Zverts}); } }; TEST_FIXTURE(CylindricalGridTester, CrossingDistance_in_1D_R_inward_from_outside_to_outside ) { Position_t position ( -6.5, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 100.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 9, distances.size() ); checkDistances( std::vector<unsigned>({4,3,2,1,0,1,2,3,4}), std::vector<gpuFloatType_t>({1.5,3.5,4.5,5.5,7.5,8.5,9.5,11.5,distance}), distances ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_in_1D_R_inward_from_outside_to_inside_stop_inward ) { Position_t position ( -6.5, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 6.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 5, distances.size() ); checkDistances( std::vector<unsigned>({4,3,2,1,0}), std::vector<gpuFloatType_t>({1.5,3.5,4.5,5.5,6.0}), distances ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_in_1D_R_inward_from_outside_to_inside_stop_outward ) { Position_t position ( -6.5, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; 
distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 7, distances.size() ); checkDistances( std::vector<unsigned>({4,3,2,1,0,1,2}), std::vector<gpuFloatType_t>({1.5,3.5,4.5,5.5,7.5,8.5,9.0}), distances ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_through_a_single_cylinder_in_2D_R_inward_from_inside_to_outside ) { gpuFloatType_t y = 3.0f / std::sqrt(2.0f ); gpuFloatType_t last_dist = std::sqrt( 25 - y*y ); Position_t position ( -4.0, y, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 4, distances.size() ); checkDistances( std::vector<unsigned>({3,2,3,4}), std::vector<gpuFloatType_t>({4.0f-y,4.0f+y,4.0f+last_dist,9.0}), distances ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_tanget_to_first_inner_cylinder_posY ) { gpuFloatType_t x = -3.5; gpuFloatType_t y = 3.0; gpuFloatType_t last_dist = std::sqrt( 25 - y*y ); Position_t position ( x, y, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 4, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 3.5, distances.dist(0), 1e-5 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( 3.5, distances.dist(1), 1e-5 ); CHECK_EQUAL( 3, distances.id(2) ); CHECK_CLOSE( 7.5, distances.dist(2), 1e-5 ); CHECK_EQUAL( 4, distances.id(3) ); CHECK_CLOSE( 9.0, distances.dist(3), 1e-5 ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_tanget_to_first_inner_cylinder_negY ) { gpuFloatType_t x = -3.5; gpuFloatType_t y = -3.0; gpuFloatType_t last_dist = std::sqrt( 25 - y*y ); Position_t position ( x, y, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 4, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 3.5, distances.dist(0), 1e-5 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( 3.5, distances.dist(1), 1e-5 ); CHECK_EQUAL( 3, distances.id(2) ); CHECK_CLOSE( 7.5, distances.dist(2), 1e-5 ); CHECK_EQUAL( 4, distances.id(3) ); CHECK_CLOSE( 9.0, distances.dist(3), 1e-5 ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_tanget_to_first_second_cylinder_posY ) { gpuFloatType_t y = 2.0; Position_t position ( -4.0, y, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 6, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 4.0 - std::sqrt(9.0-4.0), distances.dist(0), 1e-5 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( 4.0, distances.dist(1), 1e-5 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( 4.0, distances.dist(2), 
1e-5 ); CHECK_EQUAL( 2, distances.id(3) ); CHECK_CLOSE( 4.0 + std::sqrt(9.0-4.0), distances.dist(3), 1e-5 ); CHECK_EQUAL( 3, distances.id(4) ); CHECK_CLOSE( 4.0 + std::sqrt(25.0-4.0), distances.dist(4), 1e-5 ); CHECK_EQUAL( 4, distances.id(5) ); CHECK_CLOSE( distance, distances.dist(5), 1e-5 ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_outward_from_Origin_posX_to_outside ) { // std::cout << "Debug: ---------------------------------------------------------" << std::endl; Position_t position ( 0.0, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( 0, distances.id(0) ); CHECK_CLOSE( 1.0, distances.dist(0), 1e-5 ); CHECK_EQUAL( 1, distances.id(1) ); CHECK_CLOSE( 2.0, distances.dist(1), 1e-5 ); CHECK_EQUAL( 2, distances.id(2) ); CHECK_CLOSE( 3.0, distances.dist(2), 1e-5 ); CHECK_EQUAL( 3, distances.id(3) ); CHECK_CLOSE( 5.0, distances.dist(3), 1e-5 ); CHECK_EQUAL( 4, distances.id(4) ); CHECK_CLOSE( 9.0, distances.dist(4), 1e-5 ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_outward_from_Origin_posX_to_inside ) { // std::cout << "Debug: ---------------------------------------------------------" << std::endl; Position_t position ( 0.0, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 4.5; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 4, distances.size() ); CHECK_EQUAL( 0, distances.id(0) ); CHECK_CLOSE( 1.0, distances.dist(0), 1e-5 ); CHECK_EQUAL( 1, distances.id(1) ); CHECK_CLOSE( 2.0, distances.dist(1), 1e-5 ); CHECK_EQUAL( 2, distances.id(2) ); CHECK_CLOSE( 3.0, distances.dist(2), 1e-5 ); CHECK_EQUAL( 3, distances.id(3) ); CHECK_CLOSE( 4.5, distances.dist(3), 1e-5 ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_outward_from_posX_Postion_negX_Direction ) { // std::cout << "Debug: ---------------------------------------------------------" << std::endl; Position_t position ( 3.5, 0.0, 0.5 ); Position_t direction( -1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 8, distances.size() ); checkDistances( std::vector<unsigned>({3,2,1,0,1,2,3,4}), std::vector<gpuFloatType_t>({0.5, 1.5, 2.5, 4.5, 5.5, 6.5, 8.5, 9.0}), distances ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_outward_from_posX_Postion_negX_Direction_not_outside ) { // std::cout << "Debug: ---------------------------------------------------------" << std::endl; Position_t position ( 3.5, 0.0, 0.5 ); Position_t direction( -1, 0, 0 ); gpuFloatType_t distance = 7.5; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 7, distances.size() ); checkDistances( 
std::vector<unsigned>({3,2,1,0,1,2,3}), std::vector<gpuFloatType_t>({0.5, 1.5, 2.5, 4.5, 5.5, 6.5, 7.5}), distances ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, radialCrossingDistances_inside_thru_to_outside ) { Position_t position ( -4.5, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 100.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 8, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 1.5, distances.dist(0), 1e-5 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( 2.5, distances.dist(1), 1e-5 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( 3.5, distances.dist(2), 1e-5 ); CHECK_EQUAL( 0, distances.id(3) ); CHECK_CLOSE( 5.5, distances.dist(3), 1e-5 ); CHECK_EQUAL( 1, distances.id(4) ); CHECK_CLOSE( 6.5, distances.dist(4), 1e-5 ); CHECK_EQUAL( 2, distances.id(5) ); CHECK_CLOSE( 7.5, distances.dist(5), 1e-5 ); CHECK_EQUAL( 3, distances.id(6) ); CHECK_CLOSE( 9.5, distances.dist(6), 1e-5 ); CHECK_EQUAL( 4, distances.id(7) ); CHECK_CLOSE( 100.0, distances.dist(7), 1e-5 ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, radialCrossingDistances_inside_misses_inner_cells ) { Position_t position ( -3.5, 3.1, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 100.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 2, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 3.5+std::sqrt(5.0*5.0-3.1*3.1), distances.dist(0), 1e-5 ); CHECK_EQUAL( 4, distances.id(1) ); CHECK_CLOSE( 100.0, distances.dist(1), 1e-5 ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, radialCrossingDistances_twice_through_a_single_cylinder_going_inward_single_crossing_outward ) { gpuFloatType_t y = 3.0 / std::sqrt(2.0 ); Position_t position ( -4.0, y, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 4, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 4.0 - y, distances.dist(0), 1e-5 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( 4.0 + y, distances.dist(1), 1e-5 ); CHECK_EQUAL( 3, distances.id(2) ); CHECK_CLOSE( 4.0 + std::sqrt(5.0*5.0-y*y) , distances.dist(2), 1e-5 ); CHECK_EQUAL( 4, distances.id(3) ); CHECK_CLOSE( 9.0, distances.dist(3), 1e-5 ); #ifdef __HIPCC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } } }
229f3476a10a40cac273c0aad729fc19e9fd97cc.cu
#include <UnitTest++.h> #include "../CrossingDistanceHelper.hh" #include "../RayTraceHelper.hh" #include "MonteRay_CylindricalGrid.hh" namespace MonteRay_CylindricalGrid_crossingDistance_tests{ using namespace MonteRay; SUITE( MonteRay_CylindricalGrid_crossingDistance_Tests) { using GridBins_t = MonteRay_GridBins; using Position_t = MonteRay::Vector3D<gpuRayFloat_t>; const gpuFloatType_t s2 = std::sqrt(2.0); enum coord {R=0,Z=1,Theta=2,DIM=3}; inline void checkDistances( const char *file, int line, const std::vector<unsigned>& expectedIndex, const std::vector<gpuFloatType_t>& expectedDistance, const singleDimRayTraceMap_t& distances ) { char const* const errorFormat = "%s(%d): error: Failure \n"; if( expectedIndex.size() != expectedDistance.size() ) { printf(errorFormat, file, line); } CHECK_EQUAL( expectedIndex.size(), expectedDistance.size() ); if( expectedIndex.size() != distances.size() ) { printf(errorFormat, file, line); } CHECK_EQUAL( expectedIndex.size(), distances.size() ); for( auto i=0; i<distances.size(); ++i ) { if( expectedIndex[i] != distances.id(i) ) { printf("%s(%d): error: Failure in cell id #%d \n", file, line, i); } CHECK_EQUAL( expectedIndex [i], distances.id(i) ); if( std::abs( expectedDistance[i] - distances.dist(i) ) > 1.0e-5 ) { printf("%s(%d): error: Failure in distance #%d \n", file, line, i); } CHECK_CLOSE( expectedDistance[i], distances.dist(i), 1e-5 ); } } #define checkDistances(expectedIndex, expectedDistance, distances) { checkDistances(__FILE__, __LINE__, expectedIndex, expectedDistance, distances); } using distances_t = singleDimRayTraceMap_t; using CylindricalGrid = MonteRay_CylindricalGrid; using GridBins = MonteRay_GridBins; class CylindricalGridTester{ public: std::unique_ptr<CylindricalGrid> pCyl; CylindricalGridTester(){ std::vector<gpuRayFloat_t> Rverts = { 1.0, 2.0, 3.0, 5.0 }; std::vector<gpuRayFloat_t> Zverts = { 0.0, 1.0, 2.0, 3.0, 5.0 }; pCyl = std::make_unique<CylindricalGrid>(2, GridBins{Rverts, GridBins::RADIAL}, GridBins{Zverts}); } }; TEST_FIXTURE(CylindricalGridTester, CrossingDistance_in_1D_R_inward_from_outside_to_outside ) { Position_t position ( -6.5, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 100.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 9, distances.size() ); checkDistances( std::vector<unsigned>({4,3,2,1,0,1,2,3,4}), std::vector<gpuFloatType_t>({1.5,3.5,4.5,5.5,7.5,8.5,9.5,11.5,distance}), distances ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_in_1D_R_inward_from_outside_to_inside_stop_inward ) { Position_t position ( -6.5, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 6.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 5, distances.size() ); checkDistances( std::vector<unsigned>({4,3,2,1,0}), std::vector<gpuFloatType_t>({1.5,3.5,4.5,5.5,6.0}), distances ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_in_1D_R_inward_from_outside_to_inside_stop_outward ) { Position_t position ( -6.5, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, 
position, direction, distance ); CHECK_EQUAL( 7, distances.size() ); checkDistances( std::vector<unsigned>({4,3,2,1,0,1,2}), std::vector<gpuFloatType_t>({1.5,3.5,4.5,5.5,7.5,8.5,9.0}), distances ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_through_a_single_cylinder_in_2D_R_inward_from_inside_to_outside ) { gpuFloatType_t y = 3.0f / std::sqrt(2.0f ); gpuFloatType_t last_dist = std::sqrt( 25 - y*y ); Position_t position ( -4.0, y, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 4, distances.size() ); checkDistances( std::vector<unsigned>({3,2,3,4}), std::vector<gpuFloatType_t>({4.0f-y,4.0f+y,4.0f+last_dist,9.0}), distances ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_tanget_to_first_inner_cylinder_posY ) { gpuFloatType_t x = -3.5; gpuFloatType_t y = 3.0; gpuFloatType_t last_dist = std::sqrt( 25 - y*y ); Position_t position ( x, y, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 4, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 3.5, distances.dist(0), 1e-5 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( 3.5, distances.dist(1), 1e-5 ); CHECK_EQUAL( 3, distances.id(2) ); CHECK_CLOSE( 7.5, distances.dist(2), 1e-5 ); CHECK_EQUAL( 4, distances.id(3) ); CHECK_CLOSE( 9.0, distances.dist(3), 1e-5 ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_tanget_to_first_inner_cylinder_negY ) { gpuFloatType_t x = -3.5; gpuFloatType_t y = -3.0; gpuFloatType_t last_dist = std::sqrt( 25 - y*y ); Position_t position ( x, y, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 4, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 3.5, distances.dist(0), 1e-5 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( 3.5, distances.dist(1), 1e-5 ); CHECK_EQUAL( 3, distances.id(2) ); CHECK_CLOSE( 7.5, distances.dist(2), 1e-5 ); CHECK_EQUAL( 4, distances.id(3) ); CHECK_CLOSE( 9.0, distances.dist(3), 1e-5 ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_tanget_to_first_second_cylinder_posY ) { gpuFloatType_t y = 2.0; Position_t position ( -4.0, y, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 6, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 4.0 - std::sqrt(9.0-4.0), distances.dist(0), 1e-5 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( 4.0, distances.dist(1), 1e-5 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( 4.0, distances.dist(2), 1e-5 ); CHECK_EQUAL( 2, distances.id(3) ); CHECK_CLOSE( 4.0 
+ std::sqrt(9.0-4.0), distances.dist(3), 1e-5 ); CHECK_EQUAL( 3, distances.id(4) ); CHECK_CLOSE( 4.0 + std::sqrt(25.0-4.0), distances.dist(4), 1e-5 ); CHECK_EQUAL( 4, distances.id(5) ); CHECK_CLOSE( distance, distances.dist(5), 1e-5 ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_outward_from_Origin_posX_to_outside ) { // std::cout << "Debug: ---------------------------------------------------------" << std::endl; Position_t position ( 0.0, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( 0, distances.id(0) ); CHECK_CLOSE( 1.0, distances.dist(0), 1e-5 ); CHECK_EQUAL( 1, distances.id(1) ); CHECK_CLOSE( 2.0, distances.dist(1), 1e-5 ); CHECK_EQUAL( 2, distances.id(2) ); CHECK_CLOSE( 3.0, distances.dist(2), 1e-5 ); CHECK_EQUAL( 3, distances.id(3) ); CHECK_CLOSE( 5.0, distances.dist(3), 1e-5 ); CHECK_EQUAL( 4, distances.id(4) ); CHECK_CLOSE( 9.0, distances.dist(4), 1e-5 ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_outward_from_Origin_posX_to_inside ) { // std::cout << "Debug: ---------------------------------------------------------" << std::endl; Position_t position ( 0.0, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 4.5; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 4, distances.size() ); CHECK_EQUAL( 0, distances.id(0) ); CHECK_CLOSE( 1.0, distances.dist(0), 1e-5 ); CHECK_EQUAL( 1, distances.id(1) ); CHECK_CLOSE( 2.0, distances.dist(1), 1e-5 ); CHECK_EQUAL( 2, distances.id(2) ); CHECK_CLOSE( 3.0, distances.dist(2), 1e-5 ); CHECK_EQUAL( 3, distances.id(3) ); CHECK_CLOSE( 4.5, distances.dist(3), 1e-5 ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_outward_from_posX_Postion_negX_Direction ) { // std::cout << "Debug: ---------------------------------------------------------" << std::endl; Position_t position ( 3.5, 0.0, 0.5 ); Position_t direction( -1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 8, distances.size() ); checkDistances( std::vector<unsigned>({3,2,1,0,1,2,3,4}), std::vector<gpuFloatType_t>({0.5, 1.5, 2.5, 4.5, 5.5, 6.5, 8.5, 9.0}), distances ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, CrossingDistance_outward_from_posX_Postion_negX_Direction_not_outside ) { // std::cout << "Debug: ---------------------------------------------------------" << std::endl; Position_t position ( 3.5, 0.0, 0.5 ); Position_t direction( -1, 0, 0 ); gpuFloatType_t distance = 7.5; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 7, distances.size() ); checkDistances( std::vector<unsigned>({3,2,1,0,1,2,3}), std::vector<gpuFloatType_t>({0.5, 1.5, 2.5, 4.5, 
5.5, 6.5, 7.5}), distances ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, radialCrossingDistances_inside_thru_to_outside ) { Position_t position ( -4.5, 0.0, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 100.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 8, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 1.5, distances.dist(0), 1e-5 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( 2.5, distances.dist(1), 1e-5 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( 3.5, distances.dist(2), 1e-5 ); CHECK_EQUAL( 0, distances.id(3) ); CHECK_CLOSE( 5.5, distances.dist(3), 1e-5 ); CHECK_EQUAL( 1, distances.id(4) ); CHECK_CLOSE( 6.5, distances.dist(4), 1e-5 ); CHECK_EQUAL( 2, distances.id(5) ); CHECK_CLOSE( 7.5, distances.dist(5), 1e-5 ); CHECK_EQUAL( 3, distances.id(6) ); CHECK_CLOSE( 9.5, distances.dist(6), 1e-5 ); CHECK_EQUAL( 4, distances.id(7) ); CHECK_CLOSE( 100.0, distances.dist(7), 1e-5 ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, radialCrossingDistances_inside_misses_inner_cells ) { Position_t position ( -3.5, 3.1, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 100.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 2, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 3.5+std::sqrt(5.0*5.0-3.1*3.1), distances.dist(0), 1e-5 ); CHECK_EQUAL( 4, distances.id(1) ); CHECK_CLOSE( 100.0, distances.dist(1), 1e-5 ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } TEST_FIXTURE(CylindricalGridTester, radialCrossingDistances_twice_through_a_single_cylinder_going_inward_single_crossing_outward ) { gpuFloatType_t y = 3.0 / std::sqrt(2.0 ); Position_t position ( -4.0, y, 0.5 ); Position_t direction( 1, 0, 0 ); gpuFloatType_t distance = 9.0; distances_t distances = crossingDistanceOnCPU(pCyl.get(), R, position, direction, distance ); CHECK_EQUAL( 4, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( 4.0 - y, distances.dist(0), 1e-5 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( 4.0 + y, distances.dist(1), 1e-5 ); CHECK_EQUAL( 3, distances.id(2) ); CHECK_CLOSE( 4.0 + std::sqrt(5.0*5.0-y*y) , distances.dist(2), 1e-5 ); CHECK_EQUAL( 4, distances.id(3) ); CHECK_CLOSE( 9.0, distances.dist(3), 1e-5 ); #ifdef __CUDACC__ auto gpuDistances = crossingDistanceOnGPU(pCyl.get(), R, position, direction, distance); compareDistances(distances, gpuDistances); #endif } } }
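The radial expectations in these tests all come from the same chord geometry: a ray starting at x0 < 0 and travelling in +x at height y crosses a cylinder of radius r centred on the z axis at distances -x0 -/+ sqrt(r*r - y*y). A small stand-alone check of the numbers used above could look like the following; the variable names are illustrative.

// Sketch only: reproduces the entry/exit distances expected by the
// CrossingDistance_through_a_single_cylinder tests above.
#include <cmath>
#include <cstdio>

int main() {
    const double x0 = -4.0;                    // ray start on the x axis
    const double y  = 3.0 / std::sqrt(2.0);    // ray height
    const double r  = 3.0;                     // inner cylinder radius
    const double half_chord = std::sqrt(r * r - y * y);
    std::printf("entry %.6f exit %.6f\n", -x0 - half_chord, -x0 + half_chord);
    return 0;
}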
bf751c1f0a816bba09130aeafc7fe0a4d15069d2.hip
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>

#include <limits>

namespace at::native {

#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char atan_name[] = "atan_impl";
#endif

void atan_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
    static const auto atan_string = jiterator_stringify(
        template <typename T> T atan_impl(T a) { return std::atan(a); }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ atan_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, atan_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::atan(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16, common_dtype, "atan_cuda", [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::atan(a);
          });
        });
  }
}

REGISTER_DISPATCH(atan_stub, &atan_kernel_cuda);

} // namespace at::native
bf751c1f0a816bba09130aeafc7fe0a4d15069d2.cu
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>

#include <limits>

namespace at::native {

#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char atan_name[] = "atan_impl";
#endif

void atan_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
    static const auto atan_string = jiterator_stringify(
        template <typename T> T atan_impl(T a) { return std::atan(a); }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ atan_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, atan_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::atan(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16, common_dtype, "atan_cuda", [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::atan(a);
          });
        });
  }
}

REGISTER_DISPATCH(atan_stub, &atan_kernel_cuda);

} // namespace at::native
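The real-dtype branch above (AT_DISPATCH_FLOATING_TYPES_AND2 plus gpu_kernel) mirrors the structure PyTorch uses for its other element-wise unary CUDA kernels. Below is a hedged sketch of the same pattern applied to a hypothetical unary op; my_op_kernel_cuda and the choice of ::tanh are illustrative placeholders, and the sketch assumes the same includes and at::native namespace as the file above.

// Sketch only, modelled on the real-dtype branch above; assumes the same
// includes and the at::native namespace. The op name and ::tanh are
// illustrative, not part of the original file.
void my_op_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "my_op_cuda", [&]() {
        gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
          return ::tanh(a);
        });
      });
}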
e8914261a0f37f65c8aae6c6b8ecc726eacde697.hip
// !!! This is a file automatically generated by hipify!!! // This file defines a bare-bones CUDA benchmark which spins waiting for a // user-specified amount of time to complete. While the benchmark itself is // simpler than the mandelbrot-set benchmark, the boilerplate is relatively // similar. // // While this benchmark will spin for an arbitrary default number of // nanoseconds, the specific amount of time to spin may be given as a number // of nanoseconds provided in the "additional_info" configuration field. // // This benchmark differs from the regular timer_spin only in that it issues // all work to the default stream, rather than a user-defined stream. #include <hip/hip_runtime.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include "benchmark_gpu_utilities.h" #include "library_interface.h" // If no number is provided, spin for this number of nanoseconds. #define DEFAULT_SPIN_DURATION (10 * 1000 * 1000) // Holds the local state for one instance of this benchmark. typedef struct { // Holds the device copy of the overall start and end time of the kernel. uint64_t *device_kernel_times; // Holds the device copy of the start and end times of each block. uint64_t *device_block_times; // Holds the device copy of the SMID each block was assigned to. uint32_t *device_block_smids; // The number of nanoseconds for which each CUDA thread should spin. uint64_t spin_duration; // Holds the grid dimension to use, set during initialization. int block_count; int thread_count; // Holds host-side times that are shared with the calling process. KernelTimes spin_kernel_times; } BenchmarkState; // Implements the cleanup function required by the library interface, but is // also called internally (only during Initialize()) to clean up after errors. static void Cleanup(void *data) { BenchmarkState *state = (BenchmarkState *) data; KernelTimes *host_times = &state->spin_kernel_times; // Free device memory. if (state->device_kernel_times) hipFree(state->device_kernel_times); if (state->device_block_times) hipFree(state->device_block_times); if (state->device_block_smids) hipFree(state->device_block_smids); // Free host memory. if (host_times->kernel_times) hipHostFree(host_times->kernel_times); if (host_times->block_times) hipHostFree(host_times->block_times); if (host_times->block_smids) hipHostFree(host_times->block_smids); memset(state, 0, sizeof(*state)); free(state); } // Allocates GPU and CPU memory. Returns 0 on error, 1 otherwise. static int AllocateMemory(BenchmarkState *state) { uint64_t block_times_size = state->block_count * sizeof(uint64_t) * 2; uint64_t block_smids_size = state->block_count * sizeof(uint32_t); KernelTimes *host_times = &state->spin_kernel_times; // Allocate device memory if (!CheckCUDAError(hipMalloc(&(state->device_kernel_times), 2 * sizeof(uint64_t)))) { return 0; } if (!CheckCUDAError(hipMalloc(&(state->device_block_times), block_times_size))) { return 0; } if (!CheckCUDAError(hipMalloc(&(state->device_block_smids), block_smids_size))) { return 0; } // Allocate host memory. if (!CheckCUDAError(hipHostMalloc(&host_times->kernel_times, 2 * sizeof(uint64_t)))) { return 0; } if (!CheckCUDAError(hipHostMalloc(&host_times->block_times, block_times_size))) { return 0; } if (!CheckCUDAError(hipHostMalloc(&host_times->block_smids, block_smids_size))) { return 0; } return 1; } // If the given argument is a non-NULL, non-empty string, attempts to set the // spin_duration by parsing it as a number of nanoseconds. 
Otherwise, this // function will set spin_duration to a default value. Returns 0 if the // argument has been set to an invalid number, or nonzero on success. static int SetSpinDuration(const char *arg, BenchmarkState *state) { int64_t parsed_value; if (!arg || (strlen(arg) == 0)) { state->spin_duration = DEFAULT_SPIN_DURATION; return 1; } char *end = NULL; parsed_value = strtoll(arg, &end, 10); if ((*end != 0) || (parsed_value < 0)) { printf("Invalid spin duration: %s\n", arg); return 0; } state->spin_duration = (uint64_t) parsed_value; return 1; } static void* Initialize(InitializationParameters *params) { BenchmarkState *state = NULL; // First allocate space for local data. state = (BenchmarkState *) malloc(sizeof(*state)); if (!state) return NULL; memset(state, 0, sizeof(*state)); if (!CheckCUDAError(hipSetDevice(params->cuda_device))) return NULL; state->thread_count = params->thread_count; state->block_count = params->block_count; if (!AllocateMemory(state)) { Cleanup(state); return NULL; } if (!SetSpinDuration(params->additional_info, state)) { Cleanup(state); return NULL; } return state; } // Nothing needs to be copied in for this benchmark. static int CopyIn(void *data) { return 1; } // Spins in a loop until at least spin_duration nanoseconds have elapsed. static __global__ void GPUSpin(uint64_t spin_duration, uint64_t *kernel_times, uint64_t *block_times, uint32_t *block_smids) { uint64_t start_time = GlobalTimer64(); // First, record the kernel and block start times if (threadIdx.x == 0) { if (blockIdx.x == 0) kernel_times[0] = start_time; block_times[blockIdx.x * 2] = start_time; block_smids[blockIdx.x] = GetSMID(); } __syncthreads(); // The actual spin loop--most of this kernel code is for recording block and // kernel times. while ((GlobalTimer64() - start_time) < spin_duration) { continue; } // Record the kernel and block end times. if (threadIdx.x == 0) { block_times[blockIdx.x * 2 + 1] = GlobalTimer64(); } kernel_times[1] = GlobalTimer64(); } static int Execute(void *data) { BenchmarkState *state = (BenchmarkState *) data; hipLaunchKernelGGL(( GPUSpin), dim3(state->block_count), dim3(state->thread_count), 0, 0, state->spin_duration, state->device_kernel_times, state->device_block_times, state->device_block_smids); return 1; } static int CopyOut(void *data, TimingInformation *times) { BenchmarkState *state = (BenchmarkState *) data; KernelTimes *host_times = &state->spin_kernel_times; uint64_t block_times_count = state->block_count * 2; uint64_t block_smids_count = state->block_count; memset(times, 0, sizeof(*times)); if (!CheckCUDAError(hipMemcpy(host_times->kernel_times, state->device_kernel_times, 2 * sizeof(uint64_t), hipMemcpyDeviceToHost))) { return 0; } if (!CheckCUDAError(hipMemcpy(host_times->block_times, state->device_block_times, block_times_count * sizeof(uint64_t), hipMemcpyDeviceToHost))) { return 0; } if (!CheckCUDAError(hipMemcpy(host_times->block_smids, state->device_block_smids, block_smids_count * sizeof(uint32_t), hipMemcpyDeviceToHost))) { return 0; } host_times->kernel_name = "GPUSpin"; host_times->block_count = state->block_count; host_times->thread_count = state->thread_count; times->kernel_count = 1; times->kernel_info = host_times; return 1; } static const char* GetName(void) { return "Timer Spin (default stream)"; } // This should be the only function we export from the library, to provide // pointers to all of the other functions. 
int RegisterFunctions(BenchmarkLibraryFunctions *functions) {
  functions->initialize = Initialize;
  functions->copy_in = CopyIn;
  functions->execute = Execute;
  functions->copy_out = CopyOut;
  functions->cleanup = Cleanup;
  functions->get_name = GetName;
  return 1;
}
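For context on how this plugin is consumed: a host runner calls RegisterFunctions once and then invokes the returned pointers in order (initialize, copy_in, execute, copy_out, cleanup). The sketch below is a hypothetical single-iteration driver; the InitializationParameters and TimingInformation fields it touches are inferred from the code above rather than taken from the real library_interface.h.

#include <stdio.h>
#include <string.h>

// Hypothetical driver; field names are inferred from the benchmark code above.
int RunBenchmarkOnce(void) {
  BenchmarkLibraryFunctions fns;
  InitializationParameters params;
  TimingInformation times;
  void *state = NULL;
  memset(&fns, 0, sizeof(fns));
  memset(&params, 0, sizeof(params));
  params.cuda_device = 0;
  params.block_count = 2;
  params.thread_count = 256;
  params.additional_info = "1000000000";  // spin for one second
  if (!RegisterFunctions(&fns)) return 0;
  state = fns.initialize(&params);
  if (!state) return 0;
  if (!fns.copy_in(state) || !fns.execute(state) ||
      !fns.copy_out(state, &times)) {
    fns.cleanup(state);
    return 0;
  }
  printf("%s: kernel start %llu ns, end %llu ns\n", fns.get_name(),
      (unsigned long long) times.kernel_info->kernel_times[0],
      (unsigned long long) times.kernel_info->kernel_times[1]);
  fns.cleanup(state);
  return 1;
}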
e8914261a0f37f65c8aae6c6b8ecc726eacde697.cu
// This file defines a bare-bones CUDA benchmark which spins waiting for a // user-specified amount of time to complete. While the benchmark itself is // simpler than the mandelbrot-set benchmark, the boilerplate is relatively // similar. // // While this benchmark will spin for an arbitrary default number of // nanoseconds, the specific amount of time to spin may be given as a number // of nanoseconds provided in the "additional_info" configuration field. // // This benchmark differs from the regular timer_spin only in that it issues // all work to the default stream, rather than a user-defined stream. #include <cuda_runtime.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include "benchmark_gpu_utilities.h" #include "library_interface.h" // If no number is provided, spin for this number of nanoseconds. #define DEFAULT_SPIN_DURATION (10 * 1000 * 1000) // Holds the local state for one instance of this benchmark. typedef struct { // Holds the device copy of the overall start and end time of the kernel. uint64_t *device_kernel_times; // Holds the device copy of the start and end times of each block. uint64_t *device_block_times; // Holds the device copy of the SMID each block was assigned to. uint32_t *device_block_smids; // The number of nanoseconds for which each CUDA thread should spin. uint64_t spin_duration; // Holds the grid dimension to use, set during initialization. int block_count; int thread_count; // Holds host-side times that are shared with the calling process. KernelTimes spin_kernel_times; } BenchmarkState; // Implements the cleanup function required by the library interface, but is // also called internally (only during Initialize()) to clean up after errors. static void Cleanup(void *data) { BenchmarkState *state = (BenchmarkState *) data; KernelTimes *host_times = &state->spin_kernel_times; // Free device memory. if (state->device_kernel_times) cudaFree(state->device_kernel_times); if (state->device_block_times) cudaFree(state->device_block_times); if (state->device_block_smids) cudaFree(state->device_block_smids); // Free host memory. if (host_times->kernel_times) cudaFreeHost(host_times->kernel_times); if (host_times->block_times) cudaFreeHost(host_times->block_times); if (host_times->block_smids) cudaFreeHost(host_times->block_smids); memset(state, 0, sizeof(*state)); free(state); } // Allocates GPU and CPU memory. Returns 0 on error, 1 otherwise. static int AllocateMemory(BenchmarkState *state) { uint64_t block_times_size = state->block_count * sizeof(uint64_t) * 2; uint64_t block_smids_size = state->block_count * sizeof(uint32_t); KernelTimes *host_times = &state->spin_kernel_times; // Allocate device memory if (!CheckCUDAError(cudaMalloc(&(state->device_kernel_times), 2 * sizeof(uint64_t)))) { return 0; } if (!CheckCUDAError(cudaMalloc(&(state->device_block_times), block_times_size))) { return 0; } if (!CheckCUDAError(cudaMalloc(&(state->device_block_smids), block_smids_size))) { return 0; } // Allocate host memory. if (!CheckCUDAError(cudaMallocHost(&host_times->kernel_times, 2 * sizeof(uint64_t)))) { return 0; } if (!CheckCUDAError(cudaMallocHost(&host_times->block_times, block_times_size))) { return 0; } if (!CheckCUDAError(cudaMallocHost(&host_times->block_smids, block_smids_size))) { return 0; } return 1; } // If the given argument is a non-NULL, non-empty string, attempts to set the // spin_duration by parsing it as a number of nanoseconds. Otherwise, this // function will set spin_duration to a default value. 
Returns 0 if the // argument has been set to an invalid number, or nonzero on success. static int SetSpinDuration(const char *arg, BenchmarkState *state) { int64_t parsed_value; if (!arg || (strlen(arg) == 0)) { state->spin_duration = DEFAULT_SPIN_DURATION; return 1; } char *end = NULL; parsed_value = strtoll(arg, &end, 10); if ((*end != 0) || (parsed_value < 0)) { printf("Invalid spin duration: %s\n", arg); return 0; } state->spin_duration = (uint64_t) parsed_value; return 1; } static void* Initialize(InitializationParameters *params) { BenchmarkState *state = NULL; // First allocate space for local data. state = (BenchmarkState *) malloc(sizeof(*state)); if (!state) return NULL; memset(state, 0, sizeof(*state)); if (!CheckCUDAError(cudaSetDevice(params->cuda_device))) return NULL; state->thread_count = params->thread_count; state->block_count = params->block_count; if (!AllocateMemory(state)) { Cleanup(state); return NULL; } if (!SetSpinDuration(params->additional_info, state)) { Cleanup(state); return NULL; } return state; } // Nothing needs to be copied in for this benchmark. static int CopyIn(void *data) { return 1; } // Spins in a loop until at least spin_duration nanoseconds have elapsed. static __global__ void GPUSpin(uint64_t spin_duration, uint64_t *kernel_times, uint64_t *block_times, uint32_t *block_smids) { uint64_t start_time = GlobalTimer64(); // First, record the kernel and block start times if (threadIdx.x == 0) { if (blockIdx.x == 0) kernel_times[0] = start_time; block_times[blockIdx.x * 2] = start_time; block_smids[blockIdx.x] = GetSMID(); } __syncthreads(); // The actual spin loop--most of this kernel code is for recording block and // kernel times. while ((GlobalTimer64() - start_time) < spin_duration) { continue; } // Record the kernel and block end times. if (threadIdx.x == 0) { block_times[blockIdx.x * 2 + 1] = GlobalTimer64(); } kernel_times[1] = GlobalTimer64(); } static int Execute(void *data) { BenchmarkState *state = (BenchmarkState *) data; GPUSpin<<<state->block_count, state->thread_count>>>(state->spin_duration, state->device_kernel_times, state->device_block_times, state->device_block_smids); return 1; } static int CopyOut(void *data, TimingInformation *times) { BenchmarkState *state = (BenchmarkState *) data; KernelTimes *host_times = &state->spin_kernel_times; uint64_t block_times_count = state->block_count * 2; uint64_t block_smids_count = state->block_count; memset(times, 0, sizeof(*times)); if (!CheckCUDAError(cudaMemcpy(host_times->kernel_times, state->device_kernel_times, 2 * sizeof(uint64_t), cudaMemcpyDeviceToHost))) { return 0; } if (!CheckCUDAError(cudaMemcpy(host_times->block_times, state->device_block_times, block_times_count * sizeof(uint64_t), cudaMemcpyDeviceToHost))) { return 0; } if (!CheckCUDAError(cudaMemcpy(host_times->block_smids, state->device_block_smids, block_smids_count * sizeof(uint32_t), cudaMemcpyDeviceToHost))) { return 0; } host_times->kernel_name = "GPUSpin"; host_times->block_count = state->block_count; host_times->thread_count = state->thread_count; times->kernel_count = 1; times->kernel_info = host_times; return 1; } static const char* GetName(void) { return "Timer Spin (default stream)"; } // This should be the only function we export from the library, to provide // pointers to all of the other functions. 
int RegisterFunctions(BenchmarkLibraryFunctions *functions) {
  functions->initialize = Initialize;
  functions->copy_in = CopyIn;
  functions->execute = Execute;
  functions->copy_out = CopyOut;
  functions->cleanup = Cleanup;
  functions->get_name = GetName;
  return 1;
}
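Both the HIP and CUDA versions call GlobalTimer64() and GetSMID(), which are declared in benchmark_gpu_utilities.h but not defined in this file. One plausible implementation reads the globaltimer and smid special registers through inline PTX, as sketched below; the project's actual helpers may differ.

// Plausible device-side helpers (inline PTX); the real definitions live in
// benchmark_gpu_utilities.h and may differ.
static __device__ __inline__ uint64_t GlobalTimer64(void) {
  uint64_t t;
  asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(t));
  return t;  // nanosecond-granularity global timer
}

static __device__ __inline__ uint32_t GetSMID(void) {
  uint32_t smid;
  asm volatile("mov.u32 %0, %%smid;" : "=r"(smid));
  return smid;  // ID of the SM the calling thread is running on
}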
0b7d835f2a7d03ce2a2a0648dee90f943dd232f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "box2d1r-512-16-512_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 17 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 3 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice)); #ifdef STENCILBENCH hipDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 16; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { hipLaunchKernelGGL(( kernel0_16), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), 
dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = 
(__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 5) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && 
(__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 6) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 7) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / 
__side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 8) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 9) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 10) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const 
AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 11) { { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 12) { { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * 
__OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 13) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 14) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 
k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 15) { { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 
0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 5) { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 6) { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE 
__OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 7) { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 8) { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 9) { const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 494; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 10) { const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * 
__OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_10), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 11) { const AN5D_TYPE __side0Len = 11; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 490; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_11), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 12) { const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_12), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 13) { const AN5D_TYPE __side0Len = 13; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 486; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_13), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 14) { const AN5D_TYPE __side0Len = 14; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * 
__halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_14), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 15) { const AN5D_TYPE __side0Len = 15; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 482; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_15), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH hipDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = 0.09371f * A[t%2][i-1][j-1] + 0.09374f * A[t%2][i-1][j] + 0.09376f * A[t%2][i-1][j+1] + 0.09372f * A[t%2][i][j-1] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] + 0.09373f * A[t%2][i+1][j-1] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+1][j+1]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
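kernel_stencil expects A1 to point at a double-buffered [2][dimsize][dimsize] array of SB_TYPE with dimsize = compsize + 2 * BENCH_RAD, and returns elapsed seconds (GPU-tiled path when scop is true, OpenMP reference loop otherwise). A minimal, hypothetical caller might look like the sketch below, assuming SB_TYPE is float (as suggested by the sizeof(float) copies) and BENCH_RAD == 1.

#include <stdio.h>
#include <stdlib.h>

// Hypothetical caller for the generated stencil above; assumes SB_TYPE == float.
int main(void) {
  const int compsize = 510;                        // interior points per dimension
  const int timestep = 64;                         // number of sweeps
  const int dimsize = compsize + 2;                // compsize + 2 * BENCH_RAD
  size_t elems = (size_t) 2 * dimsize * dimsize;   // double-buffered grid
  float *A1 = (float *) calloc(elems, sizeof(float));
  if (!A1) return 1;
  // ... initialize the first dimsize*dimsize elements (buffer 0) here ...
  double seconds = kernel_stencil(A1, compsize, timestep, true);  // GPU path
  printf("stencil took %f s\n", seconds);
  free(A1);
  return 0;
}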
0b7d835f2a7d03ce2a2a0648dee90f943dd232f8.cu
#include <assert.h> #include <stdio.h> #include "box2d1r-512-16-512_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 17 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 3 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice)); #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 16; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { kernel0_16<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE 
__OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D 
ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 5) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 6) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 
506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 7) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 8) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len 
* __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 9) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 10) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const 
AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 11) { { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 12) { { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * 
__halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 13) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 14) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 15) { { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const 
AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * 
__halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 5) { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 6) { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 7) { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, 
timestep, c0); } else if (__c0Len % __side0LenMax == 8) { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 9) { const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 494; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 10) { const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_10<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 11) { const AN5D_TYPE __side0Len = 11; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 490; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_11<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 12) { const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const 
AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_12<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 13) { const AN5D_TYPE __side0Len = 13; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 486; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_13<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 14) { const AN5D_TYPE __side0Len = 14; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_14<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 15) { const AN5D_TYPE __side0Len = 15; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 482; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_15<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = 0.09371f * A[t%2][i-1][j-1] + 0.09374f * A[t%2][i-1][j] + 0.09376f * A[t%2][i-1][j+1] + 0.09372f * A[t%2][i][j-1] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] + 
0.09373f * A[t%2][i+1][j-1] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+1][j+1]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
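The AN5D-generated driver above enumerates one constant block per possible temporal tile depth (__side0Len from 16 down to 1); the only quantity that actually changes between those blocks is the interior tile length in the streamed dimension. The stand-alone sketch below is an illustrative reconstruction, not part of the benchmark source: it simply reproduces the arithmetic that matches the hard-coded constants for this radius-1 (halo = 1) stencil, where advancing s time steps per launch costs s overlap cells on each side, so the interior tile shrinks to 512 - 2*s while the thread block (interior plus overlap) stays at 512 threads.

#include <stdio.h>

/* Illustrative reconstruction of the tile-size arithmetic used by the driver
 * above (assumes halo = 1 and side1Len = 512, as in the generated constants). */
int main(void) {
    const int side1Len = 512, halo = 1;
    for (int s = 16; s >= 1; --s) {
        int olLen      = halo * s;               /* __OlLen2: overlap per side      */
        int side2Len   = side1Len - 2 * olLen;   /* __side2Len: e.g. 480 at s = 16  */
        int side2LenOl = side2Len + 2 * olLen;   /* __side2LenOl = blockSize = 512  */
        printf("side0Len=%2d side2Len=%3d blockSize=%3d\n", s, side2Len, side2LenOl);
    }
    return 0;
}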
9b7e1655cef74f7bc99f42c977b649da2249a567.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "convolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "hip/hip_runtime.h" #include <stdint.h> #include <unistd.h> } /* syoh added: cuda matmul based on fixed-point quantization */ #define BLOCK_SIZE 16 static int lnum = 0; __global__ void quant_cuda_bnmm(float *a, float *b, float *c, int m, int n, int k, float *mean, float *variance, float *scale, float* biase, const int lnum) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; float bn_b = sqrtf(variance[row]) + 0.000001f; float bn_a = mean[row]; if( col < k && row < m ){ for(int i = 0; i < n; i++){ // n is k in normal matmul float bn_w, input; if (lnum == 0){ bn_w = a[row*n+i]; input = (b[i*k+col]/bn_b)*scale[row]; } else { bn_w = (a[row*n+i]/bn_b)*scale[row]; input = b[i*k+col]; } // quantization if (bn_w >= 1.984375) // 2-6 bn_w = 1.984375; else if (bn_w <= -2) bn_w = -2; if (input >= 15.875) // 5-3 input = 15.875; else if (input <= -16) input = -16; int8_t qa = ((int8_t)(bn_w*64)); int8_t qb = ((int8_t)(input*8+0.5)); // integer MAC int32_t qacc = (int32_t)(sum*(1<<16)); int16_t qmult = qa * qb; qacc = (int32_t)qmult*(1<<7) + qacc; sum = (float) qacc / (1<<16); } float bn_biase = (biase[row] - ((bn_a/bn_b)*scale[row])); // quantization if (bn_biase >= 7.937500) // 4-4 bn_biase = 7.937500; else if (bn_biase <= -8) bn_biase = -8; int8_t qbn_biase = ((int8_t)(bn_biase*16)); // integer MAC int32_t qacc = (int32_t)(sum*(1<<16)); qacc = (int32_t)qbn_biase*(1<<12) + qacc; // 12 = 16 - fraction_bit sum = (float)qacc / (1<<16); c[row * k + col] = sum; __syncthreads(); } } __global__ void quant_cuda_mm(float *a, float *b, float *c, int m, int n, int k, float* biase) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; if( col < k && row < m ){ for(int i = 0; i < n; i++){ // n is k in normal matmul /* original matmul */ //sum += a[row*n+i] * b[i*k+col]; /* quantization */ float aval = a[row*n+i]; float bval = b[i*k+col]; if (aval >= 1.984375) aval = 1.984375; else if (aval <= -2) aval = -2; if (bval >= 15.875) bval = 15.875; else if (bval <= -16) bval = -16; int8_t qa = ((int8_t)(aval*64)); // -2^7 ~ int8_t qb = ((int8_t)(bval*8+0.5)); // -2^7 ~ // integer mult int32_t qacc = (int32_t)(sum*(1<<16)); //-2^31 ~ int16_t qmult = qa * qb; // -2^14 ~ --> -2^5 ~ qacc = (int32_t)qmult*(1<<7) + qacc; sum = (float) qacc / (1<<16); } float bn_biase = biase[row]; // quantization if (bn_biase >= 7.937500) // 4-4 bn_biase = 7.937500; else if (bn_biase <= -8) bn_biase = -8; int8_t qbn_biase = ((int8_t)(bn_biase*16)); // integer MAC int32_t qacc = (int32_t)(sum*(1<<16)); qacc = (int32_t)qbn_biase*(1<<12) + qacc; // 12 = 16 - fraction_bit sum = (float)qacc / (1<<16); c[row * k + col] = sum; __syncthreads(); } } __global__ void quant_cuda_sqmm(float *d_a, float *d_b, float *d_result, int n) { __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; int idx; float tmp = 0; for (int sub = 0; sub < gridDim.x; ++sub){ idx = row * n + sub * BLOCK_SIZE + threadIdx.x; if(idx >= n*n) tile_a[threadIdx.y][threadIdx.x] = 0; else tile_a[threadIdx.y][threadIdx.x] 
= d_a[idx]; idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col; if(idx >= n*n) tile_b[threadIdx.y][threadIdx.x] = 0; else tile_b[threadIdx.y][threadIdx.x] = d_b[idx]; __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x]; __syncthreads(); } if(row < n && col < n) d_result[row * n + col] = tmp; } void quant_gpu_mm(int m, int k, int n, float* d_a, float* d_b, float* d_c, float* biase){ unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); if(m == k && k == n){ hipLaunchKernelGGL(( quant_cuda_sqmm), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, k); } else{ hipLaunchKernelGGL(( quant_cuda_mm), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, m, k, n, biase); } hipDeviceSynchronize(); } void quant_gpu_bnmm(int m, int k, int n, float* d_a, float* d_b, float* d_c, float* mean, float* variance, float* scale, float* biase){ unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; if (lnum == 15) lnum = 0; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); if(m == k && k == n){ hipLaunchKernelGGL(( quant_cuda_sqmm), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, k); } else{ hipLaunchKernelGGL(( quant_cuda_bnmm), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, m, k, n, mean, variance, scale, biase, lnum++); } hipDeviceSynchronize(); } /* end added */ __global__ void binarize_kernel(float *x, int n, float *binary) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; binary[i] = (x[i] >= 0) ? 1 : -1; } void binarize_gpu(float *x, int n, float *binary) { hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, binary); check_error(hipPeekAtLastError()); } __global__ void binarize_input_kernel(float *input, int n, int size, float *binary) { int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (s >= size) return; int i = 0; float mean = 0; for(i = 0; i < n; ++i){ mean += fabsf(input[i*size + s]); } mean = mean / n; for(i = 0; i < n; ++i){ binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean; } } void binarize_input_gpu(float *input, int n, int size, float *binary) { hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, input, n, size, binary); check_error(hipPeekAtLastError()); } __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary) { int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (f >= n) return; int i = 0; float mean = 0; for(i = 0; i < size; ++i){ mean += fabsf(weights[f*size + i]); } mean = mean / size; for(i = 0; i < size; ++i){ binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean; //binary[f*size + i] = weights[f*size + i]; } } void binarize_weights_gpu(float *weights, int n, int size, float *binary) { hipLaunchKernelGGL(( binarize_weights_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, weights, n, size, binary); check_error(hipPeekAtLastError()); } // DONE_180725 float* get_qinput(const int m, const int channel, const int height, const int width, float* input, float* inputtest){ printf("lnum: %d, quantizing input.. 
dim info ( m: %d, c: %d, h: %d, w: %d )\n", lnum, m, channel, height, width); float* gtocinput = (float*)malloc(channel*height*width*sizeof(float)); int8_t* qinput = (int8_t*)malloc(channel*height*width*sizeof(int8_t)); cuda_pull_array(input, gtocinput, channel*height*width); // scale, var, mean, biases: size M for (int c=0; c<channel; c++){ // channel - height - width for(int h=0; h<height; h++){ for(int w=0; w<width; w++){ int access = (c*height + h)*width + w; float input = gtocinput[access]; if (input >= 15.875) // 5-3 input = 15.875; else if (input <= -16) input = -16; int8_t qb = ((int8_t)(input*8+0.5)); qinput[access] = qb; inputtest[access] = gtocinput[access]; } } } char buf_bnw[256]; sprintf(buf_bnw, "./DAC_final_weights_180726/input_raw_%d.data", lnum); FILE* bnw_fp = fopen(buf_bnw, "w"); fwrite(qinput, sizeof(int8_t), channel*height*width, bnw_fp); free(qinput); free(gtocinput); fclose(bnw_fp); if (lnum == 15) sleep(10); float* ctoginput = cuda_make_array(inputtest, channel*height*width); return ctoginput; } void forward_convolutional_layer_gpu(convolutional_layer l, network net) { fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); if(l.binary){ binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); swap_binary(&l); } if(l.xnor){ binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); swap_binary(&l); binarize_gpu(net.input_gpu, l.c*l.h*l.w*l.batch, l.binary_input_gpu); net.input_gpu = l.binary_input_gpu; } #ifdef CUDNN float one = 1; cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, net.workspace, l.workspace_size, &one, l.dstTensorDesc, l.output_gpu); #else int i, j; int m = l.n/l.groups; int k = l.size*l.size*l.c/l.groups; int n = l.out_w*l.out_h; for(i = 0; i < l.batch; ++i){ for(j = 0; j < l.groups; ++j){ float *a = l.weights_gpu + j*l.nweights/l.groups; float *b = net.workspace; float *c = l.output_gpu + (i*l.groups + j)*n*m; float *test = (float*)malloc(l.c*l.w*l.h*sizeof(float)); float *newinput = get_qinput(l.n, l.c, l.h, l.w, net.input_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w, test); // validated //im2col_gpu(net.input_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w, // l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); im2col_gpu(newinput, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); /* syoh: toggle to custom cuda matmul (quant added) */ //gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n); if (l.batch_normalize) quant_gpu_bnmm(m, k, n, a, b, c, l.rolling_mean_gpu, l.rolling_variance_gpu, l.scales_gpu, l.biases_gpu); // batchnorm_combined else quant_gpu_mm(m, k, n, a, b, c, l.biases_gpu); // orig cuda_mm } } #endif /* if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } */ activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation); if(l.binary || l.xnor) swap_binary(&l); } __global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -(size/2.f); int h_offset = -(size/2.f); int out_index = j + w*(i + h*(k + c*b)); int l, m; for(l = 0; l < size; ++l){ for(m = 0; m < size; ++m){ int cur_h = h_offset + i + l; int cur_w = w_offset + j + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = 
(cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0; } } } extern "C" void smooth_layer(layer l, int size, float rate) { int h = l.out_h; int w = l.out_w; int c = l.out_c; size_t n = h*w*c*l.batch; hipLaunchKernelGGL(( smooth_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu); check_error(hipPeekAtLastError()); } void backward_convolutional_layer_gpu(convolutional_layer l, network net) { if(l.smooth){ smooth_layer(l, 5, l.smooth); } constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1); gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, net); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } float *original_input = net.input_gpu; if(l.xnor) net.input_gpu = l.binary_input_gpu; #ifdef CUDNN float one = 1; cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bf_algo, net.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu); if(net.delta_gpu){ if(l.binary || l.xnor) swap_binary(&l); cudnnConvolutionBackwardData(cudnn_handle(), &one, l.weightDesc, l.weights_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bd_algo, net.workspace, l.workspace_size, &one, l.dsrcTensorDesc, net.delta_gpu); if(l.binary || l.xnor) swap_binary(&l); if(l.xnor) gradient_array_gpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, net.delta_gpu); } #else int m = l.n/l.groups; int n = l.size*l.size*l.c/l.groups; int k = l.out_w*l.out_h; int i, j; for(i = 0; i < l.batch; ++i){ for(j = 0; j < l.groups; ++j){ float *a = l.delta_gpu + (i*l.groups + j)*m*k; float *b = net.workspace; float *c = l.weight_updates_gpu + j*l.nweights/l.groups; float *im = net.input_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w; im2col_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n); if(net.delta_gpu){ if(l.binary || l.xnor) swap_binary(&l); a = l.weights_gpu + j*l.nweights/l.groups; b = l.delta_gpu + (i*l.groups + j)*m*k; c = net.workspace; gemm_gpu(1,0,n,k,m,1,a,n,b,k,0,c,k); col2im_gpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, net.delta_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w); if(l.binary || l.xnor) { swap_binary(&l); } } if(l.xnor) gradient_array_gpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, net.delta_gpu + i*l.c*l.h*l.w); } } #endif } void pull_convolutional_layer(layer l) { cuda_pull_array(l.weights_gpu, l.weights, l.nweights); cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights); cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void push_convolutional_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.nweights); cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights); cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void update_convolutional_layer_gpu(layer l, update_args a) { float 
learning_rate = a.learning_rate*l.learning_rate_scale; float momentum = a.momentum; float decay = a.decay; int batch = a.batch; if(a.adam){ adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t); adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); if(l.scales_gpu){ adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); } }else{ axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1); axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); if(l.scales_gpu){ axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); } } }
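Both the HIP file above and the CUDA original that follows use the same fixed-point scheme inside quant_cuda_mm / quant_cuda_bnmm: weights are clamped to [-2, 1.984375] and stored as signed Q2.6 (×64), activations are clamped to [-16, 15.875] and stored as Q5.3 (×8 with rounding), biases are clamped to [-8, 7.9375] as Q4.4 (×16), and the running sum is carried in a 32-bit accumulator with 16 fractional bits. The host-side reference of one multiply-accumulate step below is an illustrative sketch (not part of the original source) that mirrors that kernel arithmetic and can be used to spot-check a kernel result.

#include <stdint.h>

/* Illustrative host-side reference of one quantised MAC step, mirroring
 * the arithmetic in quant_cuda_mm / quant_cuda_bnmm above. */
static float quant_mac(float sum, float weight, float activation) {
    /* clamp to the Q2.6 and Q5.3 ranges used by the kernels */
    if (weight >= 1.984375f)       weight = 1.984375f;
    else if (weight <= -2.0f)      weight = -2.0f;
    if (activation >= 15.875f)     activation = 15.875f;
    else if (activation <= -16.0f) activation = -16.0f;

    int8_t  qw   = (int8_t)(weight * 64);            /* Q2.6 weight         */
    int8_t  qa   = (int8_t)(activation * 8 + 0.5f);  /* Q5.3 activation     */
    int32_t acc  = (int32_t)(sum * (1 << 16));       /* Q16.16 running sum  */
    int16_t prod = qw * qa;                          /* 9 fractional bits   */
    acc += (int32_t)prod * (1 << 7);                 /* align 9 -> 16 bits  */
    return (float)acc / (1 << 16);
}

The bias path is analogous: the Q4.4 bias is shifted by 12 bits (16 - 4) before being added to the accumulator, exactly as in the kernels.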
9b7e1655cef74f7bc99f42c977b649da2249a567.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "convolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "cuda.h" #include <stdint.h> #include <unistd.h> } /* syoh added: cuda matmul based on fixed-point quantization */ #define BLOCK_SIZE 16 static int lnum = 0; __global__ void quant_cuda_bnmm(float *a, float *b, float *c, int m, int n, int k, float *mean, float *variance, float *scale, float* biase, const int lnum) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; float bn_b = sqrtf(variance[row]) + 0.000001f; float bn_a = mean[row]; if( col < k && row < m ){ for(int i = 0; i < n; i++){ // n is k in normal matmul float bn_w, input; if (lnum == 0){ bn_w = a[row*n+i]; input = (b[i*k+col]/bn_b)*scale[row]; } else { bn_w = (a[row*n+i]/bn_b)*scale[row]; input = b[i*k+col]; } // quantization if (bn_w >= 1.984375) // 2-6 bn_w = 1.984375; else if (bn_w <= -2) bn_w = -2; if (input >= 15.875) // 5-3 input = 15.875; else if (input <= -16) input = -16; int8_t qa = ((int8_t)(bn_w*64)); int8_t qb = ((int8_t)(input*8+0.5)); // integer MAC int32_t qacc = (int32_t)(sum*(1<<16)); int16_t qmult = qa * qb; qacc = (int32_t)qmult*(1<<7) + qacc; sum = (float) qacc / (1<<16); } float bn_biase = (biase[row] - ((bn_a/bn_b)*scale[row])); // quantization if (bn_biase >= 7.937500) // 4-4 bn_biase = 7.937500; else if (bn_biase <= -8) bn_biase = -8; int8_t qbn_biase = ((int8_t)(bn_biase*16)); // integer MAC int32_t qacc = (int32_t)(sum*(1<<16)); qacc = (int32_t)qbn_biase*(1<<12) + qacc; // 12 = 16 - fraction_bit sum = (float)qacc / (1<<16); c[row * k + col] = sum; __syncthreads(); } } __global__ void quant_cuda_mm(float *a, float *b, float *c, int m, int n, int k, float* biase) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; if( col < k && row < m ){ for(int i = 0; i < n; i++){ // n is k in normal matmul /* original matmul */ //sum += a[row*n+i] * b[i*k+col]; /* quantization */ float aval = a[row*n+i]; float bval = b[i*k+col]; if (aval >= 1.984375) aval = 1.984375; else if (aval <= -2) aval = -2; if (bval >= 15.875) bval = 15.875; else if (bval <= -16) bval = -16; int8_t qa = ((int8_t)(aval*64)); // -2^7 ~ int8_t qb = ((int8_t)(bval*8+0.5)); // -2^7 ~ // integer mult int32_t qacc = (int32_t)(sum*(1<<16)); //-2^31 ~ int16_t qmult = qa * qb; // -2^14 ~ --> -2^5 ~ qacc = (int32_t)qmult*(1<<7) + qacc; sum = (float) qacc / (1<<16); } float bn_biase = biase[row]; // quantization if (bn_biase >= 7.937500) // 4-4 bn_biase = 7.937500; else if (bn_biase <= -8) bn_biase = -8; int8_t qbn_biase = ((int8_t)(bn_biase*16)); // integer MAC int32_t qacc = (int32_t)(sum*(1<<16)); qacc = (int32_t)qbn_biase*(1<<12) + qacc; // 12 = 16 - fraction_bit sum = (float)qacc / (1<<16); c[row * k + col] = sum; __syncthreads(); } } __global__ void quant_cuda_sqmm(float *d_a, float *d_b, float *d_result, int n) { __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; int idx; float tmp = 0; for (int sub = 0; sub < gridDim.x; ++sub){ idx = row * n + sub * BLOCK_SIZE + threadIdx.x; if(idx >= n*n) tile_a[threadIdx.y][threadIdx.x] = 0; else tile_a[threadIdx.y][threadIdx.x] = d_a[idx]; idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col; if(idx >= n*n) 
tile_b[threadIdx.y][threadIdx.x] = 0; else tile_b[threadIdx.y][threadIdx.x] = d_b[idx]; __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x]; __syncthreads(); } if(row < n && col < n) d_result[row * n + col] = tmp; } void quant_gpu_mm(int m, int k, int n, float* d_a, float* d_b, float* d_c, float* biase){ unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); if(m == k && k == n){ quant_cuda_sqmm<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, k); } else{ quant_cuda_mm<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, k, n, biase); } cudaThreadSynchronize(); } void quant_gpu_bnmm(int m, int k, int n, float* d_a, float* d_b, float* d_c, float* mean, float* variance, float* scale, float* biase){ unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (n + BLOCK_SIZE - 1) / BLOCK_SIZE; if (lnum == 15) lnum = 0; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); if(m == k && k == n){ quant_cuda_sqmm<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, k); } else{ quant_cuda_bnmm<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, k, n, mean, variance, scale, biase, lnum++); } cudaThreadSynchronize(); } /* end added */ __global__ void binarize_kernel(float *x, int n, float *binary) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; binary[i] = (x[i] >= 0) ? 1 : -1; } void binarize_gpu(float *x, int n, float *binary) { binarize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, binary); check_error(cudaPeekAtLastError()); } __global__ void binarize_input_kernel(float *input, int n, int size, float *binary) { int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (s >= size) return; int i = 0; float mean = 0; for(i = 0; i < n; ++i){ mean += fabsf(input[i*size + s]); } mean = mean / n; for(i = 0; i < n; ++i){ binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean; } } void binarize_input_gpu(float *input, int n, int size, float *binary) { binarize_input_kernel<<<cuda_gridsize(size), BLOCK>>>(input, n, size, binary); check_error(cudaPeekAtLastError()); } __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary) { int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (f >= n) return; int i = 0; float mean = 0; for(i = 0; i < size; ++i){ mean += fabsf(weights[f*size + i]); } mean = mean / size; for(i = 0; i < size; ++i){ binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean; //binary[f*size + i] = weights[f*size + i]; } } void binarize_weights_gpu(float *weights, int n, int size, float *binary) { binarize_weights_kernel<<<cuda_gridsize(n), BLOCK>>>(weights, n, size, binary); check_error(cudaPeekAtLastError()); } // DONE_180725 float* get_qinput(const int m, const int channel, const int height, const int width, float* input, float* inputtest){ printf("lnum: %d, quantizing input.. 
dim info ( m: %d, c: %d, h: %d, w: %d )\n", lnum, m, channel, height, width); float* gtocinput = (float*)malloc(channel*height*width*sizeof(float)); int8_t* qinput = (int8_t*)malloc(channel*height*width*sizeof(int8_t)); cuda_pull_array(input, gtocinput, channel*height*width); // scale, var, mean, biases: size M for (int c=0; c<channel; c++){ // channel - height - width for(int h=0; h<height; h++){ for(int w=0; w<width; w++){ int access = (c*height + h)*width + w; float input = gtocinput[access]; if (input >= 15.875) // 5-3 input = 15.875; else if (input <= -16) input = -16; int8_t qb = ((int8_t)(input*8+0.5)); qinput[access] = qb; inputtest[access] = gtocinput[access]; } } } char buf_bnw[256]; sprintf(buf_bnw, "./DAC_final_weights_180726/input_raw_%d.data", lnum); FILE* bnw_fp = fopen(buf_bnw, "w"); fwrite(qinput, sizeof(int8_t), channel*height*width, bnw_fp); free(qinput); free(gtocinput); fclose(bnw_fp); if (lnum == 15) sleep(10); float* ctoginput = cuda_make_array(inputtest, channel*height*width); return ctoginput; } void forward_convolutional_layer_gpu(convolutional_layer l, network net) { fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); if(l.binary){ binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); swap_binary(&l); } if(l.xnor){ binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); swap_binary(&l); binarize_gpu(net.input_gpu, l.c*l.h*l.w*l.batch, l.binary_input_gpu); net.input_gpu = l.binary_input_gpu; } #ifdef CUDNN float one = 1; cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, net.workspace, l.workspace_size, &one, l.dstTensorDesc, l.output_gpu); #else int i, j; int m = l.n/l.groups; int k = l.size*l.size*l.c/l.groups; int n = l.out_w*l.out_h; for(i = 0; i < l.batch; ++i){ for(j = 0; j < l.groups; ++j){ float *a = l.weights_gpu + j*l.nweights/l.groups; float *b = net.workspace; float *c = l.output_gpu + (i*l.groups + j)*n*m; float *test = (float*)malloc(l.c*l.w*l.h*sizeof(float)); float *newinput = get_qinput(l.n, l.c, l.h, l.w, net.input_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w, test); // validated //im2col_gpu(net.input_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w, // l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); im2col_gpu(newinput, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); /* syoh: toggle to custom cuda matmul (quant added) */ //gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n); if (l.batch_normalize) quant_gpu_bnmm(m, k, n, a, b, c, l.rolling_mean_gpu, l.rolling_variance_gpu, l.scales_gpu, l.biases_gpu); // batchnorm_combined else quant_gpu_mm(m, k, n, a, b, c, l.biases_gpu); // orig cuda_mm } } #endif /* if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } */ activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation); if(l.binary || l.xnor) swap_binary(&l); } __global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -(size/2.f); int h_offset = -(size/2.f); int out_index = j + w*(i + h*(k + c*b)); int l, m; for(l = 0; l < size; ++l){ for(m = 0; m < size; ++m){ int cur_h = h_offset + i + l; int cur_w = w_offset + j + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = 
(cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0; } } } extern "C" void smooth_layer(layer l, int size, float rate) { int h = l.out_h; int w = l.out_w; int c = l.out_c; size_t n = h*w*c*l.batch; smooth_kernel<<<cuda_gridsize(n), BLOCK>>>(l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu); check_error(cudaPeekAtLastError()); } void backward_convolutional_layer_gpu(convolutional_layer l, network net) { if(l.smooth){ smooth_layer(l, 5, l.smooth); } constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1); gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, net); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } float *original_input = net.input_gpu; if(l.xnor) net.input_gpu = l.binary_input_gpu; #ifdef CUDNN float one = 1; cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bf_algo, net.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu); if(net.delta_gpu){ if(l.binary || l.xnor) swap_binary(&l); cudnnConvolutionBackwardData(cudnn_handle(), &one, l.weightDesc, l.weights_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bd_algo, net.workspace, l.workspace_size, &one, l.dsrcTensorDesc, net.delta_gpu); if(l.binary || l.xnor) swap_binary(&l); if(l.xnor) gradient_array_gpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, net.delta_gpu); } #else int m = l.n/l.groups; int n = l.size*l.size*l.c/l.groups; int k = l.out_w*l.out_h; int i, j; for(i = 0; i < l.batch; ++i){ for(j = 0; j < l.groups; ++j){ float *a = l.delta_gpu + (i*l.groups + j)*m*k; float *b = net.workspace; float *c = l.weight_updates_gpu + j*l.nweights/l.groups; float *im = net.input_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w; im2col_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n); if(net.delta_gpu){ if(l.binary || l.xnor) swap_binary(&l); a = l.weights_gpu + j*l.nweights/l.groups; b = l.delta_gpu + (i*l.groups + j)*m*k; c = net.workspace; gemm_gpu(1,0,n,k,m,1,a,n,b,k,0,c,k); col2im_gpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, net.delta_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w); if(l.binary || l.xnor) { swap_binary(&l); } } if(l.xnor) gradient_array_gpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, net.delta_gpu + i*l.c*l.h*l.w); } } #endif } void pull_convolutional_layer(layer l) { cuda_pull_array(l.weights_gpu, l.weights, l.nweights); cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights); cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void push_convolutional_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.nweights); cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights); cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void update_convolutional_layer_gpu(layer l, update_args a) { float learning_rate = 
a.learning_rate*l.learning_rate_scale;
    float momentum = a.momentum;
    float decay = a.decay;
    int batch = a.batch;

    if(a.adam){
        adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t);
        adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t);
        if(l.scales_gpu){
            adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t);
        }
    }else{
        axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
        axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
        scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1);

        axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
        scal_gpu(l.n, momentum, l.bias_updates_gpu, 1);

        if(l.scales_gpu){
            axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
            scal_gpu(l.n, momentum, l.scale_updates_gpu, 1);
        }
    }
}
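/*
 * Illustrative sketch, not taken from the original darknet sources:
 * a host-side reference of the per-element fixed-point MAC performed by
 * quant_cuda_mm / quant_cuda_bnmm above. The Q-formats are inferred from
 * the constants in those kernels and should be read as assumptions:
 *   weights (bn-folded)  Q2.6   int8   (scale 64,  clamp [-2, 1.984375])
 *   activations          Q5.3   int8   (scale 8,   clamp [-16, 15.875])
 *   biases               Q4.4   int8   (scale 16,  clamp [-8, 7.9375])
 *   accumulator          Q16.16 int32  (scale 1<<16)
 * A Q2.6 x Q5.3 product carries 9 fraction bits, hence the <<7 shift to
 * align it with the Q16.16 accumulator; the Q4.4 bias is shifted by <<12
 * for the same reason (12 = 16 - 4 fraction bits).
 */
static inline float quant_mac_reference(float acc, float w, float x)
{
    /* saturate to the representable ranges used by the kernels */
    if (w >= 1.984375f) w = 1.984375f; else if (w <= -2.0f) w = -2.0f;
    if (x >= 15.875f)   x = 15.875f;   else if (x <= -16.0f) x = -16.0f;

    int8_t  qw   = (int8_t)(w * 64);           /* Q2.6 */
    int8_t  qx   = (int8_t)(x * 8 + 0.5f);     /* Q5.3 */
    int32_t qacc = (int32_t)(acc * (1 << 16)); /* Q16.16 accumulator */
    int16_t qmul = qw * qx;                    /* product has 9 fraction bits */

    qacc += (int32_t)qmul * (1 << 7);          /* align 9 -> 16 fraction bits */
    return (float)qacc / (1 << 16);
}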
b091a89b2c48811f77db07474e6aac3befeb397b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" { #include "lua.h" #include "lualib.h" #include "lauxlib.h" } #include "luaT.h" #include "THH.h" #include <stdio.h> #include <assert.h> #include <math_constants.h> #include <stdint.h> #include <unistd.h> #include <png++/image.hpp> #define TB 128 #define DISP_MAX 256 THCState* getCutorchState(lua_State* L) { lua_getglobal(L, "cutorch"); lua_getfield(L, -1, "getState"); lua_call(L, 0, 1); THCState *state = (THCState*) lua_touserdata(L, -1); lua_pop(L, 2); return state; } void checkCudaError(lua_State *L) { hipError_t status = hipPeekAtLastError(); if (status != hipSuccess) { luaL_error(L, hipGetErrorString(status)); } } #define COLOR_DIFF(x, i, j) (abs(x[i] - x[j])) THCudaTensor *new_tensor_like(THCState *state, THCudaTensor *x) { THCudaTensor *y = THCudaTensor_new(state); THCudaTensor_resizeAs(state, y, x); return y; } __device__ void sort(float *x, int n) { for (int i = 0; i < n - 1; i++) { int min = i; for (int j = i + 1; j < n; j++) { if (x[j] < x[min]) { min = j; } } float tmp = x[min]; x[min] = x[i]; x[i] = tmp; } } __global__ void ad(float *x0, float *x1, float *output, int size, int size2, int size3, int direction) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = id; int x = d % size3; d /= size3; int y = d % size2; d /= size2; d *= direction; float dist; if (0 <= x + d && x + d < size3) { int cnt = 0; dist = 0; for (int yy = y - 4; yy <= y + 4; yy++) { for (int xx = x - 4; xx <= x + 4; xx++) { if (0 <= xx && xx < size3 && 0 <= xx + d && xx + d < size3 && 0 <= yy && yy < size2) { int ind = yy * size3 + xx; dist += abs(x0[ind] - x1[ind + d]); cnt++; } } } dist /= cnt; } else { dist = CUDART_NAN; } output[id] = dist; } } int ad(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int direction = luaL_checkinteger(L, 4); assert(direction == -1 || direction == 1); hipLaunchKernelGGL(( ad), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), direction); checkCudaError(L); return 0; } __global__ void census(float *x0, float *x1, float *output, int size, int num_channels, int size2, int size3, int direction) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = id; int x = d % size3; d /= size3; int y = d % size2; d /= size2; d *= direction; float dist; if (0 <= x + d && x + d < size3) { dist = 0; for (int i = 0; i < num_channels; i++) { int ind_p = (i * size2 + y) * size3 + x; for (int yy = y - 4; yy <= y + 4; yy++) { for (int xx = x - 4; xx <= x + 4; xx++) { if (0 <= xx && xx < size3 && 0 <= xx + d && xx + d < size3 && 0 <= yy && yy < size2) { int ind_q = (i * size2 + yy) * size3 + xx; if ((x0[ind_q] < x0[ind_p]) != (x1[ind_q + d] < x1[ind_p + d])) { dist++; } } else { dist++; } } } } dist /= num_channels; } else { dist = CUDART_NAN; } output[id] = dist; } } int census(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *out = 
(THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int direction = luaL_checkinteger(L, 4); assert(direction == -1 || direction == 1); hipLaunchKernelGGL(( census), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, x0, 1), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), direction); checkCudaError(L); return 0; } #if 0 __global__ void add_vol(float *vol, float *cnt, float *out, int size, int size1, int size2, int size3, float ratio) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = id; int x = d % size3; d /= size3; int y = d % size2; d /= size2; int lo = floor(d * ratio); int hi = lo + 1; float alpha = (d * ratio) - lo; assert(0 <= lo && hi < size1); float val = vol[(lo * size2 + y) * size3 + x] * (1 - alpha) + vol[(hi * size2 + y) * size3 + x] * alpha; if (!isnan(val) && cnt[id] > 0) { out[id] += val; cnt[id] += 1; } } } int add_vol(lua_State *L) { THCudaTensor *vol = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *cnt = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); float ratio = luaL_checknumber(L, 4); hipLaunchKernelGGL(( add_vol), dim3((THCudaTensor_nElement(out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(vol), THCudaTensor_data(cnt), THCudaTensor_data(out), THCudaTensor_nElement(out), THCudaTensor_size(vol, 1), THCudaTensor_size(out, 2), THCudaTensor_size(out, 3), ratio); checkCudaError(L); return 0; } __global__ void rho(float *x, int size, float lambda) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { x[id] = 1 - exp(-x[id] / lambda); } } int rho(lua_State *L) { THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); float lambda = luaL_checknumber(L, 2); hipLaunchKernelGGL(( rho), dim3((THCudaTensor_nElement(x) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(x), THCudaTensor_nElement(x), lambda); checkCudaError(L); return 0; } #endif __global__ void spatial_argmin(float *input, float *output, int size, int size1, int size23) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int dim23 = id % size23; int dim0 = id / size23; int argmin = 0; float min = CUDART_INF; for (int i = 0; i < size1; i++) { float val = input[(dim0 * size1 + i) * size23 + dim23]; if (val < min) { min = val; argmin = i; } } output[id] = argmin + 1; } } int spatial_argmin(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); hipLaunchKernelGGL(( spatial_argmin), dim3((THCudaTensor_nElement(state, output) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, input), THCudaTensor_data(state, output), THCudaTensor_nElement(state, output), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, output, 3)); checkCudaError(L); return 0; } __global__ void cross(float *x0, float *out, int size, int dim2, int dim3, int L1, float tau1) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int dir = id; int x = dir % dim3; dir /= dim3; int y = dir % dim2; dir /= dim2; int dx = 0; int dy = 0; if (dir == 0) { dx = -1; } else if (dir == 1) { dx = 1; } else if (dir == 2) { dy = -1; } else if (dir == 3) { dy = 1; } 
else { assert(0); } int xx, yy, ind1, ind2, dist; ind1 = y * dim3 + x; for (xx = x + dx, yy = y + dy;;xx += dx, yy += dy) { if (xx < 0 || xx >= dim3 || yy < 0 || yy >= dim2) break; dist = max(abs(xx - x), abs(yy - y)); if (dist == 1) continue; ind2 = yy * dim3 + xx; /* rule 1 */ if (COLOR_DIFF(x0, ind1, ind2) >= tau1) break; /* rule 2 */ if (dist >= L1) break; } out[id] = dir <= 1 ? xx : yy; } } int cross(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int L1 = luaL_checkinteger(L, 3); float tau1 = luaL_checknumber(L, 4); hipLaunchKernelGGL(( cross), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), L1, tau1); checkCudaError(L); return 0; } __global__ void cbca(float *x0c, float *x1c, float *vol, float *out, int size, int dim2, int dim3, int direction) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = id; int x = d % dim3; d /= dim3; int y = d % dim2; d /= dim2; if (x + d * direction < 0 || x + d * direction >= dim3) { out[id] = vol[id]; } else { float sum = 0; int cnt = 0; int yy_s = max(x0c[(2 * dim2 + y) * dim3 + x], x1c[(2 * dim2 + y) * dim3 + x + d * direction]); int yy_t = min(x0c[(3 * dim2 + y) * dim3 + x], x1c[(3 * dim2 + y) * dim3 + x + d * direction]); for (int yy = yy_s + 1; yy < yy_t; yy++) { int xx_s = max(x0c[(0 * dim2 + yy) * dim3 + x], x1c[(0 * dim2 + yy) * dim3 + x + d * direction] - d * direction); int xx_t = min(x0c[(1 * dim2 + yy) * dim3 + x], x1c[(1 * dim2 + yy) * dim3 + x + d * direction] - d * direction); for (int xx = xx_s + 1; xx < xx_t; xx++) { float val = vol[(d * dim2 + yy) * dim3 + xx]; assert(!isnan(val)); sum += val; cnt++; } } assert(cnt > 0); out[id] = sum / cnt; assert(!isnan(out[id])); } } } int cbca(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0c = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1c = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *vol_in = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *vol_out = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); int direction = luaL_checkinteger(L, 5); assert(direction == -1 or direction == 1); hipLaunchKernelGGL(( cbca), dim3((THCudaTensor_nElement(state, vol_out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, x0c), THCudaTensor_data(state, x1c), THCudaTensor_data(state, vol_in), THCudaTensor_data(state, vol_out), THCudaTensor_nElement(state, vol_out), THCudaTensor_size(state, vol_out, 2), THCudaTensor_size(state, vol_out, 3), direction); checkCudaError(L); return 0; } __global__ void sgm(float *x0, float *x1, float *vol, float *tmp, float *out, int dim1, int dim2, int dim3, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int sgm_direction, int direction) { int x, y, dx, dy; dx = dy = 0; if (sgm_direction <= 1) { y = blockIdx.x * blockDim.x + threadIdx.x; if (y >= dim2) { return; } if (sgm_direction == 0) { x = 0; dx = 1; } else if (sgm_direction == 1) { x = dim3 - 1; dx = -1; } } else if (sgm_direction <= 3) { x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= dim3) { return; } if (sgm_direction == 2) { y = 0; dy = 1; } else if (sgm_direction == 3) { y = dim2 - 1; dy = -1; } } assert(dim1 <= 
400); float tmp_curr_[400]; float tmp_prev_[400]; float *tmp_curr = tmp_curr_; float *tmp_prev = tmp_prev_; float min_prev = CUDART_INF; for (; 0 <= y && y < dim2 && 0 <= x && x < dim3; x += dx, y += dy) { float min_curr = CUDART_INF; for (int d = 0; d < dim1; d++) { int ind = (d * dim2 + y) * dim3 + x; if (x + d * direction < 0 || x + d * direction >= dim3 || y - dy < 0 || y - dy >= dim2 || x + d * direction - dx < 0 || x + d * direction - dx >= dim3 || x - dx < 0 || x - dx >= dim3) { out[ind] += vol[ind]; tmp_curr[d] = vol[ind]; } else { int ind2 = y * dim3 + x; float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * dim3 - dx); float D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * dim3 - dx); float P1, P2; if (D1 < tau_so && D2 < tau_so) { P1 = pi1; P2 = (pi1 * pi2); } else if (D1 > tau_so && D2 > tau_so) { P1 = pi1 / (sgm_q1 * sgm_q2); P2 = (pi1 * pi2) / (sgm_q1 * sgm_q2); } else { P1 = pi1 / sgm_q1; P2 = (pi1 * pi2) / sgm_q1; } assert(min_prev != CUDART_INF); float cost = min(tmp_prev[d], min_prev + P2); if (d > 0) { cost = min(cost, tmp_prev[d - 1] + (sgm_direction == 2 ? P1 / alpha1 : P1)); } if (d < dim1 - 1) { cost = min(cost, tmp_prev[d + 1] + (sgm_direction == 3 ? P1 / alpha1 : P1)); } float val = vol[ind] + cost - min_prev; out[ind] += val; tmp_curr[d] = val; } if (tmp_curr[d] < min_curr) { min_curr = tmp_curr[d]; } } min_prev = min_curr; float *swap = tmp_curr; tmp_curr = tmp_prev; tmp_prev = swap; } } int sgm(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *vol = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *tmp = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); float pi1 = luaL_checknumber(L, 6); float pi2 = luaL_checknumber(L, 7); float tau_so = luaL_checknumber(L, 8); float alpha1 = luaL_checknumber(L, 9); float sgm_q1 = luaL_checknumber(L, 10); float sgm_q2 = luaL_checknumber(L, 11); int direction = luaL_checknumber(L, 12); int dim1 = THCudaTensor_size(state, out, 1); int dim2 = THCudaTensor_size(state, out, 2); int dim3 = THCudaTensor_size(state, out, 3); for (int sgm_direction = 0; sgm_direction < 4; sgm_direction++) { int size = sgm_direction <= 1 ? 
dim2 : dim3; hipLaunchKernelGGL(( sgm), dim3((size - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, vol), THCudaTensor_data(state, tmp), THCudaTensor_data(state, out), dim1, dim2, dim3, pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, sgm_direction, direction); } checkCudaError(L); return 0; } #define INDEX(dim0, dim1, dim2, dim3) \ assert((dim1) >= 0 && (dim1) < size1 && (dim2) >= 0 && (dim2) < size2 && (dim3) >= 0 && (dim3) < size3), \ ((((dim0) * size1 + (dim1)) * size2 + (dim2)) * size3 + dim3) template <int sgm_direction> __global__ void sgm2(float *x0, float *x1, float *input, float *output, float *tmp, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int direction, int size1, int size2, int size3, int step) { int x, y, dx, dy; int d = threadIdx.x; if (sgm_direction == 0) { /* right */ x = step; y = blockIdx.x; dx = 1; dy = 0; } else if (sgm_direction == 1) { /* left */ x = size2 - 1 - step; y = blockIdx.x; dx = -1; dy = 0; } else if (sgm_direction == 2) { /* down */ x = blockIdx.x; y = step; dx = 0; dy = 1; } else if (sgm_direction == 3) { /* up */ x = blockIdx.x; y = size1 - 1 - step; dx = 0; dy = -1; } if (y - dy < 0 || y - dy >= size1 || x - dx < 0 || x - dx >= size2) { float val = input[INDEX(0, y, x, d)]; output[INDEX(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; return; } __shared__ float output_s[400], output_min[400]; output_s[d] = output_min[d] = tmp[d * size2 + blockIdx.x]; __syncthreads(); for (int i = 256; i > 0; i /= 2) { if (d < i && d + i < size3 && output_min[d + i] < output_min[d]) { output_min[d] = output_min[d + i]; } __syncthreads(); } int ind2 = y * size2 + x; float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * size2 - dx); float D2; int xx = x + d * direction; if (xx < 0 || xx >= size2 || xx - dx < 0 || xx - dx >= size2) { D2 = 10; } else { D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * size2 - dx); } float P1, P2; if (D1 < tau_so && D2 < tau_so) { P1 = pi1; P2 = pi2; } else if (D1 > tau_so && D2 > tau_so) { P1 = pi1 / (sgm_q1 * sgm_q2); P2 = pi2 / (sgm_q1 * sgm_q2); } else { P1 = pi1 / sgm_q1; P2 = pi2 / sgm_q1; } float cost = min(output_s[d], output_min[0] + P2); if (d - 1 >= 0) { cost = min(cost, output_s[d - 1] + (sgm_direction == 2 ? P1 / alpha1 : P1)); } if (d + 1 < size3) { cost = min(cost, output_s[d + 1] + (sgm_direction == 3 ? 
P1 / alpha1 : P1)); } float val = input[INDEX(0, y, x, d)] + cost - output_min[0]; output[INDEX(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; } int sgm2(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *tmp = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); float pi1 = luaL_checknumber(L, 6); float pi2 = luaL_checknumber(L, 7); float tau_so = luaL_checknumber(L, 8); float alpha1 = luaL_checknumber(L, 9); float sgm_q1 = luaL_checknumber(L, 10); float sgm_q2 = luaL_checknumber(L, 11); int direction = luaL_checknumber(L, 12); int size1 = THCudaTensor_size(state, output, 1) * THCudaTensor_size(state, output, 3); int size2 = THCudaTensor_size(state, output, 2) * THCudaTensor_size(state, output, 3); int disp_max = THCudaTensor_size(state, output, 3); for (int step = 0; step < THCudaTensor_size(state, input, 2); step++) { hipLaunchKernelGGL(( sgm2<0>), dim3((size1 - 1) / disp_max + 1), dim3(disp_max), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), THCudaTensor_data(state, tmp), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 2); step++) { hipLaunchKernelGGL(( sgm2<1>), dim3((size1 - 1) / disp_max + 1), dim3(disp_max), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), THCudaTensor_data(state, tmp), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 1); step++) { hipLaunchKernelGGL(( sgm2<2>), dim3((size2 - 1) / disp_max + 1), dim3(disp_max), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), THCudaTensor_data(state, tmp), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 1); step++) { hipLaunchKernelGGL(( sgm2<3>), dim3((size2 - 1) / disp_max + 1), dim3(disp_max), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), THCudaTensor_data(state, tmp), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } checkCudaError(L); return 0; } template <int sgm_direction> __global__ void sgm3(float *x0, float *x1, float *input, float *output, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int direction, int size1, int size2, int size3, int step) { int x, y, dx, dy; int d = threadIdx.x; if (sgm_direction == 0) { /* right */ x = step; y = blockIdx.x; dx = 1; dy = 0; } else if (sgm_direction == 1) { /* left */ x = size2 - 1 - step; y = blockIdx.x; dx = -1; dy = 0; } else if 
(sgm_direction == 2) { /* down */ x = blockIdx.x; y = step; dx = 0; dy = 1; } else if (sgm_direction == 3) { /* up */ x = blockIdx.x; y = size1 - 1 - step; dx = 0; dy = -1; } if (y - dy < 0 || y - dy >= size1 || x - dx < 0 || x - dx >= size2) { output[INDEX(sgm_direction, y, x, d)] = input[INDEX(0, y, x, d)]; return; } __shared__ float output_s[400], output_min[400]; output_s[d] = output_min[d] = output[INDEX(sgm_direction, y - dy, x - dx, d)]; __syncthreads(); for (int i = 256; i > 0; i /= 2) { if (d < i && d + i < size3 && output_min[d + i] < output_min[d]) { output_min[d] = output_min[d + i]; } __syncthreads(); } int ind2 = y * size2 + x; float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * size2 - dx); float D2; int xx = x + d * direction; if (xx < 0 || xx >= size2 || xx - dx < 0 || xx - dx >= size2) { D2 = 10; } else { D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * size2 - dx); } float P1, P2; if (D1 < tau_so && D2 < tau_so) { P1 = pi1; P2 = pi2; } else if (D1 > tau_so && D2 > tau_so) { P1 = pi1 / (sgm_q1 * sgm_q2); P2 = pi2 / (sgm_q1 * sgm_q2); } else { P1 = pi1 / sgm_q1; P2 = pi2 / sgm_q1; } float cost = min(output_s[d], output_min[0] + P2); if (d - 1 >= 0) { cost = min(cost, output_s[d - 1] + (sgm_direction == 2 ? P1 / alpha1 : P1)); } if (d + 1 < size3) { cost = min(cost, output_s[d + 1] + (sgm_direction == 3 ? P1 / alpha1 : P1)); } output[INDEX(sgm_direction, y, x, d)] = input[INDEX(0, y, x, d)] + cost - output_min[0]; } int sgm3(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); float pi1 = luaL_checknumber(L, 5); float pi2 = luaL_checknumber(L, 6); float tau_so = luaL_checknumber(L, 7); float alpha1 = luaL_checknumber(L, 8); float sgm_q1 = luaL_checknumber(L, 9); float sgm_q2 = luaL_checknumber(L, 10); int direction = luaL_checknumber(L, 11); int size1 = THCudaTensor_size(state, output, 1) * THCudaTensor_size(state, output, 3); int size2 = THCudaTensor_size(state, output, 2) * THCudaTensor_size(state, output, 3); int disp_max = THCudaTensor_size(state, output, 3); for (int step = 0; step < THCudaTensor_size(state, input, 2); step++) { hipLaunchKernelGGL(( sgm3<0>), dim3((size1 - 1) / disp_max + 1), dim3(disp_max), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 2); step++) { hipLaunchKernelGGL(( sgm3<1>), dim3((size1 - 1) / disp_max + 1), dim3(disp_max), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 1); step++) { hipLaunchKernelGGL(( sgm3<2>), dim3((size2 - 1) / disp_max + 1), dim3(disp_max), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), pi1, pi2, 
tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 1); step++) { hipLaunchKernelGGL(( sgm3<3>), dim3((size2 - 1) / disp_max + 1), dim3(disp_max), 0, 0, THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } checkCudaError(L); return 0; } __global__ void fliplr(float *in, float *out, int size, int dim3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; out[id + dim3 - 2 * x - 1] = in[id]; } } int fliplr(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *in = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); hipLaunchKernelGGL(( fliplr), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, in), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 3)); checkCudaError(L); return 0; } __global__ void outlier_detection(float *d0, float *d1, float *outlier, int size, int dim3, int disp_max) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int d0i = d0[id]; if (x - d0i < 0) { //assert(0); outlier[id] = 1; } else if (abs(d0[id] - d1[id - d0i]) < 1.1) { outlier[id] = 0; /* match */ } else { outlier[id] = 1; /* occlusion */ for (int d = 0; d < disp_max; d++) { if (x - d >= 0 && abs(d - d1[id - d]) < 1.1) { outlier[id] = 2; /* mismatch */ break; } } } } } int outlier_detection(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *d1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *outlier = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int disp_max = luaL_checkinteger(L, 4); hipLaunchKernelGGL(( outlier_detection), dim3((THCudaTensor_nElement(state, d0) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, d0), THCudaTensor_data(state, d1), THCudaTensor_data(state, outlier), THCudaTensor_nElement(state, d0), THCudaTensor_size(state, d0, 3), disp_max); checkCudaError(L); return 0; } #if 0 __global__ void iterative_region_voting(float *d0, float *x0c, float *x1c, float *outlier, float *d0_out, float *outlier_out, int size, int dim2, int dim3, float tau_s, float tau_h, int disp_max) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; d0_out[id] = d0[id]; outlier_out[id] = outlier[id]; if (outlier[id] == 0) return; assert(disp_max < DISP_MAX); int hist[DISP_MAX]; for (int i = 0; i < disp_max; i++) { hist[i] = 0; } int yy_s = x0c[(2 * dim2 + y) * dim3 + x]; int yy_t = x0c[(3 * dim2 + y) * dim3 + x]; for (int yy = yy_s + 1; yy < yy_t; yy++) { int xx_s = x0c[(0 * dim2 + yy) * dim3 + x]; int xx_t = x0c[(1 * dim2 + yy) * dim3 + x]; for (int xx = xx_s + 1; xx < xx_t; xx++) { if (outlier[yy * dim3 + xx] == 0) { hist[(int)d0[yy * dim3 + xx]]++; } } } int cnt = 0; int max_i = 0; for (int i = 0; i < disp_max; i++) { cnt += hist[i]; if (hist[i] > hist[max_i]) { max_i = i; } } if (cnt > tau_s && (float)hist[max_i] / cnt > tau_h) { outlier_out[id] = 0; d0_out[id] = max_i; } } } 
int iterative_region_voting(lua_State *L) { THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x0c = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *x1c = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *outlier = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); float tau_s = luaL_checknumber(L, 5); float tau_h = luaL_checknumber(L, 6); int disp_max = luaL_checkinteger(L, 7); int iterations = luaL_checkinteger(L, 8); THCudaTensor *d0_tmp = new_tensor_like(state, d0); THCudaTensor *outlier_tmp = new_tensor_like(state, outlier); assert(iterations % 2 == 0); for (int i = 0; i < iterations; i++) { hipLaunchKernelGGL(( iterative_region_voting), dim3((THCudaTensor_nElement(d0) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(i % 2 == 0 ? d0 : d0_tmp), THCudaTensor_data(x0c), THCudaTensor_data(x1c), THCudaTensor_data(i % 2 == 0 ? outlier : outlier_tmp), THCudaTensor_data(i % 2 == 0 ? d0_tmp : d0), THCudaTensor_data(i % 2 == 0 ? outlier_tmp : outlier), THCudaTensor_nElement(d0), THCudaTensor_size(d0, 2), THCudaTensor_size(d0, 3), tau_s, tau_h, disp_max); } checkCudaError(L); return 0; } #endif __global__ void interpolate_mismatch(float *d0, float *outlier, float *out, int size, int dim2, int dim3) { const float dir[] = { 0 , 1, -0.5, 1, -1 , 1, -1 , 0.5, -1 , 0, -1 , -0.5, -1 , -1, -0.5, -1, 0 , -1, 0.5 , -1, 1 , -1, 1 , -0.5, 1 , 0, 1 , 0.5, 1 , 1, 0.5 , 1 }; int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { if (outlier[id] != 2) { out[id] = d0[id]; return; } float vals[16]; int vals_size = 0; int x = id % dim3; int y = id / dim3; for (int d = 0; d < 16; d++) { float dx = dir[2 * d]; float dy = dir[2 * d + 1]; float xx = x; float yy = y; int xx_i = round(xx); int yy_i = round(yy); while (0 <= yy_i && yy_i < dim2 && 0 <= xx_i && xx_i < dim3 && outlier[yy_i * dim3 + xx_i] == 2) { xx += dx; yy += dy; xx_i = round(xx); yy_i = round(yy); } int ind = yy_i * dim3 + xx_i; if (0 <= yy_i && yy_i < dim2 && 0 <= xx_i && xx_i < dim3) { assert(outlier[ind] != 2); vals[vals_size++] = d0[ind]; } } assert(vals_size > 0); sort(vals, vals_size); out[id] = vals[vals_size / 2]; } } int interpolate_mismatch(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *outlier = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *out = new_tensor_like(state, d0); hipLaunchKernelGGL(( interpolate_mismatch), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, d0), THCudaTensor_data(state, outlier), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3)); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } __global__ void interpolate_occlusion(float *d0, float *outlier, float *out, int size, int dim3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { if (outlier[id] != 1) { out[id] = d0[id]; return; } int x = id % dim3; int dx = 0; while (x + dx >= 0 && outlier[id + dx] != 0) { dx--; } if (x + dx < 0) { dx = 0; while (x + dx < dim3 && outlier[id + dx] != 0) { dx++; } } if (x + dx < dim3) { out[id] = d0[id + dx]; } else { out[id] = d0[id]; } } } int interpolate_occlusion(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *outlier = 
(THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *out = new_tensor_like(state, d0); hipLaunchKernelGGL(( interpolate_occlusion), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, d0), THCudaTensor_data(state, outlier), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 3) ); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } #if 0 __global__ void sobel(float *x, float *g1, float *g2, int size, int dim2, int dim3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int xx = id % dim3; int yy = id / dim3; if (1 <= yy && yy < dim2 - 1 && 1 <= xx && xx < dim3 - 1) { g1[id] = -x[id-dim3-1] +x[id-dim3+1] -2*x[id-1] +2*x[id+1] -x[id+dim3-1] +x[id+dim3+1]; g2[id] = x[id-dim3-1] +2*x[id-dim3] +x[id-dim3+1] -x[id+dim3-1] -2*x[id+dim3] -x[id+dim3+1]; } else { g1[id] = 0; g2[id] = 0; } } } int sobel(lua_State *L) { THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *g1 = new_tensor_like(x); THCudaTensor *g2 = new_tensor_like(x); hipLaunchKernelGGL(( sobel), dim3((THCudaTensor_nElement(x) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(x), THCudaTensor_data(g1), THCudaTensor_data(g2), THCudaTensor_nElement(x), THCudaTensor_size(x, 2), THCudaTensor_size(x, 3) ); checkCudaError(L); luaT_pushudata(L, g1, "torch.CudaTensor"); luaT_pushudata(L, g2, "torch.CudaTensor"); return 2; } __global__ void depth_discontinuity_adjustment(float *d0, float *dg1, float *dg2, float *xg1, float *xg2, float *out, int size, int dim3, float tau_e) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { if (abs(dg1[id]) > tau_e) { out[id] = xg1[id - 1] > xg1[id + 1] ? d0[id - 1] : d0[id + 1]; } else if (abs(dg2[id]) > tau_e) { out[id] = xg2[id - dim3] > xg2[id + dim3] ? 
d0[id - dim3] : d0[id + dim3]; } else { out[id] = d0[id]; } } } int depth_discontinuity_adjustment(lua_State *L) { THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *dg1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *dg2 = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *xg1 = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *xg2 = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); float tau_e = luaL_checknumber(L, 6); THCudaTensor *out = new_tensor_like(d0); hipLaunchKernelGGL(( depth_discontinuity_adjustment), dim3((THCudaTensor_nElement(out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(d0), THCudaTensor_data(dg1), THCudaTensor_data(dg2), THCudaTensor_data(xg1), THCudaTensor_data(xg2), THCudaTensor_data(out), THCudaTensor_nElement(out), THCudaTensor_size(out, 3), tau_e); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } #endif __global__ void subpixel_enchancement(float *d0, float *c2, float *out, int size, int dim23, int disp_max) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = d0[id]; out[id] = d; if (1 <= d && d < disp_max - 1) { float cn = c2[(d - 1) * dim23 + id]; float cz = c2[d * dim23 + id]; float cp = c2[(d + 1) * dim23 + id]; float denom = 2 * (cp + cn - 2 * cz); if (denom > 1e-5) { out[id] = d - min(1.0, max(-1.0, (cp - cn) / denom)); } } } } int subpixel_enchancement(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *c2 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int disp_max = luaL_checkinteger(L, 3); THCudaTensor *out = new_tensor_like(state, d0); hipLaunchKernelGGL(( subpixel_enchancement), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, d0), THCudaTensor_data(state, c2), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2) * THCudaTensor_size(state, out, 3), disp_max); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } __global__ void mean2d(float *img, float *kernel, float *out, int size, int kernel_radius, int dim2, int dim3, float alpha2) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; float sum = 0; float cnt = 0; int i = 0; for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) { for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++, i++) { if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2 && abs(img[yy * dim3 + xx] - img[y * dim3 + x]) < alpha2) { sum += img[yy * dim3 + xx] * kernel[i]; cnt += kernel[i]; } } } out[id] = sum / cnt; } } int mean2d(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *kernel = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); float alpha2 = luaL_checknumber(L, 3); THCudaTensor *out = new_tensor_like(state, img); assert(THCudaTensor_size(state, kernel, 0) % 2 == 1); hipLaunchKernelGGL(( mean2d), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, img), THCudaTensor_data(state, kernel), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, kernel, 0) / 2, THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), alpha2); checkCudaError(L); luaT_pushudata(L, out, 
"torch.CudaTensor"); return 1; } __global__ void Normalize_get_norm_(float *input, float *norm, int size1, int size23, int size023) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size023) { int dim23 = id % size23; int dim0 = id / size23; float sum = 0.0; for (int dim1 = 0; dim1 < size1; dim1++) { float x = input[(dim0 * size1 + dim1) * size23 + dim23]; sum += x * x; } norm[dim0 * size23 + dim23] = sum + 1e-5; } } __global__ void Normalize_forward_(float *input, float *norm, float *output, int size23, int size123, int size0123) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size0123) { int dim23 = id % size23; int dim0 = (id / size123); output[id] = input[id] / sqrtf(norm[dim0 * size23 + dim23]); } } int Normalize_forward(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *norm = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); hipLaunchKernelGGL(( Normalize_get_norm_), dim3((THCudaTensor_nElement(state, norm) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, input), THCudaTensor_data(state, norm), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_nElement(state, norm)); hipLaunchKernelGGL(( Normalize_forward_), dim3((THCudaTensor_nElement(state, output) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, input), THCudaTensor_data(state, norm), THCudaTensor_data(state, output), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_size(state, input, 1) * THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_nElement(state, output)); checkCudaError(L); return 0; } __global__ void Normalize_backward_input_(float *grad_output, float *input, float *norm, float *grad_input, int size1, int size23, int size0123) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size0123) { int dim0 = id; int dim23 = dim0 % size23; dim0 /= size23; int dim1 = dim0 % size1; dim0 /= size1; float denom = powf(norm[dim0 * size23 + dim23], 1.5); float deriv = (norm[dim0 * size23 + dim23] - input[id] * input[id]) / denom * grad_output[id]; float sum = 0; for (int dim1_ = 0; dim1_ < size1; dim1_++) { if (dim1_ != dim1) { int ind = (dim0 * size1 + dim1_) * size23 + dim23; sum += input[ind] * grad_output[ind]; } } grad_input[id] = deriv - sum * input[id] / denom; } } int Normalize_backward_input(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *grad_output = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *norm = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *grad_input = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); hipLaunchKernelGGL(( Normalize_backward_input_), dim3((THCudaTensor_nElement(state, input) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, grad_output), THCudaTensor_data(state, input), THCudaTensor_data(state, norm), THCudaTensor_data(state, grad_input), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_nElement(state, input)); checkCudaError(L); return 0; } struct Margin2_functor { float margin; __host__ Margin2_functor(float margin_) : margin(margin_) {}; __device__ float forward(float pos, float neg) { return 
fmaxf(0, neg - pos + margin); } __device__ float backward(float pos, float neg, int which) { float f = neg - pos + margin; if (which == 0) { return -1. * (f > 0); } else { return f > 0; } } }; struct Margin2_squared_functor { float margin; __host__ Margin2_squared_functor(float margin_) : margin(margin_) {}; __device__ float forward(float pos, float neg) { float d = fmaxf(0, neg - pos + margin); return d * d * 0.5; } __device__ float backward(float pos, float neg, int which) { float f = neg - pos + margin; if (which == 0) { return -f * (f > 0); } else { return f * (f > 0); } } }; template <class Op> __global__ void Margin2_(float *input, float *tmp, float *gradInput, float margin, Op op, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { float pos = input[id * 2]; float neg = input[id * 2 + 1]; tmp[id] = op.forward(pos, neg); gradInput[id * 2] = op.backward(pos, neg, 0); gradInput[id * 2 + 1] = op.backward(pos, neg, 1); } } int Margin2(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *tmp = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); float margin = luaL_checknumber(L, 4); int pow = luaL_checkinteger(L, 5); if (pow == 1) { hipLaunchKernelGGL(( Margin2_), dim3((THCudaTensor_nElement(state, tmp) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, input), THCudaTensor_data(state, tmp), THCudaTensor_data(state, gradInput), margin, Margin2_functor(margin), THCudaTensor_nElement(state, tmp)); } else if (pow == 2) { hipLaunchKernelGGL(( Margin2_), dim3((THCudaTensor_nElement(state, tmp) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, input), THCudaTensor_data(state, tmp), THCudaTensor_data(state, gradInput), margin, Margin2_squared_functor(margin), THCudaTensor_nElement(state, tmp)); } checkCudaError(L); return 0; } __global__ void StereoJoin_(float *input_L, float *input_R, float *output_L, float *output_R, int size1_input, int size1, int size3, int size23) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size23) { int dim3 = id % size3; assert(size1_input <= 128); float L_cache[128]; for (int i = 0; i < size1_input; i++) { L_cache[i] = input_L[i * size23 + id]; } for (int d = 0; d < size1; d++) { if (dim3 - d >= 0) { float sum = 0; for (int i = 0; i < size1_input; i++) { sum -= L_cache[i] * input_R[i * size23 + id - d]; } output_L[d * size23 + id] = sum; output_R[d * size23 + id - d] = sum; } } } } int StereoJoin(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input_L = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *input_R = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output_L = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *output_R = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); int size23 = THCudaTensor_size(state, output_L, 2) * THCudaTensor_size(state, output_L, 3); hipLaunchKernelGGL(( StereoJoin_), dim3((size23 - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, input_L), THCudaTensor_data(state, input_R), THCudaTensor_data(state, output_L), THCudaTensor_data(state, output_R), THCudaTensor_size(state, input_L, 1), THCudaTensor_size(state, output_L, 1), THCudaTensor_size(state, output_L, 3), size23); checkCudaError(L); return 0; } __global__ void StereoL2R_(float *vol_L, float *vol_R, int size2, int size3, int size) { int id = 
blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int dim3 = id % size3; int dim1 = id / (size2 * size3); if (dim3 + dim1 >= size3) { vol_R[id] = CUDART_INF; } else { vol_R[id] = vol_L[id + dim1]; } } } int StereoL2R(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *vol_L = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *vol_R = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); hipLaunchKernelGGL(( StereoL2R_), dim3((THCudaTensor_nElement(state, vol_L) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, vol_L), THCudaTensor_data(state, vol_R), THCudaTensor_size(state, vol_R, 2), THCudaTensor_size(state, vol_R, 3), THCudaTensor_nElement(state, vol_R)); checkCudaError(L); return 0; } __global__ void bilateral_filter(float *img, float *out, int size, int dim2, int dim3, int kernel_radius, float sigma1, float sigma2) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; float sum = 0; float cnt = 0; for (int i = -kernel_radius; i <= kernel_radius; i++) { for (int j = -kernel_radius; j <= kernel_radius; j++) { int yy = y + i; int xx = x + j; if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2) { float color_diff = img[yy * dim3 + xx] - img[y * dim3 + x]; float v1 = exp(-(i * i + j * j) / (2 * sigma1 * sigma1)); float v2 = exp(-(color_diff * color_diff) / (2 * sigma2 * sigma2)); sum += img[yy * dim3 + xx] * v1 * v2; cnt += v1 * v2; } } } out[id] = sum / cnt; } } int bilateral_filter(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); float sigma1 = luaL_checknumber(L, 2); float sigma2 = luaL_checknumber(L, 3); THCudaTensor *out = new_tensor_like(state, img); int kernel_radius = ceil(min(sigma1, sigma2) * 3); hipLaunchKernelGGL(( bilateral_filter), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, img), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), kernel_radius, sigma1, sigma2); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } __global__ void median2d(float *img, float *out, int size, int dim2, int dim3, int kernel_radius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; float xs[11 * 11]; int xs_size = 0; for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) { for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++) { if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2) { xs[xs_size++] = img[yy * dim3 + xx]; } } } sort(xs, xs_size); out[id] = xs[xs_size / 2]; } } int median2d(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); int kernel_size = luaL_checkinteger(L, 2); THCudaTensor *out = new_tensor_like(state, img); assert(kernel_size % 2 == 1); assert(kernel_size <= 11); hipLaunchKernelGGL(( median2d), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, img), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), kernel_size / 2); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } #if 0 int histogram(lua_State *L) { THFloatTensor *img = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); THIntTensor *hist = THIntTensor_newWithSize1d(256); 
THIntTensor_zero(hist); float *img_data = THFloatTensor_data(img); int *hist_data = THIntTensor_data(hist); for (int i = 0; i < THFloatTensor_size(img, 2) * THFloatTensor_size(img, 3); i++) { assert(0 <= img_data[i] && img_data[i] < 256); hist_data[(int)img_data[i]]++; } luaT_pushudata(L, hist, "torch.IntTensor"); return 1; } int histogram_equalization_map(lua_State *L) { THIntTensor *cdf = (THIntTensor*)luaT_checkudata(L, 1, "torch.IntTensor"); THIntTensor *map = THIntTensor_new(); THIntTensor_resizeAs(map, cdf); int *cdf_data = THIntTensor_data(cdf); int max = cdf_data[255]; int min = cdf_data[0]; for (int i = 0; i < 256; i++) { if (cdf_data[i]) { min = cdf_data[i]; break; } } int *map_data = THIntTensor_data(map); for (int i = 0; i < 256; i++) { map_data[i] = round((double)(cdf_data[i] - min) / (max - min) * 255); } luaT_pushudata(L, map, "torch.IntTensor"); return 1; } int map_intensities(lua_State *L) { THFloatTensor *img = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); THIntTensor *map = (THIntTensor*)luaT_checkudata(L, 2, "torch.IntTensor"); THFloatTensor *out = THFloatTensor_new(); THFloatTensor_resizeAs(out, img); float *img_data = THFloatTensor_data(img); float *out_data = THFloatTensor_data(out); int *map_data = THIntTensor_data(map); for (int i = 0; i < THFloatTensor_size(img, 2) * THFloatTensor_size(img, 3); i++) { out_data[i] = map_data[(int)img_data[i]]; } luaT_pushudata(L, out, "torch.FloatTensor"); return 1; } #endif int readPNG16(lua_State *L) { THFloatTensor *img_ = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); const char* fname = luaL_checkstring(L, 2); float *img = THFloatTensor_data(img_); png::image<png::gray_pixel_16> image(fname); int width = image.get_width(); int height = image.get_height(); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { uint16_t val = image.get_pixel(j, i); img[i * width + j] = val == 0 ? 0.0 : ((float)val)/256.0; } } return 0; } int writePNG16(lua_State *L) { THFloatTensor *img_ = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); int height = luaL_checkinteger(L, 2); int width = luaL_checkinteger(L, 3); const char* fname = luaL_checkstring(L, 4); float *img = THFloatTensor_data(img_); png::image<png::gray_pixel_16> image(width, height); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { float val = img[i * width + j]; image.set_pixel(j, i, (uint16_t)(val < 1e-5 ? 
0 : val * 256)); } } image.write(fname); return 0; } int writePFM(lua_State *L) { THFloatTensor *img_ = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); const char* fname = luaL_checkstring(L, 2); int height = THFloatTensor_size(img_, 0); int width = THFloatTensor_size(img_, 1); FILE *f = fopen(fname, "w"); fprintf(f, "Pf\n%d %d\n-0.003922\n", width, height); fwrite(THFloatTensor_data(img_), 4, height * width, f); fclose(f); return 0; } __global__ void remove_nonvisible(float *y, int size, int size3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % size3; if (y[id] >= x) { y[id] = 0; } } } int remove_nonvisible(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *y = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); hipLaunchKernelGGL(( remove_nonvisible), dim3((THCudaTensor_nElement(state, y) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, y), THCudaTensor_nElement(state, y), THCudaTensor_size(state, y, 3)); checkCudaError(L); return 0; } __global__ void remove_occluded(float *y, int size, int size3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % size3; for (int i = 1; x + i < size3; i++) { if (i - y[id + i] < -y[id]) { y[id] = 0; break; } } } } int remove_occluded(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *y = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); hipLaunchKernelGGL(( remove_occluded), dim3((THCudaTensor_nElement(state, y) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, y), THCudaTensor_nElement(state, y), THCudaTensor_size(state, y, 3)); checkCudaError(L); return 0; } __global__ void remove_white(float *x, float *y, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { if (x[id] == 255) { y[id] = 0; } } } int remove_white(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *y = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); hipLaunchKernelGGL(( remove_white), dim3((THCudaTensor_nElement(state, y) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, x), THCudaTensor_data(state, y), THCudaTensor_nElement(state, y)); checkCudaError(L); return 0; } __global__ void copy_fill(float *in, float *out, int size, int in_size2, int in_size3, int out_size2, int out_size3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int out_x = id % out_size3; int out_y = id / out_size3; int in_x = out_x - (out_size3 - in_size3) / 2; int in_y = out_y - (out_size2 - in_size2) / 2; int x = min(in_size3 - 1, max(0, in_x)); int y = min(in_size2 - 1, max(0, in_y)); out[id] = in[y * in_size3 + x]; } } int copy_fill(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *in = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); hipLaunchKernelGGL(( copy_fill), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, in), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, in, 2), THCudaTensor_size(state, in, 3), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3)); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } void memcpy2d(float *dst, float *src, int x, int y, int win_radius, int height, int width) { assert(0 <= x - win_radius); assert(x + win_radius <= width); assert(0 <= y - win_radius); assert(y + 
win_radius <= height); for (int i = -win_radius; i <= win_radius; i++) { memcpy(dst, src + (y + i) * width + x - win_radius, (win_radius * 2 + 1) * sizeof(float)); dst += win_radius * 2 + 1; } } double random_uniform() { return ((double)rand()/(double)RAND_MAX); } int random_int(int a, int b) { assert(a <= b); return floor(random_uniform() * (b - a + 1) + a); } double random_exp(double lambda) { double u = random_uniform(); return -log(u) / lambda; } int subset_dataset(lua_State *L) { THLongTensor *index_ = (THLongTensor*)luaT_checkudata(L, 1, "torch.LongTensor"); THFloatTensor *input_ = (THFloatTensor*)luaT_checkudata(L, 2, "torch.FloatTensor"); THFloatTensor *output_ = (THFloatTensor*)luaT_checkudata(L, 3, "torch.FloatTensor"); long *index = THLongTensor_data(index_); float *input = THFloatTensor_data(input_); float *output = THFloatTensor_data(output_); const int N = 200; int set[N]; for (int i = 0; i < N; i++) { set[i] = 0; } for (int i = 0; i < THLongTensor_nElement(index_); i++) { assert(index[i] < N); set[index[i]] = 1; } int i = 0; for (int j = 0; j < THFloatTensor_size(input_, 0); j++) { int im = input[j * 4]; if (set[im]) { for (int k = 0; k < 4; k++) { output[i * 4 + k] = input[j * 4 + k]; } i++; } } lua_pushinteger(L, i); return 1; } int make_dataset2(lua_State *L) { THFloatTensor *disp_ = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); THFloatTensor *nnz_ = (THFloatTensor*)luaT_checkudata(L, 2, "torch.FloatTensor"); int img = luaL_checkinteger(L, 3); int t = luaL_checkinteger(L, 4); float *disp = THFloatTensor_data(disp_); float *nnz = THFloatTensor_data(nnz_); int height = THFloatTensor_size(disp_, 2); int width = THFloatTensor_size(disp_, 3); int nnz_size = THFloatTensor_nElement(nnz_); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { if (disp[i * width + j] > 0.5) { assert(t * 4 + 4 <= nnz_size); nnz[t * 4 + 0] = img; nnz[t * 4 + 1] = i; nnz[t * 4 + 2] = j; nnz[t * 4 + 3] = disp[i * width + j]; t++; } } } lua_pushinteger(L, t); return 1; } int make_dataset(lua_State *L) { THFloatTensor *x0_ = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); THFloatTensor *x1_ = (THFloatTensor*)luaT_checkudata(L, 2, "torch.FloatTensor"); THFloatTensor *disp_ = (THFloatTensor*)luaT_checkudata(L, 3, "torch.FloatTensor"); THFloatTensor *x_ = (THFloatTensor*)luaT_checkudata(L, 4, "torch.FloatTensor"); THFloatTensor *y_ = (THFloatTensor*)luaT_checkudata(L, 5, "torch.FloatTensor"); int t = luaL_checkinteger(L, 6); float thr_true = luaL_checknumber(L, 7); float thr_false_l = luaL_checknumber(L, 8); float thr_false_u = luaL_checknumber(L, 9); float *x0 = THFloatTensor_data(x0_); float *x1 = THFloatTensor_data(x1_); float *disp = THFloatTensor_data(disp_); float *x = THFloatTensor_data(x_); float *y = THFloatTensor_data(y_); int height = THFloatTensor_size(x0_, 2); int width = THFloatTensor_size(x0_, 3); int win_size = THFloatTensor_size(x_, 2); int x_size = THFloatTensor_size(x_, 0); assert(win_size % 2 == 1); int win_radius = (win_size - 1) / 2; x += t * 2 * win_size * win_size; for (int i = win_radius; i < height - win_radius; i++) { for (int j = win_radius; j < width - win_radius; j++) { if (disp[i * width + j] > 0.5) { int d_true = round(disp[i * width + j]); if (0 <= j - d_true - win_radius) { /* true offset */ int delta = 0; for (;;) { delta = random_int(-thr_true, thr_true); if (0 <= j - d_true + delta - win_radius && j - d_true + delta + win_radius < width) { break; } } assert(t < x_size); memcpy2d(x, x0, j, i, win_radius, height, width); x += 
win_size * win_size; memcpy2d(x, x1, j - d_true + delta, i, win_radius, height, width); x += win_size * win_size; y[t] = 1; t++; /* false offset */ delta = 0; for (;;) { delta = random_int(thr_false_l, thr_false_u); if (random_uniform() < 0.5) { delta = -delta; } if (0 <= j - d_true + delta - win_radius && j - d_true + delta + win_radius < width) { break; } } assert(t < x_size); memcpy2d(x, x0, j, i, win_radius, height, width); x += win_size * win_size; memcpy2d(x, x1, j - d_true + delta, i, win_radius, height, width); x += win_size * win_size; y[t] = 0; t++; } } } } lua_pushinteger(L, t); return 1; } /* CPU implementation */ int grey2jet(lua_State *L) { THDoubleTensor *grey_img = (THDoubleTensor*)luaT_checkudata(L, 1, "torch.DoubleTensor"); THDoubleTensor *col_img = (THDoubleTensor*)luaT_checkudata(L, 2, "torch.DoubleTensor"); assert(grey_img->nDimension == 2); if (3 * THDoubleTensor_nElement(grey_img) != THDoubleTensor_nElement(col_img)) { luaL_error(L, "Size mismatch"); } int height = THDoubleTensor_size(grey_img, 0); int width = THDoubleTensor_size(grey_img, 1); double *gray_data = THDoubleTensor_data(grey_img); double *col_data = THDoubleTensor_data(col_img); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { double val = gray_data[i * width + j] * 4; double r = 0, g = 0, b = 0; if (-0.1 <= val && val < 0.5) { r = 0; g = 0; b = 0.5 + val; } else if (0.5 <= val && val < 1.5) { r = 0; g = val - 0.5; b = 1; } else if (1.5 <= val && val < 2.5) { r = val - 1.5; g = 1; b = 1 - (val - 1.5); } else if (2.5 <= val && val < 3.5) { r = 1; g = 1 - (val - 2.5); b = 0; } else if (3.5 <= val && val <= 4.1) { r = 1 - (val - 3.5); g = 0; b = 0; } else { printf("val = %f\n", val); assert(0); } col_data[(0 * height + i) * width + j] = r; col_data[(1 * height + i) * width + j] = g; col_data[(2 * height + i) * width + j] = b; } } return 0; } int version(lua_State* L) { printf("libadcensus version 0.0.5\n"); return 0; } static const struct luaL_Reg funcs[] = { {"ad", ad}, {"census", census}, {"cross", cross}, {"cbca", cbca}, {"sgm", sgm}, {"sgm2", sgm2}, {"sgm3", sgm3}, {"outlier_detection", outlier_detection}, {"interpolate_occlusion", interpolate_occlusion}, {"interpolate_mismatch", interpolate_mismatch}, {"subpixel_enchancement", subpixel_enchancement}, {"copy_fill", copy_fill}, {"median2d", median2d}, {"mean2d", mean2d}, {"Normalize_forward", Normalize_forward}, {"Normalize_backward_input", Normalize_backward_input}, {"Margin2", Margin2}, {"StereoJoin", StereoJoin}, {"StereoL2R", StereoL2R}, {"subset_dataset", subset_dataset}, {"make_dataset", make_dataset}, {"make_dataset2", make_dataset2}, {"remove_nonvisible", remove_nonvisible}, {"remove_occluded", remove_occluded}, {"remove_white", remove_white}, {"readPNG16", readPNG16}, {"writePNG16", writePNG16}, {"writePFM", writePFM}, {"grey2jet", grey2jet}, {"spatial_argmin", spatial_argmin}, {"version", version}, {NULL, NULL} }; #include "SpatialLogSoftMax.cu" extern "C" int luaopen_libadcensus(lua_State *L) { srand(42); cunn_SpatialLogSoftMax_init(L); luaL_openlib(L, "adcensus", funcs, 0); return 1; }
b091a89b2c48811f77db07474e6aac3befeb397b.cu
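/*
 * adcensus CUDA kernels exposed to Lua/Torch through the luaT/THC APIs.
 *
 * The file covers the stereo pipeline registered at the bottom in `funcs`:
 * matching costs (ad, census), cross-based cost aggregation (cross, cbca),
 * semiglobal matching (sgm, sgm2, sgm3), left-right consistency checking
 * (outlier_detection), occlusion and mismatch interpolation, subpixel
 * enhancement, median/mean/bilateral filtering, the Normalize and Margin2
 * network pieces, StereoJoin/StereoL2R, dataset construction helpers and
 * PNG/PFM I/O.
 *
 * Conventions visible in the launchers: disparity (or channel) runs along
 * tensor dimension 1, height along dimension 2 and width along dimension 3;
 * most kernels are launched on a 1-D grid of ceil(n / TB) blocks of TB
 * threads, one output element per thread, guarded by `if (id < size)`.
 */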
extern "C" { #include "lua.h" #include "lualib.h" #include "lauxlib.h" } #include "luaT.h" #include "THC.h" #include <stdio.h> #include <assert.h> #include <math_constants.h> #include <stdint.h> #include <unistd.h> #include <png++/image.hpp> #define TB 128 #define DISP_MAX 256 THCState* getCutorchState(lua_State* L) { lua_getglobal(L, "cutorch"); lua_getfield(L, -1, "getState"); lua_call(L, 0, 1); THCState *state = (THCState*) lua_touserdata(L, -1); lua_pop(L, 2); return state; } void checkCudaError(lua_State *L) { cudaError_t status = cudaPeekAtLastError(); if (status != cudaSuccess) { luaL_error(L, cudaGetErrorString(status)); } } #define COLOR_DIFF(x, i, j) (abs(x[i] - x[j])) THCudaTensor *new_tensor_like(THCState *state, THCudaTensor *x) { THCudaTensor *y = THCudaTensor_new(state); THCudaTensor_resizeAs(state, y, x); return y; } __device__ void sort(float *x, int n) { for (int i = 0; i < n - 1; i++) { int min = i; for (int j = i + 1; j < n; j++) { if (x[j] < x[min]) { min = j; } } float tmp = x[min]; x[min] = x[i]; x[i] = tmp; } } __global__ void ad(float *x0, float *x1, float *output, int size, int size2, int size3, int direction) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = id; int x = d % size3; d /= size3; int y = d % size2; d /= size2; d *= direction; float dist; if (0 <= x + d && x + d < size3) { int cnt = 0; dist = 0; for (int yy = y - 4; yy <= y + 4; yy++) { for (int xx = x - 4; xx <= x + 4; xx++) { if (0 <= xx && xx < size3 && 0 <= xx + d && xx + d < size3 && 0 <= yy && yy < size2) { int ind = yy * size3 + xx; dist += abs(x0[ind] - x1[ind + d]); cnt++; } } } dist /= cnt; } else { dist = CUDART_NAN; } output[id] = dist; } } int ad(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int direction = luaL_checkinteger(L, 4); assert(direction == -1 || direction == 1); ad<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), direction); checkCudaError(L); return 0; } __global__ void census(float *x0, float *x1, float *output, int size, int num_channels, int size2, int size3, int direction) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = id; int x = d % size3; d /= size3; int y = d % size2; d /= size2; d *= direction; float dist; if (0 <= x + d && x + d < size3) { dist = 0; for (int i = 0; i < num_channels; i++) { int ind_p = (i * size2 + y) * size3 + x; for (int yy = y - 4; yy <= y + 4; yy++) { for (int xx = x - 4; xx <= x + 4; xx++) { if (0 <= xx && xx < size3 && 0 <= xx + d && xx + d < size3 && 0 <= yy && yy < size2) { int ind_q = (i * size2 + yy) * size3 + xx; if ((x0[ind_q] < x0[ind_p]) != (x1[ind_q + d] < x1[ind_p + d])) { dist++; } } else { dist++; } } } } dist /= num_channels; } else { dist = CUDART_NAN; } output[id] = dist; } } int census(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int direction = luaL_checkinteger(L, 4); assert(direction == -1 || direction 
== 1); census<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, x0, 1), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), direction); checkCudaError(L); return 0; } #if 0 __global__ void add_vol(float *vol, float *cnt, float *out, int size, int size1, int size2, int size3, float ratio) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = id; int x = d % size3; d /= size3; int y = d % size2; d /= size2; int lo = floor(d * ratio); int hi = lo + 1; float alpha = (d * ratio) - lo; assert(0 <= lo && hi < size1); float val = vol[(lo * size2 + y) * size3 + x] * (1 - alpha) + vol[(hi * size2 + y) * size3 + x] * alpha; if (!isnan(val) && cnt[id] > 0) { out[id] += val; cnt[id] += 1; } } } int add_vol(lua_State *L) { THCudaTensor *vol = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *cnt = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); float ratio = luaL_checknumber(L, 4); add_vol<<<(THCudaTensor_nElement(out) - 1) / TB + 1, TB>>>( THCudaTensor_data(vol), THCudaTensor_data(cnt), THCudaTensor_data(out), THCudaTensor_nElement(out), THCudaTensor_size(vol, 1), THCudaTensor_size(out, 2), THCudaTensor_size(out, 3), ratio); checkCudaError(L); return 0; } __global__ void rho(float *x, int size, float lambda) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { x[id] = 1 - exp(-x[id] / lambda); } } int rho(lua_State *L) { THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); float lambda = luaL_checknumber(L, 2); rho<<<(THCudaTensor_nElement(x) - 1) / TB + 1, TB>>>( THCudaTensor_data(x), THCudaTensor_nElement(x), lambda); checkCudaError(L); return 0; } #endif __global__ void spatial_argmin(float *input, float *output, int size, int size1, int size23) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int dim23 = id % size23; int dim0 = id / size23; int argmin = 0; float min = CUDART_INF; for (int i = 0; i < size1; i++) { float val = input[(dim0 * size1 + i) * size23 + dim23]; if (val < min) { min = val; argmin = i; } } output[id] = argmin + 1; } } int spatial_argmin(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); spatial_argmin<<<(THCudaTensor_nElement(state, output) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, input), THCudaTensor_data(state, output), THCudaTensor_nElement(state, output), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, output, 3)); checkCudaError(L); return 0; } __global__ void cross(float *x0, float *out, int size, int dim2, int dim3, int L1, float tau1) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int dir = id; int x = dir % dim3; dir /= dim3; int y = dir % dim2; dir /= dim2; int dx = 0; int dy = 0; if (dir == 0) { dx = -1; } else if (dir == 1) { dx = 1; } else if (dir == 2) { dy = -1; } else if (dir == 3) { dy = 1; } else { assert(0); } int xx, yy, ind1, ind2, dist; ind1 = y * dim3 + x; for (xx = x + dx, yy = y + dy;;xx += dx, yy += dy) { if (xx < 0 || xx >= dim3 || yy < 0 || yy >= dim2) break; dist = max(abs(xx - x), abs(yy - y)); if (dist == 1) continue; ind2 = yy * dim3 + xx; /* rule 1 */ if 
(COLOR_DIFF(x0, ind1, ind2) >= tau1) break; /* rule 2 */ if (dist >= L1) break; } out[id] = dir <= 1 ? xx : yy; } } int cross(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int L1 = luaL_checkinteger(L, 3); float tau1 = luaL_checknumber(L, 4); cross<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), L1, tau1); checkCudaError(L); return 0; } __global__ void cbca(float *x0c, float *x1c, float *vol, float *out, int size, int dim2, int dim3, int direction) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = id; int x = d % dim3; d /= dim3; int y = d % dim2; d /= dim2; if (x + d * direction < 0 || x + d * direction >= dim3) { out[id] = vol[id]; } else { float sum = 0; int cnt = 0; int yy_s = max(x0c[(2 * dim2 + y) * dim3 + x], x1c[(2 * dim2 + y) * dim3 + x + d * direction]); int yy_t = min(x0c[(3 * dim2 + y) * dim3 + x], x1c[(3 * dim2 + y) * dim3 + x + d * direction]); for (int yy = yy_s + 1; yy < yy_t; yy++) { int xx_s = max(x0c[(0 * dim2 + yy) * dim3 + x], x1c[(0 * dim2 + yy) * dim3 + x + d * direction] - d * direction); int xx_t = min(x0c[(1 * dim2 + yy) * dim3 + x], x1c[(1 * dim2 + yy) * dim3 + x + d * direction] - d * direction); for (int xx = xx_s + 1; xx < xx_t; xx++) { float val = vol[(d * dim2 + yy) * dim3 + xx]; assert(!isnan(val)); sum += val; cnt++; } } assert(cnt > 0); out[id] = sum / cnt; assert(!isnan(out[id])); } } } int cbca(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0c = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1c = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *vol_in = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *vol_out = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); int direction = luaL_checkinteger(L, 5); assert(direction == -1 or direction == 1); cbca<<<(THCudaTensor_nElement(state, vol_out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, x0c), THCudaTensor_data(state, x1c), THCudaTensor_data(state, vol_in), THCudaTensor_data(state, vol_out), THCudaTensor_nElement(state, vol_out), THCudaTensor_size(state, vol_out, 2), THCudaTensor_size(state, vol_out, 3), direction); checkCudaError(L); return 0; } __global__ void sgm(float *x0, float *x1, float *vol, float *tmp, float *out, int dim1, int dim2, int dim3, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int sgm_direction, int direction) { int x, y, dx, dy; dx = dy = 0; if (sgm_direction <= 1) { y = blockIdx.x * blockDim.x + threadIdx.x; if (y >= dim2) { return; } if (sgm_direction == 0) { x = 0; dx = 1; } else if (sgm_direction == 1) { x = dim3 - 1; dx = -1; } } else if (sgm_direction <= 3) { x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= dim3) { return; } if (sgm_direction == 2) { y = 0; dy = 1; } else if (sgm_direction == 3) { y = dim2 - 1; dy = -1; } } assert(dim1 <= 400); float tmp_curr_[400]; float tmp_prev_[400]; float *tmp_curr = tmp_curr_; float *tmp_prev = tmp_prev_; float min_prev = CUDART_INF; for (; 0 <= y && y < dim2 && 0 <= x && x < dim3; x += dx, y += dy) { float min_curr = CUDART_INF; for (int d = 0; d < dim1; d++) { int ind = (d * dim2 + y) * dim3 + x; if (x + d * direction < 0 || x + d * direction >= 
dim3 || y - dy < 0 || y - dy >= dim2 || x + d * direction - dx < 0 || x + d * direction - dx >= dim3 || x - dx < 0 || x - dx >= dim3) { out[ind] += vol[ind]; tmp_curr[d] = vol[ind]; } else { int ind2 = y * dim3 + x; float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * dim3 - dx); float D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * dim3 - dx); float P1, P2; if (D1 < tau_so && D2 < tau_so) { P1 = pi1; P2 = (pi1 * pi2); } else if (D1 > tau_so && D2 > tau_so) { P1 = pi1 / (sgm_q1 * sgm_q2); P2 = (pi1 * pi2) / (sgm_q1 * sgm_q2); } else { P1 = pi1 / sgm_q1; P2 = (pi1 * pi2) / sgm_q1; } assert(min_prev != CUDART_INF); float cost = min(tmp_prev[d], min_prev + P2); if (d > 0) { cost = min(cost, tmp_prev[d - 1] + (sgm_direction == 2 ? P1 / alpha1 : P1)); } if (d < dim1 - 1) { cost = min(cost, tmp_prev[d + 1] + (sgm_direction == 3 ? P1 / alpha1 : P1)); } float val = vol[ind] + cost - min_prev; out[ind] += val; tmp_curr[d] = val; } if (tmp_curr[d] < min_curr) { min_curr = tmp_curr[d]; } } min_prev = min_curr; float *swap = tmp_curr; tmp_curr = tmp_prev; tmp_prev = swap; } } int sgm(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *vol = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *tmp = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); float pi1 = luaL_checknumber(L, 6); float pi2 = luaL_checknumber(L, 7); float tau_so = luaL_checknumber(L, 8); float alpha1 = luaL_checknumber(L, 9); float sgm_q1 = luaL_checknumber(L, 10); float sgm_q2 = luaL_checknumber(L, 11); int direction = luaL_checknumber(L, 12); int dim1 = THCudaTensor_size(state, out, 1); int dim2 = THCudaTensor_size(state, out, 2); int dim3 = THCudaTensor_size(state, out, 3); for (int sgm_direction = 0; sgm_direction < 4; sgm_direction++) { int size = sgm_direction <= 1 ? 
dim2 : dim3; sgm<<<(size - 1) / TB + 1, TB>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, vol), THCudaTensor_data(state, tmp), THCudaTensor_data(state, out), dim1, dim2, dim3, pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, sgm_direction, direction); } checkCudaError(L); return 0; } #define INDEX(dim0, dim1, dim2, dim3) \ assert((dim1) >= 0 && (dim1) < size1 && (dim2) >= 0 && (dim2) < size2 && (dim3) >= 0 && (dim3) < size3), \ ((((dim0) * size1 + (dim1)) * size2 + (dim2)) * size3 + dim3) template <int sgm_direction> __global__ void sgm2(float *x0, float *x1, float *input, float *output, float *tmp, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int direction, int size1, int size2, int size3, int step) { int x, y, dx, dy; int d = threadIdx.x; if (sgm_direction == 0) { /* right */ x = step; y = blockIdx.x; dx = 1; dy = 0; } else if (sgm_direction == 1) { /* left */ x = size2 - 1 - step; y = blockIdx.x; dx = -1; dy = 0; } else if (sgm_direction == 2) { /* down */ x = blockIdx.x; y = step; dx = 0; dy = 1; } else if (sgm_direction == 3) { /* up */ x = blockIdx.x; y = size1 - 1 - step; dx = 0; dy = -1; } if (y - dy < 0 || y - dy >= size1 || x - dx < 0 || x - dx >= size2) { float val = input[INDEX(0, y, x, d)]; output[INDEX(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; return; } __shared__ float output_s[400], output_min[400]; output_s[d] = output_min[d] = tmp[d * size2 + blockIdx.x]; __syncthreads(); for (int i = 256; i > 0; i /= 2) { if (d < i && d + i < size3 && output_min[d + i] < output_min[d]) { output_min[d] = output_min[d + i]; } __syncthreads(); } int ind2 = y * size2 + x; float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * size2 - dx); float D2; int xx = x + d * direction; if (xx < 0 || xx >= size2 || xx - dx < 0 || xx - dx >= size2) { D2 = 10; } else { D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * size2 - dx); } float P1, P2; if (D1 < tau_so && D2 < tau_so) { P1 = pi1; P2 = pi2; } else if (D1 > tau_so && D2 > tau_so) { P1 = pi1 / (sgm_q1 * sgm_q2); P2 = pi2 / (sgm_q1 * sgm_q2); } else { P1 = pi1 / sgm_q1; P2 = pi2 / sgm_q1; } float cost = min(output_s[d], output_min[0] + P2); if (d - 1 >= 0) { cost = min(cost, output_s[d - 1] + (sgm_direction == 2 ? P1 / alpha1 : P1)); } if (d + 1 < size3) { cost = min(cost, output_s[d + 1] + (sgm_direction == 3 ? 
P1 / alpha1 : P1)); } float val = input[INDEX(0, y, x, d)] + cost - output_min[0]; output[INDEX(0, y, x, d)] += val; tmp[d * size2 + blockIdx.x] = val; } int sgm2(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *tmp = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); float pi1 = luaL_checknumber(L, 6); float pi2 = luaL_checknumber(L, 7); float tau_so = luaL_checknumber(L, 8); float alpha1 = luaL_checknumber(L, 9); float sgm_q1 = luaL_checknumber(L, 10); float sgm_q2 = luaL_checknumber(L, 11); int direction = luaL_checknumber(L, 12); int size1 = THCudaTensor_size(state, output, 1) * THCudaTensor_size(state, output, 3); int size2 = THCudaTensor_size(state, output, 2) * THCudaTensor_size(state, output, 3); int disp_max = THCudaTensor_size(state, output, 3); for (int step = 0; step < THCudaTensor_size(state, input, 2); step++) { sgm2<0><<<(size1 - 1) / disp_max + 1, disp_max>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), THCudaTensor_data(state, tmp), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 2); step++) { sgm2<1><<<(size1 - 1) / disp_max + 1, disp_max>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), THCudaTensor_data(state, tmp), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 1); step++) { sgm2<2><<<(size2 - 1) / disp_max + 1, disp_max>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), THCudaTensor_data(state, tmp), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 1); step++) { sgm2<3><<<(size2 - 1) / disp_max + 1, disp_max>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), THCudaTensor_data(state, tmp), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } checkCudaError(L); return 0; } template <int sgm_direction> __global__ void sgm3(float *x0, float *x1, float *input, float *output, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int direction, int size1, int size2, int size3, int step) { int x, y, dx, dy; int d = threadIdx.x; if (sgm_direction == 0) { /* right */ x = step; y = blockIdx.x; dx = 1; dy = 0; } else if (sgm_direction == 1) { /* left */ x = size2 - 1 - step; y = blockIdx.x; dx = -1; dy = 0; } else if (sgm_direction == 2) { /* down */ x = blockIdx.x; y = step; dx = 0; dy = 1; } else if (sgm_direction == 3) { /* up */ x = blockIdx.x; y = size1 - 
1 - step; dx = 0; dy = -1; } if (y - dy < 0 || y - dy >= size1 || x - dx < 0 || x - dx >= size2) { output[INDEX(sgm_direction, y, x, d)] = input[INDEX(0, y, x, d)]; return; } __shared__ float output_s[400], output_min[400]; output_s[d] = output_min[d] = output[INDEX(sgm_direction, y - dy, x - dx, d)]; __syncthreads(); for (int i = 256; i > 0; i /= 2) { if (d < i && d + i < size3 && output_min[d + i] < output_min[d]) { output_min[d] = output_min[d + i]; } __syncthreads(); } int ind2 = y * size2 + x; float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * size2 - dx); float D2; int xx = x + d * direction; if (xx < 0 || xx >= size2 || xx - dx < 0 || xx - dx >= size2) { D2 = 10; } else { D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * size2 - dx); } float P1, P2; if (D1 < tau_so && D2 < tau_so) { P1 = pi1; P2 = pi2; } else if (D1 > tau_so && D2 > tau_so) { P1 = pi1 / (sgm_q1 * sgm_q2); P2 = pi2 / (sgm_q1 * sgm_q2); } else { P1 = pi1 / sgm_q1; P2 = pi2 / sgm_q1; } float cost = min(output_s[d], output_min[0] + P2); if (d - 1 >= 0) { cost = min(cost, output_s[d - 1] + (sgm_direction == 2 ? P1 / alpha1 : P1)); } if (d + 1 < size3) { cost = min(cost, output_s[d + 1] + (sgm_direction == 3 ? P1 / alpha1 : P1)); } output[INDEX(sgm_direction, y, x, d)] = input[INDEX(0, y, x, d)] + cost - output_min[0]; } int sgm3(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); float pi1 = luaL_checknumber(L, 5); float pi2 = luaL_checknumber(L, 6); float tau_so = luaL_checknumber(L, 7); float alpha1 = luaL_checknumber(L, 8); float sgm_q1 = luaL_checknumber(L, 9); float sgm_q2 = luaL_checknumber(L, 10); int direction = luaL_checknumber(L, 11); int size1 = THCudaTensor_size(state, output, 1) * THCudaTensor_size(state, output, 3); int size2 = THCudaTensor_size(state, output, 2) * THCudaTensor_size(state, output, 3); int disp_max = THCudaTensor_size(state, output, 3); for (int step = 0; step < THCudaTensor_size(state, input, 2); step++) { sgm3<0><<<(size1 - 1) / disp_max + 1, disp_max>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 2); step++) { sgm3<1><<<(size1 - 1) / disp_max + 1, disp_max>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 1); step++) { sgm3<2><<<(size2 - 1) / disp_max + 1, disp_max>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } for (int step = 0; step < THCudaTensor_size(state, input, 1); step++) { sgm3<3><<<(size2 - 1) / 
disp_max + 1, disp_max>>>( THCudaTensor_data(state, x0), THCudaTensor_data(state, x1), THCudaTensor_data(state, input), THCudaTensor_data(state, output), pi1, pi2, tau_so, alpha1, sgm_q1, sgm_q2, direction, THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2), THCudaTensor_size(state, input, 3), step); } checkCudaError(L); return 0; } __global__ void fliplr(float *in, float *out, int size, int dim3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; out[id + dim3 - 2 * x - 1] = in[id]; } } int fliplr(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *in = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); fliplr<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, in), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 3)); checkCudaError(L); return 0; } __global__ void outlier_detection(float *d0, float *d1, float *outlier, int size, int dim3, int disp_max) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int d0i = d0[id]; if (x - d0i < 0) { //assert(0); outlier[id] = 1; } else if (abs(d0[id] - d1[id - d0i]) < 1.1) { outlier[id] = 0; /* match */ } else { outlier[id] = 1; /* occlusion */ for (int d = 0; d < disp_max; d++) { if (x - d >= 0 && abs(d - d1[id - d]) < 1.1) { outlier[id] = 2; /* mismatch */ break; } } } } } int outlier_detection(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *d1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *outlier = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int disp_max = luaL_checkinteger(L, 4); outlier_detection<<<(THCudaTensor_nElement(state, d0) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, d0), THCudaTensor_data(state, d1), THCudaTensor_data(state, outlier), THCudaTensor_nElement(state, d0), THCudaTensor_size(state, d0, 3), disp_max); checkCudaError(L); return 0; } #if 0 __global__ void iterative_region_voting(float *d0, float *x0c, float *x1c, float *outlier, float *d0_out, float *outlier_out, int size, int dim2, int dim3, float tau_s, float tau_h, int disp_max) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; d0_out[id] = d0[id]; outlier_out[id] = outlier[id]; if (outlier[id] == 0) return; assert(disp_max < DISP_MAX); int hist[DISP_MAX]; for (int i = 0; i < disp_max; i++) { hist[i] = 0; } int yy_s = x0c[(2 * dim2 + y) * dim3 + x]; int yy_t = x0c[(3 * dim2 + y) * dim3 + x]; for (int yy = yy_s + 1; yy < yy_t; yy++) { int xx_s = x0c[(0 * dim2 + yy) * dim3 + x]; int xx_t = x0c[(1 * dim2 + yy) * dim3 + x]; for (int xx = xx_s + 1; xx < xx_t; xx++) { if (outlier[yy * dim3 + xx] == 0) { hist[(int)d0[yy * dim3 + xx]]++; } } } int cnt = 0; int max_i = 0; for (int i = 0; i < disp_max; i++) { cnt += hist[i]; if (hist[i] > hist[max_i]) { max_i = i; } } if (cnt > tau_s && (float)hist[max_i] / cnt > tau_h) { outlier_out[id] = 0; d0_out[id] = max_i; } } } int iterative_region_voting(lua_State *L) { THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x0c = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *x1c = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *outlier = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); float 
tau_s = luaL_checknumber(L, 5); float tau_h = luaL_checknumber(L, 6); int disp_max = luaL_checkinteger(L, 7); int iterations = luaL_checkinteger(L, 8); THCudaTensor *d0_tmp = new_tensor_like(state, d0); THCudaTensor *outlier_tmp = new_tensor_like(state, outlier); assert(iterations % 2 == 0); for (int i = 0; i < iterations; i++) { iterative_region_voting<<<(THCudaTensor_nElement(d0) - 1) / TB + 1, TB>>>( THCudaTensor_data(i % 2 == 0 ? d0 : d0_tmp), THCudaTensor_data(x0c), THCudaTensor_data(x1c), THCudaTensor_data(i % 2 == 0 ? outlier : outlier_tmp), THCudaTensor_data(i % 2 == 0 ? d0_tmp : d0), THCudaTensor_data(i % 2 == 0 ? outlier_tmp : outlier), THCudaTensor_nElement(d0), THCudaTensor_size(d0, 2), THCudaTensor_size(d0, 3), tau_s, tau_h, disp_max); } checkCudaError(L); return 0; } #endif __global__ void interpolate_mismatch(float *d0, float *outlier, float *out, int size, int dim2, int dim3) { const float dir[] = { 0 , 1, -0.5, 1, -1 , 1, -1 , 0.5, -1 , 0, -1 , -0.5, -1 , -1, -0.5, -1, 0 , -1, 0.5 , -1, 1 , -1, 1 , -0.5, 1 , 0, 1 , 0.5, 1 , 1, 0.5 , 1 }; int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { if (outlier[id] != 2) { out[id] = d0[id]; return; } float vals[16]; int vals_size = 0; int x = id % dim3; int y = id / dim3; for (int d = 0; d < 16; d++) { float dx = dir[2 * d]; float dy = dir[2 * d + 1]; float xx = x; float yy = y; int xx_i = round(xx); int yy_i = round(yy); while (0 <= yy_i && yy_i < dim2 && 0 <= xx_i && xx_i < dim3 && outlier[yy_i * dim3 + xx_i] == 2) { xx += dx; yy += dy; xx_i = round(xx); yy_i = round(yy); } int ind = yy_i * dim3 + xx_i; if (0 <= yy_i && yy_i < dim2 && 0 <= xx_i && xx_i < dim3) { assert(outlier[ind] != 2); vals[vals_size++] = d0[ind]; } } assert(vals_size > 0); sort(vals, vals_size); out[id] = vals[vals_size / 2]; } } int interpolate_mismatch(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *outlier = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *out = new_tensor_like(state, d0); interpolate_mismatch<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, d0), THCudaTensor_data(state, outlier), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3)); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } __global__ void interpolate_occlusion(float *d0, float *outlier, float *out, int size, int dim3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { if (outlier[id] != 1) { out[id] = d0[id]; return; } int x = id % dim3; int dx = 0; while (x + dx >= 0 && outlier[id + dx] != 0) { dx--; } if (x + dx < 0) { dx = 0; while (x + dx < dim3 && outlier[id + dx] != 0) { dx++; } } if (x + dx < dim3) { out[id] = d0[id + dx]; } else { out[id] = d0[id]; } } } int interpolate_occlusion(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *outlier = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *out = new_tensor_like(state, d0); interpolate_occlusion<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, d0), THCudaTensor_data(state, outlier), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 3) ); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } #if 0 __global__ void sobel(float *x, 
float *g1, float *g2, int size, int dim2, int dim3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int xx = id % dim3; int yy = id / dim3; if (1 <= yy && yy < dim2 - 1 && 1 <= xx && xx < dim3 - 1) { g1[id] = -x[id-dim3-1] +x[id-dim3+1] -2*x[id-1] +2*x[id+1] -x[id+dim3-1] +x[id+dim3+1]; g2[id] = x[id-dim3-1] +2*x[id-dim3] +x[id-dim3+1] -x[id+dim3-1] -2*x[id+dim3] -x[id+dim3+1]; } else { g1[id] = 0; g2[id] = 0; } } } int sobel(lua_State *L) { THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *g1 = new_tensor_like(x); THCudaTensor *g2 = new_tensor_like(x); sobel<<<(THCudaTensor_nElement(x) - 1) / TB + 1, TB>>>( THCudaTensor_data(x), THCudaTensor_data(g1), THCudaTensor_data(g2), THCudaTensor_nElement(x), THCudaTensor_size(x, 2), THCudaTensor_size(x, 3) ); checkCudaError(L); luaT_pushudata(L, g1, "torch.CudaTensor"); luaT_pushudata(L, g2, "torch.CudaTensor"); return 2; } __global__ void depth_discontinuity_adjustment(float *d0, float *dg1, float *dg2, float *xg1, float *xg2, float *out, int size, int dim3, float tau_e) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { if (abs(dg1[id]) > tau_e) { out[id] = xg1[id - 1] > xg1[id + 1] ? d0[id - 1] : d0[id + 1]; } else if (abs(dg2[id]) > tau_e) { out[id] = xg2[id - dim3] > xg2[id + dim3] ? d0[id - dim3] : d0[id + dim3]; } else { out[id] = d0[id]; } } } int depth_discontinuity_adjustment(lua_State *L) { THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *dg1 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *dg2 = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *xg1 = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *xg2 = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); float tau_e = luaL_checknumber(L, 6); THCudaTensor *out = new_tensor_like(d0); depth_discontinuity_adjustment<<<(THCudaTensor_nElement(out) - 1) / TB + 1, TB>>>( THCudaTensor_data(d0), THCudaTensor_data(dg1), THCudaTensor_data(dg2), THCudaTensor_data(xg1), THCudaTensor_data(xg2), THCudaTensor_data(out), THCudaTensor_nElement(out), THCudaTensor_size(out, 3), tau_e); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } #endif __global__ void subpixel_enchancement(float *d0, float *c2, float *out, int size, int dim23, int disp_max) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int d = d0[id]; out[id] = d; if (1 <= d && d < disp_max - 1) { float cn = c2[(d - 1) * dim23 + id]; float cz = c2[d * dim23 + id]; float cp = c2[(d + 1) * dim23 + id]; float denom = 2 * (cp + cn - 2 * cz); if (denom > 1e-5) { out[id] = d - min(1.0, max(-1.0, (cp - cn) / denom)); } } } } int subpixel_enchancement(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *d0 = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *c2 = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int disp_max = luaL_checkinteger(L, 3); THCudaTensor *out = new_tensor_like(state, d0); subpixel_enchancement<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, d0), THCudaTensor_data(state, c2), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2) * THCudaTensor_size(state, out, 3), disp_max); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } __global__ void mean2d(float *img, float *kernel, float *out, int size, int kernel_radius, int dim2, int dim3, float alpha2) { 
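/* mean2d: edge-preserving weighted mean. Each output pixel averages its
   (2*kernel_radius+1)^2 neighborhood with the supplied kernel weights, but a
   neighbor only contributes when its intensity differs from the center pixel
   by less than alpha2; the accumulated weight is renormalized accordingly. */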
int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; float sum = 0; float cnt = 0; int i = 0; for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) { for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++, i++) { if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2 && abs(img[yy * dim3 + xx] - img[y * dim3 + x]) < alpha2) { sum += img[yy * dim3 + xx] * kernel[i]; cnt += kernel[i]; } } } out[id] = sum / cnt; } } int mean2d(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *kernel = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); float alpha2 = luaL_checknumber(L, 3); THCudaTensor *out = new_tensor_like(state, img); assert(THCudaTensor_size(state, kernel, 0) % 2 == 1); mean2d<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, img), THCudaTensor_data(state, kernel), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, kernel, 0) / 2, THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), alpha2); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } __global__ void Normalize_get_norm_(float *input, float *norm, int size1, int size23, int size023) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size023) { int dim23 = id % size23; int dim0 = id / size23; float sum = 0.0; for (int dim1 = 0; dim1 < size1; dim1++) { float x = input[(dim0 * size1 + dim1) * size23 + dim23]; sum += x * x; } norm[dim0 * size23 + dim23] = sum + 1e-5; } } __global__ void Normalize_forward_(float *input, float *norm, float *output, int size23, int size123, int size0123) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size0123) { int dim23 = id % size23; int dim0 = (id / size123); output[id] = input[id] / sqrtf(norm[dim0 * size23 + dim23]); } } int Normalize_forward(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *norm = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); Normalize_get_norm_<<<(THCudaTensor_nElement(state, norm) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, input), THCudaTensor_data(state, norm), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_nElement(state, norm)); Normalize_forward_<<<(THCudaTensor_nElement(state, output) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, input), THCudaTensor_data(state, norm), THCudaTensor_data(state, output), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_size(state, input, 1) * THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_nElement(state, output)); checkCudaError(L); return 0; } __global__ void Normalize_backward_input_(float *grad_output, float *input, float *norm, float *grad_input, int size1, int size23, int size0123) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size0123) { int dim0 = id; int dim23 = dim0 % size23; dim0 /= size23; int dim1 = dim0 % size1; dim0 /= size1; float denom = powf(norm[dim0 * size23 + dim23], 1.5); float deriv = (norm[dim0 * size23 + dim23] - input[id] * input[id]) / denom * grad_output[id]; float sum = 0; for (int dim1_ = 0; dim1_ < size1; dim1_++) { if (dim1_ != dim1) { int ind = (dim0 * size1 + dim1_) 
* size23 + dim23; sum += input[ind] * grad_output[ind]; } } grad_input[id] = deriv - sum * input[id] / denom; } } int Normalize_backward_input(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *grad_output = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *norm = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *grad_input = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); Normalize_backward_input_<<<(THCudaTensor_nElement(state, input) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, grad_output), THCudaTensor_data(state, input), THCudaTensor_data(state, norm), THCudaTensor_data(state, grad_input), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_nElement(state, input)); checkCudaError(L); return 0; } struct Margin2_functor { float margin; __host__ Margin2_functor(float margin_) : margin(margin_) {}; __device__ float forward(float pos, float neg) { return fmaxf(0, neg - pos + margin); } __device__ float backward(float pos, float neg, int which) { float f = neg - pos + margin; if (which == 0) { return -1. * (f > 0); } else { return f > 0; } } }; struct Margin2_squared_functor { float margin; __host__ Margin2_squared_functor(float margin_) : margin(margin_) {}; __device__ float forward(float pos, float neg) { float d = fmaxf(0, neg - pos + margin); return d * d * 0.5; } __device__ float backward(float pos, float neg, int which) { float f = neg - pos + margin; if (which == 0) { return -f * (f > 0); } else { return f * (f > 0); } } }; template <class Op> __global__ void Margin2_(float *input, float *tmp, float *gradInput, float margin, Op op, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { float pos = input[id * 2]; float neg = input[id * 2 + 1]; tmp[id] = op.forward(pos, neg); gradInput[id * 2] = op.backward(pos, neg, 0); gradInput[id * 2 + 1] = op.backward(pos, neg, 1); } } int Margin2(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *tmp = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); float margin = luaL_checknumber(L, 4); int pow = luaL_checkinteger(L, 5); if (pow == 1) { Margin2_<<<(THCudaTensor_nElement(state, tmp) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, input), THCudaTensor_data(state, tmp), THCudaTensor_data(state, gradInput), margin, Margin2_functor(margin), THCudaTensor_nElement(state, tmp)); } else if (pow == 2) { Margin2_<<<(THCudaTensor_nElement(state, tmp) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, input), THCudaTensor_data(state, tmp), THCudaTensor_data(state, gradInput), margin, Margin2_squared_functor(margin), THCudaTensor_nElement(state, tmp)); } checkCudaError(L); return 0; } __global__ void StereoJoin_(float *input_L, float *input_R, float *output_L, float *output_R, int size1_input, int size1, int size3, int size23) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size23) { int dim3 = id % size3; assert(size1_input <= 128); float L_cache[128]; for (int i = 0; i < size1_input; i++) { L_cache[i] = input_L[i * size23 + id]; } for (int d = 0; d < size1; d++) { if (dim3 - d >= 0) { float sum = 0; for (int i = 0; i < size1_input; i++) { sum -= L_cache[i] * input_R[i * size23 + id - d]; } output_L[d * size23 + id] = sum; 
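/* The same negated dot product scores the pair (left column id, right column
   id - d), so write it into the right-view volume at the shifted column. */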
output_R[d * size23 + id - d] = sum; } } } } int StereoJoin(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input_L = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *input_R = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output_L = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *output_R = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); int size23 = THCudaTensor_size(state, output_L, 2) * THCudaTensor_size(state, output_L, 3); StereoJoin_<<<(size23 - 1) / TB + 1, TB>>>( THCudaTensor_data(state, input_L), THCudaTensor_data(state, input_R), THCudaTensor_data(state, output_L), THCudaTensor_data(state, output_R), THCudaTensor_size(state, input_L, 1), THCudaTensor_size(state, output_L, 1), THCudaTensor_size(state, output_L, 3), size23); checkCudaError(L); return 0; } __global__ void StereoL2R_(float *vol_L, float *vol_R, int size2, int size3, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int dim3 = id % size3; int dim1 = id / (size2 * size3); if (dim3 + dim1 >= size3) { vol_R[id] = CUDART_INF; } else { vol_R[id] = vol_L[id + dim1]; } } } int StereoL2R(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *vol_L = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *vol_R = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); StereoL2R_<<<(THCudaTensor_nElement(state, vol_L) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, vol_L), THCudaTensor_data(state, vol_R), THCudaTensor_size(state, vol_R, 2), THCudaTensor_size(state, vol_R, 3), THCudaTensor_nElement(state, vol_R)); checkCudaError(L); return 0; } __global__ void bilateral_filter(float *img, float *out, int size, int dim2, int dim3, int kernel_radius, float sigma1, float sigma2) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; float sum = 0; float cnt = 0; for (int i = -kernel_radius; i <= kernel_radius; i++) { for (int j = -kernel_radius; j <= kernel_radius; j++) { int yy = y + i; int xx = x + j; if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2) { float color_diff = img[yy * dim3 + xx] - img[y * dim3 + x]; float v1 = exp(-(i * i + j * j) / (2 * sigma1 * sigma1)); float v2 = exp(-(color_diff * color_diff) / (2 * sigma2 * sigma2)); sum += img[yy * dim3 + xx] * v1 * v2; cnt += v1 * v2; } } } out[id] = sum / cnt; } } int bilateral_filter(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); float sigma1 = luaL_checknumber(L, 2); float sigma2 = luaL_checknumber(L, 3); THCudaTensor *out = new_tensor_like(state, img); int kernel_radius = ceil(min(sigma1, sigma2) * 3); bilateral_filter<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, img), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), kernel_radius, sigma1, sigma2); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } __global__ void median2d(float *img, float *out, int size, int dim2, int dim3, int kernel_radius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; float xs[11 * 11]; int xs_size = 0; for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) { for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++) { if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2) { xs[xs_size++] = img[yy * 
dim3 + xx]; } } } sort(xs, xs_size); out[id] = xs[xs_size / 2]; } } int median2d(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); int kernel_size = luaL_checkinteger(L, 2); THCudaTensor *out = new_tensor_like(state, img); assert(kernel_size % 2 == 1); assert(kernel_size <= 11); median2d<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, img), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), kernel_size / 2); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } #if 0 int histogram(lua_State *L) { THFloatTensor *img = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); THIntTensor *hist = THIntTensor_newWithSize1d(256); THIntTensor_zero(hist); float *img_data = THFloatTensor_data(img); int *hist_data = THIntTensor_data(hist); for (int i = 0; i < THFloatTensor_size(img, 2) * THFloatTensor_size(img, 3); i++) { assert(0 <= img_data[i] && img_data[i] < 256); hist_data[(int)img_data[i]]++; } luaT_pushudata(L, hist, "torch.IntTensor"); return 1; } int histogram_equalization_map(lua_State *L) { THIntTensor *cdf = (THIntTensor*)luaT_checkudata(L, 1, "torch.IntTensor"); THIntTensor *map = THIntTensor_new(); THIntTensor_resizeAs(map, cdf); int *cdf_data = THIntTensor_data(cdf); int max = cdf_data[255]; int min = cdf_data[0]; for (int i = 0; i < 256; i++) { if (cdf_data[i]) { min = cdf_data[i]; break; } } int *map_data = THIntTensor_data(map); for (int i = 0; i < 256; i++) { map_data[i] = round((double)(cdf_data[i] - min) / (max - min) * 255); } luaT_pushudata(L, map, "torch.IntTensor"); return 1; } int map_intensities(lua_State *L) { THFloatTensor *img = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); THIntTensor *map = (THIntTensor*)luaT_checkudata(L, 2, "torch.IntTensor"); THFloatTensor *out = THFloatTensor_new(); THFloatTensor_resizeAs(out, img); float *img_data = THFloatTensor_data(img); float *out_data = THFloatTensor_data(out); int *map_data = THIntTensor_data(map); for (int i = 0; i < THFloatTensor_size(img, 2) * THFloatTensor_size(img, 3); i++) { out_data[i] = map_data[(int)img_data[i]]; } luaT_pushudata(L, out, "torch.FloatTensor"); return 1; } #endif int readPNG16(lua_State *L) { THFloatTensor *img_ = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); const char* fname = luaL_checkstring(L, 2); float *img = THFloatTensor_data(img_); png::image<png::gray_pixel_16> image(fname); int width = image.get_width(); int height = image.get_height(); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { uint16_t val = image.get_pixel(j, i); img[i * width + j] = val == 0 ? 0.0 : ((float)val)/256.0; } } return 0; } int writePNG16(lua_State *L) { THFloatTensor *img_ = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); int height = luaL_checkinteger(L, 2); int width = luaL_checkinteger(L, 3); const char* fname = luaL_checkstring(L, 4); float *img = THFloatTensor_data(img_); png::image<png::gray_pixel_16> image(width, height); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { float val = img[i * width + j]; image.set_pixel(j, i, (uint16_t)(val < 1e-5 ? 
0 : val * 256)); } } image.write(fname); return 0; } int writePFM(lua_State *L) { THFloatTensor *img_ = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); const char* fname = luaL_checkstring(L, 2); int height = THFloatTensor_size(img_, 0); int width = THFloatTensor_size(img_, 1); FILE *f = fopen(fname, "w"); fprintf(f, "Pf\n%d %d\n-0.003922\n", width, height); fwrite(THFloatTensor_data(img_), 4, height * width, f); fclose(f); return 0; } __global__ void remove_nonvisible(float *y, int size, int size3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % size3; if (y[id] >= x) { y[id] = 0; } } } int remove_nonvisible(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *y = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); remove_nonvisible<<<(THCudaTensor_nElement(state, y) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, y), THCudaTensor_nElement(state, y), THCudaTensor_size(state, y, 3)); checkCudaError(L); return 0; } __global__ void remove_occluded(float *y, int size, int size3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % size3; for (int i = 1; x + i < size3; i++) { if (i - y[id + i] < -y[id]) { y[id] = 0; break; } } } } int remove_occluded(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *y = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); remove_occluded<<<(THCudaTensor_nElement(state, y) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, y), THCudaTensor_nElement(state, y), THCudaTensor_size(state, y, 3)); checkCudaError(L); return 0; } __global__ void remove_white(float *x, float *y, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { if (x[id] == 255) { y[id] = 0; } } } int remove_white(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *y = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); remove_white<<<(THCudaTensor_nElement(state, y) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, x), THCudaTensor_data(state, y), THCudaTensor_nElement(state, y)); checkCudaError(L); return 0; } __global__ void copy_fill(float *in, float *out, int size, int in_size2, int in_size3, int out_size2, int out_size3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int out_x = id % out_size3; int out_y = id / out_size3; int in_x = out_x - (out_size3 - in_size3) / 2; int in_y = out_y - (out_size2 - in_size2) / 2; int x = min(in_size3 - 1, max(0, in_x)); int y = min(in_size2 - 1, max(0, in_y)); out[id] = in[y * in_size3 + x]; } } int copy_fill(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *in = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); copy_fill<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, in), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, in, 2), THCudaTensor_size(state, in, 3), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3)); checkCudaError(L); luaT_pushudata(L, out, "torch.CudaTensor"); return 1; } void memcpy2d(float *dst, float *src, int x, int y, int win_radius, int height, int width) { assert(0 <= x - win_radius); assert(x + win_radius <= width); assert(0 <= y - win_radius); assert(y + win_radius <= height); for (int i = -win_radius; i <= win_radius; i++) { memcpy(dst, src + (y + i) * width + x - win_radius, (win_radius * 2 + 1) 
* sizeof(float)); dst += win_radius * 2 + 1; } } double random_uniform() { return ((double)rand()/(double)RAND_MAX); } int random_int(int a, int b) { assert(a <= b); return floor(random_uniform() * (b - a + 1) + a); } double random_exp(double lambda) { double u = random_uniform(); return -log(u) / lambda; } int subset_dataset(lua_State *L) { THLongTensor *index_ = (THLongTensor*)luaT_checkudata(L, 1, "torch.LongTensor"); THFloatTensor *input_ = (THFloatTensor*)luaT_checkudata(L, 2, "torch.FloatTensor"); THFloatTensor *output_ = (THFloatTensor*)luaT_checkudata(L, 3, "torch.FloatTensor"); long *index = THLongTensor_data(index_); float *input = THFloatTensor_data(input_); float *output = THFloatTensor_data(output_); const int N = 200; int set[N]; for (int i = 0; i < N; i++) { set[i] = 0; } for (int i = 0; i < THLongTensor_nElement(index_); i++) { assert(index[i] < N); set[index[i]] = 1; } int i = 0; for (int j = 0; j < THFloatTensor_size(input_, 0); j++) { int im = input[j * 4]; if (set[im]) { for (int k = 0; k < 4; k++) { output[i * 4 + k] = input[j * 4 + k]; } i++; } } lua_pushinteger(L, i); return 1; } int make_dataset2(lua_State *L) { THFloatTensor *disp_ = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); THFloatTensor *nnz_ = (THFloatTensor*)luaT_checkudata(L, 2, "torch.FloatTensor"); int img = luaL_checkinteger(L, 3); int t = luaL_checkinteger(L, 4); float *disp = THFloatTensor_data(disp_); float *nnz = THFloatTensor_data(nnz_); int height = THFloatTensor_size(disp_, 2); int width = THFloatTensor_size(disp_, 3); int nnz_size = THFloatTensor_nElement(nnz_); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { if (disp[i * width + j] > 0.5) { assert(t * 4 + 4 <= nnz_size); nnz[t * 4 + 0] = img; nnz[t * 4 + 1] = i; nnz[t * 4 + 2] = j; nnz[t * 4 + 3] = disp[i * width + j]; t++; } } } lua_pushinteger(L, t); return 1; } int make_dataset(lua_State *L) { THFloatTensor *x0_ = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor"); THFloatTensor *x1_ = (THFloatTensor*)luaT_checkudata(L, 2, "torch.FloatTensor"); THFloatTensor *disp_ = (THFloatTensor*)luaT_checkudata(L, 3, "torch.FloatTensor"); THFloatTensor *x_ = (THFloatTensor*)luaT_checkudata(L, 4, "torch.FloatTensor"); THFloatTensor *y_ = (THFloatTensor*)luaT_checkudata(L, 5, "torch.FloatTensor"); int t = luaL_checkinteger(L, 6); float thr_true = luaL_checknumber(L, 7); float thr_false_l = luaL_checknumber(L, 8); float thr_false_u = luaL_checknumber(L, 9); float *x0 = THFloatTensor_data(x0_); float *x1 = THFloatTensor_data(x1_); float *disp = THFloatTensor_data(disp_); float *x = THFloatTensor_data(x_); float *y = THFloatTensor_data(y_); int height = THFloatTensor_size(x0_, 2); int width = THFloatTensor_size(x0_, 3); int win_size = THFloatTensor_size(x_, 2); int x_size = THFloatTensor_size(x_, 0); assert(win_size % 2 == 1); int win_radius = (win_size - 1) / 2; x += t * 2 * win_size * win_size; for (int i = win_radius; i < height - win_radius; i++) { for (int j = win_radius; j < width - win_radius; j++) { if (disp[i * width + j] > 0.5) { int d_true = round(disp[i * width + j]); if (0 <= j - d_true - win_radius) { /* true offset */ int delta = 0; for (;;) { delta = random_int(-thr_true, thr_true); if (0 <= j - d_true + delta - win_radius && j - d_true + delta + win_radius < width) { break; } } assert(t < x_size); memcpy2d(x, x0, j, i, win_radius, height, width); x += win_size * win_size; memcpy2d(x, x1, j - d_true + delta, i, win_radius, height, width); x += win_size * win_size; y[t] = 1; t++; /* false offset */ 
delta = 0; for (;;) { delta = random_int(thr_false_l, thr_false_u); if (random_uniform() < 0.5) { delta = -delta; } if (0 <= j - d_true + delta - win_radius && j - d_true + delta + win_radius < width) { break; } } assert(t < x_size); memcpy2d(x, x0, j, i, win_radius, height, width); x += win_size * win_size; memcpy2d(x, x1, j - d_true + delta, i, win_radius, height, width); x += win_size * win_size; y[t] = 0; t++; } } } } lua_pushinteger(L, t); return 1; } /* CPU implementation */ int grey2jet(lua_State *L) { THDoubleTensor *grey_img = (THDoubleTensor*)luaT_checkudata(L, 1, "torch.DoubleTensor"); THDoubleTensor *col_img = (THDoubleTensor*)luaT_checkudata(L, 2, "torch.DoubleTensor"); assert(grey_img->nDimension == 2); if (3 * THDoubleTensor_nElement(grey_img) != THDoubleTensor_nElement(col_img)) { luaL_error(L, "Size mismatch"); } int height = THDoubleTensor_size(grey_img, 0); int width = THDoubleTensor_size(grey_img, 1); double *gray_data = THDoubleTensor_data(grey_img); double *col_data = THDoubleTensor_data(col_img); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { double val = gray_data[i * width + j] * 4; double r = 0, g = 0, b = 0; if (-0.1 <= val && val < 0.5) { r = 0; g = 0; b = 0.5 + val; } else if (0.5 <= val && val < 1.5) { r = 0; g = val - 0.5; b = 1; } else if (1.5 <= val && val < 2.5) { r = val - 1.5; g = 1; b = 1 - (val - 1.5); } else if (2.5 <= val && val < 3.5) { r = 1; g = 1 - (val - 2.5); b = 0; } else if (3.5 <= val && val <= 4.1) { r = 1 - (val - 3.5); g = 0; b = 0; } else { printf("val = %f\n", val); assert(0); } col_data[(0 * height + i) * width + j] = r; col_data[(1 * height + i) * width + j] = g; col_data[(2 * height + i) * width + j] = b; } } return 0; } int version(lua_State* L) { printf("libadcensus version 0.0.5\n"); return 0; } static const struct luaL_Reg funcs[] = { {"ad", ad}, {"census", census}, {"cross", cross}, {"cbca", cbca}, {"sgm", sgm}, {"sgm2", sgm2}, {"sgm3", sgm3}, {"outlier_detection", outlier_detection}, {"interpolate_occlusion", interpolate_occlusion}, {"interpolate_mismatch", interpolate_mismatch}, {"subpixel_enchancement", subpixel_enchancement}, {"copy_fill", copy_fill}, {"median2d", median2d}, {"mean2d", mean2d}, {"Normalize_forward", Normalize_forward}, {"Normalize_backward_input", Normalize_backward_input}, {"Margin2", Margin2}, {"StereoJoin", StereoJoin}, {"StereoL2R", StereoL2R}, {"subset_dataset", subset_dataset}, {"make_dataset", make_dataset}, {"make_dataset2", make_dataset2}, {"remove_nonvisible", remove_nonvisible}, {"remove_occluded", remove_occluded}, {"remove_white", remove_white}, {"readPNG16", readPNG16}, {"writePNG16", writePNG16}, {"writePFM", writePFM}, {"grey2jet", grey2jet}, {"spatial_argmin", spatial_argmin}, {"version", version}, {NULL, NULL} }; #include "SpatialLogSoftMax.cu" extern "C" int luaopen_libadcensus(lua_State *L) { srand(42); cunn_SpatialLogSoftMax_init(L); luaL_openlib(L, "adcensus", funcs, 0); return 1; }
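All of the small adcensus kernels in the file above (remove_nonvisible, remove_occluded, remove_white, copy_fill) are launched with (nElement - 1) / TB + 1 blocks of TB threads and an in-kernel bounds check on the element index. Below is a minimal, self-contained sketch of that launch idiom; the scale kernel, the TB value, and the array size are illustrative and are not taken from the file above.

#include <cstdio>
#include <cuda_runtime.h>

#define TB 128  // threads per block, standing in for the TB constant used above

// Hypothetical elementwise kernel with the same bounds-check pattern.
__global__ void scale(float *x, float s, int size) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size) {
        x[id] *= s;
    }
}

int main() {
    const int n = 1000;                       // deliberately not a multiple of TB
    float *x;
    cudaMalloc(&x, n * sizeof(float));
    cudaMemset(x, 0, n * sizeof(float));
    // Ceiling division: enough blocks to cover n elements; the extra threads are masked off by the if.
    scale<<<(n - 1) / TB + 1, TB>>>(x, 2.0f, n);
    cudaDeviceSynchronize();
    printf("launched %d blocks of %d threads for %d elements\n", (n - 1) / TB + 1, TB, n);
    cudaFree(x);
    return 0;
}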
e79be5e24138b6aa31bf37a309434f21b44f0ecf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/zsymmetrize_tiles.cu, normal z -> d, Mon Jun 25 18:24:13 2018 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void dsymmetrize_tiles_lower( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dAT = MAGMA_D_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } *dA = MAGMA_D_MAKE( MAGMA_D_REAL(*dA), 0 ); // make diagonal real } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void dsymmetrize_tiles_upper( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dA = MAGMA_D_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } *dA = MAGMA_D_MAKE( MAGMA_D_REAL(*dA), 0 ); // make diagonal real } } /***************************************************************************//** Purpose ------- DSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. In Complex, it sets the diagonal to be Real. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_symmetrize_batched *******************************************************************************/ extern "C" void magmablas_dsymmetrize_tiles( magma_uplo_t uplo, magma_int_t m, magmaDouble_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( dsymmetrize_tiles_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } else { hipLaunchKernelGGL(( dsymmetrize_tiles_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } }
e79be5e24138b6aa31bf37a309434f21b44f0ecf.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/zsymmetrize_tiles.cu, normal z -> d, Mon Jun 25 18:24:13 2018 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void dsymmetrize_tiles_lower( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dAT = MAGMA_D_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } *dA = MAGMA_D_MAKE( MAGMA_D_REAL(*dA), 0 ); // make diagonal real } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void dsymmetrize_tiles_upper( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dA = MAGMA_D_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } *dA = MAGMA_D_MAKE( MAGMA_D_REAL(*dA), 0 ); // make diagonal real } } /***************************************************************************//** Purpose ------- DSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. In Complex, it sets the diagonal to be Real. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_symmetrize_batched *******************************************************************************/ extern "C" void magmablas_dsymmetrize_tiles( magma_uplo_t uplo, magma_int_t m, magmaDouble_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { dsymmetrize_tiles_upper <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } else { dsymmetrize_tiles_lower <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } }
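Apart from the hipify banner and the hip_runtime.h include, the substantive difference between the .hip and .cu versions of this MAGMA file is the launch syntax: the CUDA kernel<<<grid, threads, shmem, stream>>> form becomes hipLaunchKernelGGL(kernel, grid, threads, shmem, stream, args...). A minimal sketch of that correspondence follows, using a hypothetical fill kernel rather than the MAGMA one; the HIP spelling appears only in a comment so the snippet itself stays plain CUDA.

#include <cuda_runtime.h>

__global__ void fill(float *a, float v, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) a[i] = v;
}

int main() {
    const int n = 1 << 20;
    float *a;
    cudaMalloc(&a, n * sizeof(float));
    dim3 threads(256, 1);
    dim3 grid((n + threads.x - 1) / threads.x, 1);
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    // CUDA launch, as written in the .cu file above:
    fill<<<grid, threads, 0, stream>>>(a, 1.0f, n);
    // HIP equivalent, as emitted by hipify in the .hip file above:
    //   hipLaunchKernelGGL(fill, grid, threads, 0, stream, a, 1.0f, n);
    cudaStreamSynchronize(stream);
    cudaStreamDestroy(stream);
    cudaFree(a);
    return 0;
}

Note that the hipify output above keeps the shared-memory size and stream arguments explicit even when they are 0, since hipLaunchKernelGGL has no defaulted launch parameters.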
abd76a0f12f8b61a14cb0f859f24d508874c9ea8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kSquare(float* gData, float* target, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x) target[i] = gData[i] * gData[i]; }
abd76a0f12f8b61a14cb0f859f24d508874c9ea8.cu
#include "includes.h" __global__ void kSquare(float* gData, float* target, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x) target[i] = gData[i] * gData[i]; }
a2faf060fcc1fd0283eafe6fde71f1b21d4ffb10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void meanMatrix(double *dMatrix, double *dMean, int dSize, int *d_mutex){ __shared__ double cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; double temp = 0; while (tid < dSize) { temp += dMatrix[tid]; tid += blockDim.x * gridDim.x; } // set the cache values cache[cacheIndex] = temp; // synchronize threads in this block __syncthreads(); int i = blockDim.x/2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if(cacheIndex == 0){ while(atomicCAS(d_mutex,0,1) != 0); //lock *dMean += cache[0]; atomicExch(d_mutex, 0); //unlock *dMean = dMean[0]/dSize; } }
a2faf060fcc1fd0283eafe6fde71f1b21d4ffb10.cu
#include "includes.h" __global__ void meanMatrix(double *dMatrix, double *dMean, int dSize, int *d_mutex){ __shared__ double cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; double temp = 0; while (tid < dSize) { temp += dMatrix[tid]; tid += blockDim.x * gridDim.x; } // set the cache values cache[cacheIndex] = temp; // synchronize threads in this block __syncthreads(); int i = blockDim.x/2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if(cacheIndex == 0){ while(atomicCAS(d_mutex,0,1) != 0); //lock *dMean += cache[0]; atomicExch(d_mutex, 0); //unlock *dMean = dMean[0]/dSize; } }
d070caee06eed3e98f3dad192946601d672e5722.hip
// !!! This is a file automatically generated by hipify!!! #include "stdio.h" #include "stdlib.h" #include "string.h" #include "errno.h" #include "pthread.h" #include "math.h" #define THREADS 4 // Receives two matrices and multiplies them; matrix c must be initialized to 0 typedef struct tdata { unsigned long ** a; unsigned long ** b; unsigned long ** c; int N; int idx; int total; }tdata; void * matrixMult(void * data) { tdata * toprocess = (tdata*)data; int i, j, k; int N = toprocess->N; int idx = toprocess->idx; int total = toprocess->total; for(i = 0; i < N; ++i) { if((idx*N)+i < total) { for(j = 0; j < total; ++j) { for(k = 0; k < total; ++k) { toprocess->c[(idx*N)+i][j] += toprocess->a[(idx*N)+i][k]*toprocess->b[k][j]; } } } } pthread_exit(NULL); } int main(int argc, char * argv[]) { // Variable declarations int i, j, cont, N, linesToDo; unsigned long **a, **b, **c; char *p; hipEvent_t start, stop, startTotal, stopTotal; float tiempo, tiempoTotal; pthread_t * threads; tdata * data; // Initialize timing events hipEventCreate(&start); hipEventCreate(&startTotal); hipEventCreate(&stop); hipEventCreate(&stopTotal); // Start measuring total time hipEventRecord(startTotal, 0); if(argc >= 2) { N = (int)strtol(argv[1], &p, 10); if (*p != '\0' && errno != 0) { printf("First parameter must be a number.\n"); printf("Error: %s\n", strerror(errno)); return 1; } } else { printf("No input value for N, please provide one.\n"); return 1; } // Allocate memory for the array of arrays a = (unsigned long**)malloc(N*sizeof(unsigned long*)); b = (unsigned long**)malloc(N*sizeof(unsigned long*)); c = (unsigned long**)malloc(N*sizeof(unsigned long*)); // Allocate memory for each array for(i = 0; i < N; ++i) { *(a+i) = (unsigned long*)malloc(N*sizeof(unsigned long)); *(b+i) = (unsigned long*)malloc(N*sizeof(unsigned long)); *(c+i) = (unsigned long*)malloc(N*sizeof(unsigned long)); } // Allocate memory for threads and per-thread data, and fill them in linesToDo = ceil((double)N/THREADS); data = (tdata*)malloc(THREADS*sizeof(tdata)); threads = (pthread_t*)malloc(THREADS*sizeof(pthread_t)); for(i = 0; i < THREADS; ++i) { (data+i)->a = a; (data+i)->b = b; (data+i)->c = c; (data+i)->N = linesToDo; (data+i)->idx = i; (data+i)->total = N; } // Fill matrices with test values for(i = 0; i < N; ++i) { for(j = 0; j < N; ++j) { a[i][j] = rand() % 1000 + 11; b[i][j] = rand() % 1000 + 11; c[i][j] = 0; ++cont; } } // Measure computation time hipEventRecord(start, 0); // Do the multiplication for(i = 0; i < THREADS; ++i) { if(pthread_create(threads+i, NULL, matrixMult, data+i)) { printf("Error creating thread"); return 1; } } for(i = 0; i < THREADS; ++i) { pthread_join(*(threads+i), NULL); } // Stop the computation timer hipEventRecord(stop, 0); hipEventSynchronize(stop); // Stop the total timer hipEventRecord(stopTotal, 0); hipEventSynchronize(stopTotal); // compute elapsed times hipEventElapsedTime(&tiempo, start, stop); hipEventElapsedTime(&tiempoTotal, startTotal, stopTotal); // Print the matrices to be multiplied if(argc >= 3 && !strcmp(argv[2], "2")) { printf("matrix a:\n"); for(i = 0; i < N; ++i) { for(j = 0; j < N; ++j) { printf("%lu\t", a[i][j]); } printf("\n"); } printf("\nmatrix b:\n"); for(i = 0; i < N; ++i) { for(j = 0; j < N; ++j) { printf("%lu\t", b[i][j]); } printf("\n"); } } // Print the result if the argument is 1 or 2 if(argc >= 3 && (!strcmp(argv[2], "2") || !strcmp(argv[2], "1"))) { printf("\nresult matrix:\n"); for(i = 0; i < N; ++i) { for(j = 0; j < N; ++j) { printf("%lu\t", c[i][j]); } printf("\n"); } } printf("Total time: %f ms.\n", tiempoTotal); printf("Computation time: %f ms.\n", tiempo); // Free memory for(i = 0; i < N; ++i) { free(*(a+i)); free(*(b+i)); free(*(c+i)); } free(a); free(b); free(c); free(data); free(threads); return 0; }
d070caee06eed3e98f3dad192946601d672e5722.cu
#include "stdio.h" #include "stdlib.h" #include "string.h" #include "errno.h" #include "pthread.h" #include "math.h" #define THREADS 4 // Recibe dos matrices y las multiplica, la matriz c debe estar inicializada en 0 typedef struct tdata { unsigned long ** a; unsigned long ** b; unsigned long ** c; int N; int idx; int total; }tdata; void * matrixMult(void * data) { tdata * toprocess = (tdata*)data; int i, j, k; int N = toprocess->N; int idx = toprocess->idx; int total = toprocess->total; for(i = 0; i < N; ++i) { if((idx*N)+i < total) { for(j = 0; j < total; ++j) { for(k = 0; k < total; ++k) { toprocess->c[(idx*N)+i][j] += toprocess->a[(idx*N)+i][k]*toprocess->b[k][j]; } } } } pthread_exit(NULL); } int main(int argc, char * argv[]) { // Declaracion de variables int i, j, cont, N, linesToDo; unsigned long **a, **b, **c; char *p; cudaEvent_t start, stop, startTotal, stopTotal; float tiempo, tiempoTotal; pthread_t * threads; tdata * data; // Inicializar medidas de tiempo cudaEventCreate(&start); cudaEventCreate(&startTotal); cudaEventCreate(&stop); cudaEventCreate(&stopTotal); // Comenzar a medir tiempo total cudaEventRecord(startTotal, 0); if(argc >= 2) { N = (int)strtol(argv[1], &p, 10); if (*p != '\0' && errno != 0) { printf("Primer parametro debe ser un numero.\n"); printf("Error: %s\n", strerror(errno)); return 1; } } else { printf("No hay valor de N de entrada, ingrese valor.\n"); return 1; } // Reservar memoria para arreglo de arreglos a = (unsigned long**)malloc(N*sizeof(unsigned long*)); b = (unsigned long**)malloc(N*sizeof(unsigned long*)); c = (unsigned long**)malloc(N*sizeof(unsigned long*)); // Reservar memoria para cada arreglo for(i = 0; i < N; ++i) { *(a+i) = (unsigned long*)malloc(N*sizeof(unsigned long)); *(b+i) = (unsigned long*)malloc(N*sizeof(unsigned long)); *(c+i) = (unsigned long*)malloc(N*sizeof(unsigned long)); } // Resrevar memoria para threads, datos de cada thread y llenarlos linesToDo = ceil((double)N/THREADS); data = (tdata*)malloc(THREADS*sizeof(tdata)); threads = (pthread_t*)malloc(THREADS*sizeof(pthread_t)); for(i = 0; i < THREADS; ++i) { (data+i)->a = a; (data+i)->b = b; (data+i)->c = c; (data+i)->N = linesToDo; (data+i)->idx = i; (data+i)->total = N; } // LLenar matrices con valores prueba for(i = 0; i < N; ++i) { for(j = 0; j < N; ++j) { a[i][j] = rand() % 1000 + 11; b[i][j] = rand() % 1000 + 11; c[i][j] = 0; ++cont; } } // Medir tiempo de calculo cudaEventRecord(start, 0); // Hacer la multiplicacion for(i = 0; i < THREADS; ++i) { if(pthread_create(threads+i, NULL, matrixMult, data+i)) { printf("Error al crear thread"); return 1; } } for(i = 0; i < THREADS; ++i) { pthread_join(*(threads+i), NULL); } // Finalizar tiempo de calculo cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Finalizar tiempo total cudaEventRecord(stopTotal, 0); cudaEventSynchronize(stopTotal); // calcular tiempos cudaEventElapsedTime(&tiempo, start, stop); cudaEventElapsedTime(&tiempoTotal, startTotal, stopTotal); // Imprimir que matrices se van a multiplicar if(argc >= 3 && !strcmp(argv[2], "2")) { printf("matriz a:\n"); for(i = 0; i < N; ++i) { for(j = 0; j < N; ++j) { printf("%lu\t", a[i][j]); } printf("\n"); } printf("\nmatriz b:\n"); for(i = 0; i < N; ++i) { for(j = 0; j < N; ++j) { printf("%lu\t", b[i][j]); } printf("\n"); } } // Imprimir resultado si el argumento es 1 o 2 if(argc >= 3 && (!strcmp(argv[2], "2") || !strcmp(argv[2], "1"))) { printf("\nmatriz resultado:\n"); for(i = 0; i < N; ++i) { for(j = 0; j < N; ++j) { printf("%lu\t", c[i][j]); } printf("\n"); } } 
printf("Tiempo total: %f ms.\n", tiempoTotal); printf("Tiempo de calculo: %f ms.\n", tiempo); // Liberar memoria for(i = 0; i < N; ++i) { free(*(a+i)); free(*(b+i)); free(*(c+i)); } free(a); free(b); free(c); free(data); free(threads); return 0; }
eb436e39c77956705554b0967951796bfe373bf6.hip
// !!! This is a file automatically generated by hipify!!! #include "mycuda.h" #include "mycuda_public.h" #include <hip/hip_runtime.h> #include <device_launch_parameters.h> using array = std::vector<int>; using array2d = std::vector<array>; __global__ void ScanKernel(const int* a, int size) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < size) printf("i=%d, in[i]=%d\n", i, in[i]); } void Tmp(const cuda::ArrayInt& dev_in, cuda::ArrayInt& dev_out, int size) { int numBlocks; int numThreads; if (size < MAX_THREADS) { numBlocks = 1; numThreads = size; } else { numBlocks = (size + MAX_THREADS - 1) / MAX_THREADS; numThreads = MAX_THREADS; } hipLaunchKernelGGL(( TmpKernel) , dim3(numBlocks), dim3(numThreads) , 0, 0, dev_in.data() + 3, dev_out.data(), size - 3); #ifdef _DEBUG cuda::CheckLastError(); cuda::DeviceSynchronize(); #endif } int testTmp() { int size = 24; std::vector<int> a(size); for (int i = 0; i < size; ++i) a[i] = i; std::vector<int> b(size); cuda::ArrayInt dev_a(a), dev_b(size); Tmp(dev_a, dev_b, size); cuda::Memcpy(b, dev_b, size); /*for (int i = 0; i < size; ++i) std::cout << "b" << i << '=' << b[i] << '\n';*/ return 0; } int test_sum() { array b(1); array a = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; cuda::ArrayInt dev_a(a), dev_b(1); cuda::Sum(dev_a, dev_b, a.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << '\n'; array a2(1030, 1); cuda::ArrayInt dev_a2(a2); cuda::Sum(dev_a2, dev_b, a2.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << '\n'; array a3 = { 6, 5, 4, 5, 7, 8, 10 }; cuda::ArrayInt dev_a3(a3); int sum3 = cuda::Sum(dev_a3.data() + 1, a3.size() - 1); std::cout << sum3 << '\n'; /* 39 */ return 0; } int test_min() { array b(1); cuda::ArrayInt dev_b(1); array a(128); for (int i = 0; i < (int)a.size(); ++i) a[i] = int(a.size()) - i + 1000000; cuda::ArrayInt dev_a(a); cuda::Min(dev_a, dev_b, a.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << '\n'; array a2(64, 9); a2[31] = 7; a2[33] = 10; cuda::ArrayInt dev_a2(a2); cuda::Min(dev_a2, dev_b, a2.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << '\n'; cuda::CheckLastError(); array a3 = { 6, 5, 4, 5, 13, 7, 8, 10, 2 }; cuda::ArrayInt dev_a3(a3); cuda::Min(dev_a3, dev_b, a3.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << '\n'; cuda::CheckLastError(); return 0; } int test_min_index() { array b(1); cuda::ArrayInt dev_b(1); array a(128, 0); a[50] = 1; cuda::ArrayInt dev_a(a); cuda::MinIndex(dev_a, dev_b, a.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << " Expected: " << 50 << '\n'; array a2(60, 0); a2[31] = 7; a2[33] = 10; cuda::ArrayInt dev_a2(a2); cuda::MinIndex(dev_a2, dev_b, a2.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << " Expected: " << 31 << '\n'; cuda::CheckLastError(); array a3 = { 0, 0, 1, 2, 3, 4, 5 }; cuda::ArrayInt dev_a3(a3); cuda::MinIndex(dev_a3, dev_b, a3.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << " Expected: " << 2 << '\n'; cuda::CheckLastError(); return 0; } int test_echelon() { Timer timer; array2d m = { {1, 3, 4}, {1, 2, 4}, {3}, {0, 1, 2, 3, 4} }; array2d rref = cuda::EchelonCuda(m); for (size_t i = 0; i < rref.size(); ++i) { for (size_t j = 0; j < rref[i].size(); ++j) std::cout << rref[i][j] << ", "; std::cout << '\n'; } std::cout << '\n'; /* hipDeviceReset must be called before exiting in order for profiling and ** tracing tools such as Nsight and Visual Profiler to show complete traces. */ if (hipDeviceReset() != hipSuccess) { std::cerr << "hipDeviceReset failed!"; return 1; } return 0; } int main() { return test_echelon(); }
eb436e39c77956705554b0967951796bfe373bf6.cu
#include "mycuda.h" #include "mycuda_public.h" #include <cuda_runtime.h> #include <device_launch_parameters.h> using array = std::vector<int>; using array2d = std::vector<array>; __global__ void ScanKernel(const int* a, int size) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < size) printf("i=%d, in[i]=%d\n", i, in[i]); } void Tmp(const cuda::ArrayInt& dev_in, cuda::ArrayInt& dev_out, int size) { int numBlocks; int numThreads; if (size < MAX_THREADS) { numBlocks = 1; numThreads = size; } else { numBlocks = (size + MAX_THREADS - 1) / MAX_THREADS; numThreads = MAX_THREADS; } TmpKernel <<< numBlocks, numThreads >>> (dev_in.data() + 3, dev_out.data(), size - 3); #ifdef _DEBUG cuda::CheckLastError(); cuda::DeviceSynchronize(); #endif } int testTmp() { int size = 24; std::vector<int> a(size); for (int i = 0; i < size; ++i) a[i] = i; std::vector<int> b(size); cuda::ArrayInt dev_a(a), dev_b(size); Tmp(dev_a, dev_b, size); cuda::Memcpy(b, dev_b, size); /*for (int i = 0; i < size; ++i) std::cout << "b" << i << '=' << b[i] << '\n';*/ return 0; } int test_sum() { array b(1); array a = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; cuda::ArrayInt dev_a(a), dev_b(1); cuda::Sum(dev_a, dev_b, a.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << '\n'; array a2(1030, 1); cuda::ArrayInt dev_a2(a2); cuda::Sum(dev_a2, dev_b, a2.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << '\n'; array a3 = { 6, 5, 4, 5, 7, 8, 10 }; cuda::ArrayInt dev_a3(a3); int sum3 = cuda::Sum(dev_a3.data() + 1, a3.size() - 1); std::cout << sum3 << '\n'; /* 39 */ return 0; } int test_min() { array b(1); cuda::ArrayInt dev_b(1); array a(128); for (int i = 0; i < (int)a.size(); ++i) a[i] = int(a.size()) - i + 1000000; cuda::ArrayInt dev_a(a); cuda::Min(dev_a, dev_b, a.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << '\n'; array a2(64, 9); a2[31] = 7; a2[33] = 10; cuda::ArrayInt dev_a2(a2); cuda::Min(dev_a2, dev_b, a2.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << '\n'; cuda::CheckLastError(); array a3 = { 6, 5, 4, 5, 13, 7, 8, 10, 2 }; cuda::ArrayInt dev_a3(a3); cuda::Min(dev_a3, dev_b, a3.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << '\n'; cuda::CheckLastError(); return 0; } int test_min_index() { array b(1); cuda::ArrayInt dev_b(1); array a(128, 0); a[50] = 1; cuda::ArrayInt dev_a(a); cuda::MinIndex(dev_a, dev_b, a.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << " Expected: " << 50 << '\n'; array a2(60, 0); a2[31] = 7; a2[33] = 10; cuda::ArrayInt dev_a2(a2); cuda::MinIndex(dev_a2, dev_b, a2.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << " Expected: " << 31 << '\n'; cuda::CheckLastError(); array a3 = { 0, 0, 1, 2, 3, 4, 5 }; cuda::ArrayInt dev_a3(a3); cuda::MinIndex(dev_a3, dev_b, a3.size()); cuda::Memcpy(b, dev_b, 1); std::cout << b[0] << " Expected: " << 2 << '\n'; cuda::CheckLastError(); return 0; } int test_echelon() { Timer timer; array2d m = { {1, 3, 4}, {1, 2, 4}, {3}, {0, 1, 2, 3, 4} }; array2d rref = cuda::EchelonCuda(m); for (size_t i = 0; i < rref.size(); ++i) { for (size_t j = 0; j < rref[i].size(); ++j) std::cout << rref[i][j] << ", "; std::cout << '\n'; } std::cout << '\n'; /* cudaDeviceReset must be called before exiting in order for profiling and ** tracing tools such as Nsight and Visual Profiler to show complete traces. */ if (cudaDeviceReset() != cudaSuccess) { std::cerr << "cudaDeviceReset failed!"; return 1; } return 0; } int main() { return test_echelon(); }
4d3934196664fbdceb6ea661b93f8e58adb75d34.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <rocblas.h> using std::cout; int BLOCK_MAX_THREADS = 512; double random(float start, float end) { float random = ((float) rand()) / (float) RAND_MAX; float r = random * (end - start); return start + r; } void createArrayWithRandomValues(float* inputArray, int size) { srand(time(NULL)); int i = 0; while(i<size) { inputArray[i] = random(0,10); i++; } } __global__ void MatrixMultKernel(float* d_A, float* d_B, float* d_C, int rowsA, int columnsB, int denom) { int index = threadIdx.x + blockIdx.x * blockDim.x; int size = rowsA * columnsB; if(index < size) { float dotProduct = 0; int rowIndex = index / columnsB; int columnIndex = index % columnsB; int rowIndexA = rowIndex * denom; for(int i=0; i<denom; i++) { float row = d_A[rowIndexA+i]; float column = d_B[columnIndex + (columnsB * i)]; int prod = row * column; dotProduct = dotProduct + prod; } d_C[index] = dotProduct; } __syncthreads(); } void gpu_blas_mmul(const float *A, const float *B, float *C, const int m, const int k, const int n) { int lda=m,ldb=k,ldc=m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; hipblasHandle_t handle; hipblasCreate(&handle); hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, B, lda, A, ldb, beta, C, ldc); hipblasDestroy(handle); } int main() { float cuda_elapsed_time, cuda_elapsed_time2; hipEvent_t cuda_start, cuda_start2, cuda_stop, cuda_stop2; hipEventCreate(&cuda_start); hipEventCreate(&cuda_stop); hipEventCreate(&cuda_start2); hipEventCreate(&cuda_stop2); int rowsA = 30000; int columnsA = 200; int sizeA = rowsA*columnsA; int rowsB = 20000; int columnsB = 400; int sizeB = rowsB*columnsB; int sizeC = rowsA*columnsB; float* matrixA = new float[sizeA]; float* matrixB = new float[sizeB]; float* matrixC = new float[sizeC]; createArrayWithRandomValues(matrixA, sizeA); createArrayWithRandomValues(matrixB, sizeB); /* uncomment to see inputs cout<<"Matrix A: \n"; for(int i=0; i<sizeA; i++) { cout<<matrixA[i]<<" "; } cout<<"\n"; cout<<"Matrix B: \n"; for(int i=0; i<sizeB; i++) { cout<<matrixB[i]<<" "; } cout<<"\n"; */ float* dmA; float* dmB; float* dmC; hipMalloc((void**) &dmA, sizeof(float)*sizeA); hipMemcpy(dmA, matrixA, sizeof(float)*sizeA, hipMemcpyHostToDevice); hipMalloc((void**) &dmB, sizeof(float)*sizeB); hipMemcpy(dmB, matrixB, sizeof(float)*sizeB, hipMemcpyHostToDevice); hipMalloc((void**) &dmC, sizeof(float)*sizeC); hipMemcpy(dmC, matrixC, sizeof(float)*sizeC, hipMemcpyHostToDevice); int spb = sizeC + (BLOCK_MAX_THREADS - 1); int numBlocks = spb / BLOCK_MAX_THREADS; hipEventRecord(cuda_start, 0); hipLaunchKernelGGL(( MatrixMultKernel), dim3(numBlocks), dim3(BLOCK_MAX_THREADS), 0, 0, dmA, dmB, dmC, rowsA, columnsB, columnsA); hipEventRecord(cuda_stop, 0); hipMemcpy(matrixC, dmC, sizeof(float)*sizeC, hipMemcpyDeviceToHost); /*uncomment to check result for(int i=0; i<sizeC; i++) { cout<<matrixC[i]<<" "; } cout<<"\n\n"; */ hipFree(dmA); hipFree(dmB); hipFree(dmC); float* mmA; float* mmB; float* mmC; float* res = new float[sizeC]; hipMalloc((void**) &mmA, sizeof(float)*sizeA); hipMemcpy(mmA, matrixA, sizeof(float)*sizeA, hipMemcpyHostToDevice); hipMalloc((void**) &mmB, sizeof(float)*sizeB); hipMemcpy(mmB, matrixB, sizeof(float)*sizeB, hipMemcpyHostToDevice); hipMalloc((void**) &mmC, sizeof(float)*sizeC); hipMemcpy(mmC, res, sizeof(float)*sizeC, hipMemcpyHostToDevice); hipEventRecord(cuda_start2, 0); gpu_blas_mmul(mmA, mmB, mmC, columnsB, 
columnsA, columnsB); hipEventRecord(cuda_stop2, 0); hipMemcpy(res, mmC ,sizeof(float)*sizeC,hipMemcpyDeviceToHost); /* uncomment to check result for(int i=0; i<sizeC; i++) { cout<<res[i]<<" "; } cout<<"\n"; */ float mse = 0.0; for (int i = 0; i < sizeC; ++i) { mse = mse + pow(res[i] - matrixC[i], 2); } mse = mse / sizeC; cout << "MSE: " << mse << std::endl; hipEventElapsedTime(&cuda_elapsed_time, cuda_start, cuda_stop); hipEventElapsedTime(&cuda_elapsed_time2, cuda_start2, cuda_stop2); printf("Algorithm only cuda clock cycles for regular : %f\n", cuda_elapsed_time); printf("Algorithm only cuda clock cycles for cublas : %f\n", cuda_elapsed_time2); free(matrixA); free(matrixB); free(matrixC); free(res); hipFree(mmA); hipFree(mmB); hipFree(mmC); return 0; }
4d3934196664fbdceb6ea661b93f8e58adb75d34.cu
#include <iostream> #include <cublas_v2.h> using std::cout; int BLOCK_MAX_THREADS = 512; double random(float start, float end) { float random = ((float) rand()) / (float) RAND_MAX; float r = random * (end - start); return start + r; } void createArrayWithRandomValues(float* inputArray, int size) { srand(time(NULL)); int i = 0; while(i<size) { inputArray[i] = random(0,10); i++; } } __global__ void MatrixMultKernel(float* d_A, float* d_B, float* d_C, int rowsA, int columnsB, int denom) { int index = threadIdx.x + blockIdx.x * blockDim.x; int size = rowsA * columnsB; if(index < size) { float dotProduct = 0; int rowIndex = index / columnsB; int columnIndex = index % columnsB; int rowIndexA = rowIndex * denom; for(int i=0; i<denom; i++) { float row = d_A[rowIndexA+i]; float column = d_B[columnIndex + (columnsB * i)]; int prod = row * column; dotProduct = dotProduct + prod; } d_C[index] = dotProduct; } __syncthreads(); } void gpu_blas_mmul(const float *A, const float *B, float *C, const int m, const int k, const int n) { int lda=m,ldb=k,ldc=m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; cublasHandle_t handle; cublasCreate(&handle); cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, B, lda, A, ldb, beta, C, ldc); cublasDestroy(handle); } int main() { float cuda_elapsed_time, cuda_elapsed_time2; cudaEvent_t cuda_start, cuda_start2, cuda_stop, cuda_stop2; cudaEventCreate(&cuda_start); cudaEventCreate(&cuda_stop); cudaEventCreate(&cuda_start2); cudaEventCreate(&cuda_stop2); int rowsA = 30000; int columnsA = 200; int sizeA = rowsA*columnsA; int rowsB = 20000; int columnsB = 400; int sizeB = rowsB*columnsB; int sizeC = rowsA*columnsB; float* matrixA = new float[sizeA]; float* matrixB = new float[sizeB]; float* matrixC = new float[sizeC]; createArrayWithRandomValues(matrixA, sizeA); createArrayWithRandomValues(matrixB, sizeB); /* uncomment to see inputs cout<<"Matrix A: \n"; for(int i=0; i<sizeA; i++) { cout<<matrixA[i]<<" "; } cout<<"\n"; cout<<"Matrix B: \n"; for(int i=0; i<sizeB; i++) { cout<<matrixB[i]<<" "; } cout<<"\n"; */ float* dmA; float* dmB; float* dmC; cudaMalloc((void**) &dmA, sizeof(float)*sizeA); cudaMemcpy(dmA, matrixA, sizeof(float)*sizeA, cudaMemcpyHostToDevice); cudaMalloc((void**) &dmB, sizeof(float)*sizeB); cudaMemcpy(dmB, matrixB, sizeof(float)*sizeB, cudaMemcpyHostToDevice); cudaMalloc((void**) &dmC, sizeof(float)*sizeC); cudaMemcpy(dmC, matrixC, sizeof(float)*sizeC, cudaMemcpyHostToDevice); int spb = sizeC + (BLOCK_MAX_THREADS - 1); int numBlocks = spb / BLOCK_MAX_THREADS; cudaEventRecord(cuda_start, 0); MatrixMultKernel<<<numBlocks, BLOCK_MAX_THREADS>>>(dmA, dmB, dmC, rowsA, columnsB, columnsA); cudaEventRecord(cuda_stop, 0); cudaMemcpy(matrixC, dmC, sizeof(float)*sizeC, cudaMemcpyDeviceToHost); /*uncomment to check result for(int i=0; i<sizeC; i++) { cout<<matrixC[i]<<" "; } cout<<"\n\n"; */ cudaFree(dmA); cudaFree(dmB); cudaFree(dmC); float* mmA; float* mmB; float* mmC; float* res = new float[sizeC]; cudaMalloc((void**) &mmA, sizeof(float)*sizeA); cudaMemcpy(mmA, matrixA, sizeof(float)*sizeA, cudaMemcpyHostToDevice); cudaMalloc((void**) &mmB, sizeof(float)*sizeB); cudaMemcpy(mmB, matrixB, sizeof(float)*sizeB, cudaMemcpyHostToDevice); cudaMalloc((void**) &mmC, sizeof(float)*sizeC); cudaMemcpy(mmC, res, sizeof(float)*sizeC, cudaMemcpyHostToDevice); cudaEventRecord(cuda_start2, 0); gpu_blas_mmul(mmA, mmB, mmC, columnsB, columnsA, columnsB); cudaEventRecord(cuda_stop2, 0); cudaMemcpy(res, mmC 
,sizeof(float)*sizeC,cudaMemcpyDeviceToHost); /* uncomment to check result for(int i=0; i<sizeC; i++) { cout<<res[i]<<" "; } cout<<"\n"; */ float mse = 0.0; for (int i = 0; i < sizeC; ++i) { mse = mse + pow(res[i] - matrixC[i], 2); } mse = mse / sizeC; cout << "MSE: " << mse << std::endl; cudaEventElapsedTime(&cuda_elapsed_time, cuda_start, cuda_stop); cudaEventElapsedTime(&cuda_elapsed_time2, cuda_start2, cuda_stop2); printf("Algorithm only cuda clock cycles for regular : %f\n", cuda_elapsed_time); printf("Algorithm only cuda clock cycles for cublas : %f\n", cuda_elapsed_time2); free(matrixA); free(matrixB); free(matrixC); free(res); cudaFree(mmA); cudaFree(mmB); cudaFree(mmC); return 0; }
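gpu_blas_mmul above hands B and A to the column-major cublasSgemm in swapped order (a common way to express a row-major product in a column-major BLAS), and hipify maps the calls one-to-one onto hipblasCreate, hipblasSgemm with HIPBLAS_OP_N, and hipblasDestroy. For reference, a minimal self-contained column-major SGEMM call with tiny illustrative matrices; it computes C = A*B directly in column-major layout rather than using the swap trick.

#include <cstdio>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main() {
    const int m = 2, k = 3, n = 2;            // A is m x k, B is k x n, C is m x n (column-major)
    float A[m * k] = {1, 4, 2, 5, 3, 6};      // column-major [[1,2,3],[4,5,6]]
    float B[k * n] = {7, 9, 11, 8, 10, 12};   // column-major [[7,8],[9,10],[11,12]]
    float C[m * n] = {0};

    float *dA, *dB, *dC;
    cudaMalloc(&dA, sizeof(A)); cudaMalloc(&dB, sizeof(B)); cudaMalloc(&dC, sizeof(C));
    cudaMemcpy(dA, A, sizeof(A), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, sizeof(B), cudaMemcpyHostToDevice);
    cudaMemcpy(dC, C, sizeof(C), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    // C = alpha*A*B + beta*C, all matrices column-major; leading dimensions equal the row counts.
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, dA, m, dB, k, &beta, dC, m);
    cublasDestroy(handle);
    // hipBLAS equivalents: hipblasCreate / hipblasSgemm with HIPBLAS_OP_N / hipblasDestroy.

    cudaMemcpy(C, dC, sizeof(C), cudaMemcpyDeviceToHost);
    printf("C = [%g %g; %g %g]\n", C[0], C[2], C[1], C[3]);  // expect [58 64; 139 154]
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}

The swap trick relied on above works because the column-major view of a row-major buffer is its transpose, and (A*B)^T = B^T * A^T, so multiplying the two transposed views in swapped order returns A*B laid out row-major.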
9952a23d52caf82a7c0abb1cf4841ea75a5caf42.hip
// !!! This is a file automatically generated by hipify!!! #include "VORApp.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> iglu::IGLUApp* app; int main() { using namespace OGL; app = new VORApp("../../CommonSampleFiles/scenes/virObjRef.txt"); app->Run(); return 0; }
9952a23d52caf82a7c0abb1cf4841ea75a5caf42.cu
#include "VORApp.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> iglu::IGLUApp* app; int main() { using namespace OGL; app = new VORApp("../../CommonSampleFiles/scenes/virObjRef.txt"); app->Run(); return 0; }
d6a92ab80b93f8bc83421e338fc5181825ccc62b.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <string.h> #include <math.h> #include <assert.h> #include "parser.h" #define BATCH_SIZE 20 /* BATCH_SIZE * * Why use BATCH_SIZE ? (Multiple images at once) * * To saturate Streaming Multiprocessors with enough computaion BLOCKS * * CRITERIA: * - Deploy enough blocks (More than n * SM counts) for latency hiding * - Saturate each block with enough threads */ /* NVIDIA GEFORCE GTX1080 * GPU SPEC: * - warp_size: 32 threads * - word_size: 4 Bytes * - SM_count: 20 Streaming Multiprocessors * * SM SPEC: * - max_warps: 64 * - max_thread_blocks : 32 * - max_threads: 2048 * - max_registers: 65536 words * - CUDA_cores: 64 cores * - share_memory: 64 kB * * BLOCK SPEC: * - max_threads: 1024 * - max_registers: 65536 words * * THREAD SPEC: * - max_registers: 255 words * * SHARED MEMORY SPEC: * - 64 kB per SM * - Composed of 32 memory bank hardwares * - Does bank interleaving every word (4 Bytes) * */ /* Memory design * * 0. INPUT image data * => ALL goes into global memory * * 1. Filter map data * => Put as much as we can into constant memory (d_map), but leftover should go to global memory (d_map_spill) * * 2. Result data * => Should go to global memory since write-once * * 3. What to cache into shared memory? * => Bring Filter map data into shared_memory (only necessary part) * => Bring INPUT data into shared_memory (only necessary part) * */ __constant__ int D_BATCH_SIZE = BATCH_SIZE; __constant__ int D_NUM_TEST = NUM_TEST; __constant__ __gpu_map__ d_map; __device__ float sigmoid(float x) { return (1 / (1 + exp(-x))); } /* * ARGUMENTS: * - curr_step: Which step are we in? (In MAIN_LOOP) * - stage: Stage number(ex; 1 means C1 layer, 3 means C3 layer) * - num_output: Number of output maps * - num_input: Number of input maps * - height_input: Height of input maps * - width_input: Width of input maps * - size_filter: Size of filter map, 5 for LeNet-5 * - d_map + d_map_spill: Contains filter maps for all layers * - inputs: Source of input images * - outputs: Destination to store output(computed) images * - size_input: Length of input 1D array, for fully connected layer * - size_output: Length of output 1D array, for fully connected layer */ __global__ void // Convolution computation kernel convolution_kernel( int curr_step, int stage, int num_output, int num_input, int height_input, int width_input, int size_filter, float *inputs, float *outputs ) { // Get index info int BID_x = blockIdx.x; // foreach: output image ~6 or ~16 int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE] int TID_x = threadIdx.x; // foreach: output image row ~28 or ~10 int TID_y = threadIdx.y; // foreach: output image column ~28 or ~10 float acc = 0; // For every kernel launch, all threads of the warp are on the one side of branch if (stage == 1) {// C1_layer convolution: D_BATCH_SIZE * { [1 @ 32 * 32] .X [6 * 1 @ 5 * 5] => [6 @ 28 * 28] } // Get the starting point from entire MNIST data set float *input_start = inputs + (curr_step * D_BATCH_SIZE * (32 * 32)) + (BID_y * 32 * 32); // Load data into shared memory __shared__ float input[32][32]; // Do shared memory access in 32 stride to avoid shared memory bank conflict int myCnt = 28 * TID_x + TID_y; if (myCnt < 32) { for (int i = 0; i < 32; i++) { input[i][myCnt] = input_start[(32 * i) + myCnt]; } } __syncthreads(); __shared__ float filter[5][5]; // Only 25 entries -> No shared memory bank conflict 
if (TID_x < size_filter && TID_y < size_filter) { filter[TID_x][TID_y] = d_map.C1_param[BID_x][0][TID_x][TID_y]; } __syncthreads(); for (int f_row = 0; f_row < size_filter; f_row++) { for (int f_col = 0; f_col < size_filter; f_col++) { acc += input[TID_x + f_row][TID_y + f_col] * filter[f_row][f_col]; } } outputs[(BID_y * 6 * 28 * 28) + (BID_x * 28 * 28) + (TID_x * 28) + TID_y] = acc; } else // Desired stage = 3 {// C3_layer convolution: D_BATCH_SIZE * { [6 @ 14 * 14] .X [16 * 6 @ 5 * 5] => [16 @ 10 * 10] } // Get the starting point from d_s2_results[BATCH_SIZE] float *input_start = inputs + (BID_y * (14 * 14)); for (int c = 0; c < num_input; c++) {// For every input channel, which isn't 1 for C3 layer // Load data into shared memory __shared__ float input[14][14]; // Do shared memory access in 14 strides to avoid shared memory bank conflict int myCnt = 10 * TID_x + TID_y; if (myCnt < 14) { for (int i = 0; i < 14; i++) { input[i][myCnt] = input_start[(14 * i) + myCnt]; } } __syncthreads(); __shared__ float filter[5][5]; // Only 25 entries -> No shared memory bank conflict if (TID_x < size_filter && TID_y < size_filter) { filter[TID_x][TID_y] = d_map.C3_param[BID_x][c][TID_x][TID_y]; } __syncthreads(); for (int f_row = 0; f_row < size_filter; f_row++) { for (int f_col = 0; f_col < size_filter; f_col++) { acc += input[TID_x + f_row][TID_y + f_col] * filter[f_row][f_col]; } } } outputs[(BID_y * 16 * 10 * 10) + (BID_x * 10 * 10) + (TID_x * 10) + TID_y]; } return; } __global__ void // Pooling computation kernel pooling_kernel( int curr_step, int stage, int num_output, int height_input, int width_input, float *inputs, float *outputs ) { // Get index info int BID_x = blockIdx.x; // foreach: output image ~6 or ~16 int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE] int TID_x = threadIdx.x; // foreach: output image row ~14 or ~5 int TID_y = threadIdx.y; // foreach: output image column ~14 or ~5 float acc = 0; if (stage == 2) {// S2_layer pooling: D_BATCH_SIZE * { Sigmoid([6 @ 28 * 28] + bias[6]) => [6 @ 14 * 14] } // No need to load C1_bias since it will be cached into L1 float *input_start = inputs + (BID_y * 6 * 28 * 28) + (BID_x * 28 * 28); for (int s_row = 0; s_row < 2; s_row++) { for (int s_col = 0; s_col < 2; s_col++) { acc += input_start[(28 * (2 * TID_x + s_row)) + (2 * TID_y + s_col)] / 4; } } outputs[(BID_y * 6 * 14 * 14) + (BID_x * 14 * 14) + (TID_x * 14) + TID_y] = sigmoid(acc + d_map.C1_bias[BID_x]); } else // Desired stage = 4 {// S4_layer pooling: D_BATCH_SIZE * { Sigmoid([16 @ 10 * 10] + bias[16]) => [16 @ 5 * 5] } // No need to load C3_bias since it will be cached into L1 float *input_start = inputs + (BID_y * 16 * 10 * 10) + (BID_x * 10 * 10); for (int s_row = 0; s_row < 2; s_row++) { for (int s_col = 0; s_col < 2; s_col++) { acc += input_start[(10 * (2 * TID_x + s_row)) + (2 * TID_y + s_col)] / 4; } } outputs[(BID_y * 16 * 5 * 5) + (BID_x * 5 * 5) + (TID_x * 5) + TID_y] = sigmoid(acc + d_map.C3_bias[BID_x]); } return; } __global__ void // Fully connecting computation kernel fullyConnect_kernel( int curr_step, int stage, int size_input, int size_output, __gpu_map_spill__ *d_map_spill, float *inputs, float *outputs ) { // This layer is pretty much simple matrix multipliction of (ex [120][400] X [400][1] => [120][1] ) int BID_x = blockIdx.x; // I will divide [120][140] into 4 segments, to acquire more blocks for latency hiding int BID_y = blockIdx.y; // Unit position in BATCH_SIZE int TID_x = threadIdx.x; // Thread ID. 
threads ~400 or ~120 if (stage == 5) {// F5_layer full connection: D_BATCH_SIZE * { Sigmoid([120 * 400] X Serial[16 @ 5 * 5] + bias[120 * 1]) => [120 * 1] } // Load input data into shared memory // Loading F5_param is unnecessary, since elements in F5_param are only for one-shot use __shared__ float prod_elementwise[400]; __shared__ float input[400]; if (TID_x < 20) {// Take 20 strides to avoid shared memory bank conflict for (int i = 0; i < (400 / 20); i++) { input[(i * 20) + TID_x] = inputs[(BID_y * 400) + (i * 20) + TID_x]; } } __syncthreads(); for (int i = 0; i < (120 / 4); i++) { prod_elementwise[TID_x] = (*d_map_spill).F5_param[((BID_x * (120 / 4)) + i)][TID_x] * input[TID_x]; __syncthreads(); if (TID_x == 0) { float prod_sum = 0; for (int j = 0; j < 400; j++) { prod_sum += prod_elementwise[j]; } outputs[(BID_y * 120) + (BID_x * (120 / 4)) + i] = sigmoid(prod_sum + d_map.F5_bias[(BID_x * (120 / 4) + i)]); } } } else // Desired stage = 6 {// F6_layer full connection: D_BATCH_SIZE * { Sigmoid([84 * 120] X [120 * 1] + bias[84 * 1]) => [84 * 1] } // Load input data into shared memory // Loading F6_param is unnecessary, since elements in F6_param are only for one-shot use __shared__ float prod_elementwise[120]; __shared__ float input[120]; if (TID_x < 20) {// Take 20 strides to avoid shared memory bank conflict for (int i = 0; i < (120 / 20); i++) { input[(i * 20) + TID_x] = inputs[(BID_y * 120) + (i * 20) + TID_x]; } } __syncthreads(); for (int i = 0; i < (84 / 4); i++) { prod_elementwise[TID_x] = d_map.F6_param[(BID_x * (120 / 4)) + i][TID_x] * input[TID_x]; __syncthreads(); if (TID_x == 0) { float prod_sum = 0; for (int j = 0; j < 120; j++) { prod_sum += prod_elementwise[j]; } outputs[(BID_y * 84) + (BID_x * (84 / 4)) + i] = sigmoid(prod_sum + d_map.F6_bias[(BID_x * (84 / 4)) + i]); } } } return; } __global__ void // Output layer compuation kernel output_kernel( int curr_step, int stage, int size_input, int size_output, __gpu_map_spill__ *d_map_spill, float *inputs, float *outputs ) { // OUTPUT_layer: D_BATCH_SIZE * { [10 * 84] X [84 * 1] + [10 * 1] => [10 * 1] } // Get index info int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE] int TID_x = threadIdx.x; // foreach: elements in a row (84) // Load data into shared memory __shared__ float OUTPUT_param[10][84]; if (TID_x < 21) { for (int i = 0; i < 10; i++) { for (int j = 0; j < 4; j++) { OUTPUT_param[i][(j * 21) + TID_x] = d_map.OUTPUT_param[i][(j * 21) + TID_x]; } } } __syncthreads(); __shared__ float input[84]; if (TID_x < 21) { for (int i = 0; i < 4; i++) { input[(i * 21) + TID_x] = inputs[(BID_y * 84) + (i * 21) + TID_x]; } } __syncthreads(); __shared__ float prod_elementwise[84]; for (int i = 0; i < 10; i++) { prod_elementwise[TID_x] = OUTPUT_param[i][TID_x] * input[TID_x]; __syncthreads(); if (TID_x == 0) { float prod_sum = 0; for (int j = 0; j < 84; j++) { prod_sum += prod_elementwise[j]; } outputs[(curr_step * D_BATCH_SIZE * 10) + (BID_y * 10) + i] = prod_sum + d_map.OUTPUT_bias[i]; } } return; } __global__ void // Number determination kernel numberDetermine_kernel( int curr_step, int stage, float *inputs, int *outputs ) { // NUMBER_layer: D_NUM_TEST * { ReduceMax[10 * 1] => SINGLE_DIGIT } // Get index info int BID_x = blockIdx.x; // 100 int TID_x = threadIdx.x; // 100 int index_image = (BID_x * 100) + TID_x; float highest_prob = inputs[(index_image * 10) + 0]; int ans = 0; for (int i = 1; i < 10; i++) { if (inputs[(index_image * 10) + i] > highest_prob) { highest_prob = inputs[(index_image * 10) + i]; 
ans = i; } } outputs[index_image] = ans; return; } void forward_GPU(float **ptr_test_data, int **ptr_test_label, __map__ *map, int *cnt_correct) {// Deploy forward computation job on GPU float *test_data = *ptr_test_data; int *test_label = *ptr_test_label; // Acquire memory space in GPU // Prefix "d_" means ADDRESS in device memory // Handlers for device memory manipulation int *inferences = (int *) malloc(sizeof(int) * NUM_TEST); int *d_inferences; float *d_test_data; __gpu_map_spill__ *d_map_spill; float *d_c1_results; float *d_s2_results; float *d_c3_results; float *d_s4_results; float *d_f5_results; float *d_f6_results; float *d_output_results; // WARNING: MALLOC 1 __gpu_map__ *tmp_map = (__gpu_map__ *) malloc(sizeof(__gpu_map__)); __gpu_map_spill__ *tmp_map_spill = (__gpu_map_spill__ *) malloc(sizeof(__gpu_map_spill__)); assert(tmp_map != NULL && "MALLOC FAILED!\n"); assert(tmp_map_spill != NULL && "MALLOC FAILED!\n"); // Fill in gpu_map data // tmp_map = map - F5_param memcpy((*tmp_map).C1_param, (*map).C1_param, sizeof(float) * 6 * 1 * 5 * 5); memcpy((*tmp_map).C1_bias, (*map).C1_bias, sizeof(float) * 6); memcpy((*tmp_map).C3_param, (*map).C3_param, sizeof(float) * 16 * 6 * 5 * 5); memcpy((*tmp_map).C3_bias, (*map).C3_bias, sizeof(float) * 16); memcpy((*tmp_map).F5_bias, (*map).F5_bias, sizeof(float) * 120); memcpy((*tmp_map).F6_param, (*map).F6_param, sizeof(float) * 84 * 120); memcpy((*tmp_map).F6_bias, (*map).F6_bias, sizeof(float) * 84); memcpy((*tmp_map).OUTPUT_param, (*map).OUTPUT_param, sizeof(float) * 10 * 84); memcpy((*tmp_map).OUTPUT_bias, (*map).OUTPUT_bias, sizeof(float) * 10); // tmp_map_spill = F5 param memcpy((*tmp_map_spill).F5_param, (*map).F5_param, sizeof(float) * 120 * 400); // Fix NUM_TEST into d_NUM_TEST so d_NUM_TEST can be multiple of BATCH_SIZE, so we can walk in stride int d_NUM_TEST = ((int) ceil((double) ((float) NUM_TEST / (float) BATCH_SIZE))) * BATCH_SIZE; int batch_size = BATCH_SIZE; // WARNING: MALLOC 0 hipMalloc((void **) &d_inferences, sizeof(int) * NUM_TEST); hipMalloc((void **) &d_test_data, sizeof(float) * NUM_TEST * 32 * 32); hipMalloc((void **) &d_map_spill, sizeof(__gpu_map_spill__)); hipMalloc((void **) &d_c1_results, sizeof(float) * BATCH_SIZE * 6 * 28 * 28); hipMalloc((void **) &d_s2_results, sizeof(float) * BATCH_SIZE * 6 * 14 * 14); hipMalloc((void **) &d_c3_results, sizeof(float) * BATCH_SIZE * 16 * 10 * 10); hipMalloc((void **) &d_s4_results, sizeof(float) * BATCH_SIZE * 16 * 5 * 5); hipMalloc((void **) &d_f5_results, sizeof(float) * BATCH_SIZE * 120); hipMalloc((void **) &d_f6_results, sizeof(float) * BATCH_SIZE * 84); hipMalloc((void **) &d_output_results, sizeof(float) * NUM_TEST * 10); // CUDA memcpy from host to device hipMemcpyToSymbol(d_map, tmp_map, sizeof(__gpu_map__), 0, hipMemcpyHostToDevice); hipMemcpy(d_map_spill, tmp_map_spill, sizeof(__gpu_map_spill__), hipMemcpyHostToDevice); // WARNING: FREE 1 free(tmp_map); free(tmp_map_spill); // ENTERING MAIN_LOOP dim3 block; dim3 thread; for (int step = 0; (step * BATCH_SIZE) < d_NUM_TEST; step++) {// Advance step by step, with BATCH_SIZE stride (Processing forward chain for "BATCH_SIZE" number of MNIST images) // 0. Convolution layer C1 block.x = 6; block.y = BATCH_SIZE; block.z = 1; thread.x = 28; thread.y = 28; thread.z = 1; hipLaunchKernelGGL(( convolution_kernel), dim3(block), dim3(thread), 0, 0, step, 1, 6, 1, 32, 32, 5, d_test_data, d_c1_results); // 1. 
Pooling layer S2 block.x = 6; block.y = BATCH_SIZE; block.z = 1; thread.x = 14; thread.y = 14; thread.z = 1; hipLaunchKernelGGL(( pooling_kernel), dim3(block), dim3(thread), 0, 0, step, 2, 6, 28, 28, d_c1_results, d_s2_results); // 2. Convolution layer C3 block.x = 16; block.y = BATCH_SIZE; block.z = 1; thread.x = 10; thread.y = 10; thread.z = 1; hipLaunchKernelGGL(( convolution_kernel), dim3(block), dim3(thread), 0, 0, step, 3, 16, 6, 14, 14, 5, d_s2_results, d_c3_results); // 3. Pooling layer S4 block.x = 16; block.y = BATCH_SIZE; block.z = 1; thread.x = 5; thread.y = 5; thread.z = 1; hipLaunchKernelGGL(( pooling_kernel), dim3(block), dim3(thread), 0, 0, step, 4, 16, 10, 10, d_c3_results, d_s4_results); // 4. Fully connected layer F5 block.x = 4; block.y = BATCH_SIZE; block.z = 1; thread.x = 400; thread.y = 1; thread.z = 1; hipLaunchKernelGGL(( fullyConnect_kernel), dim3(block), dim3(thread), 0, 0, step, 5, 400, 120, d_map_spill, d_s4_results, d_f5_results); // 5. Fully connected layer F6 block.x = 4; block.y = BATCH_SIZE; block.z = 1; thread.x = 120; thread.y = 1; thread.z = 1; hipLaunchKernelGGL(( fullyConnect_kernel), dim3(block), dim3(thread), 0, 0, step, 6, 120, 84, d_map_spill, d_f5_results, d_f6_results); // 6. Output layer OUTPUT block.x = 1; block.y = BATCH_SIZE; block.z = 1; thread.x = 84; thread.y = 1; thread.z = 1; hipLaunchKernelGGL(( output_kernel), dim3(block), dim3(thread), 0, 0, step, 7, 84, 10, d_map_spill, d_f6_results, d_output_results); } // 7. Determine numbers block.x = 100; block.y = 1; block.z = 1; thread.x = 100; thread.y = 1; thread.z = 1; hipLaunchKernelGGL(( numberDetermine_kernel), dim3(block), dim3(thread), 0, 0, 8, 8, d_output_results, d_inferences); // 8. Copy inference answers to Host hipMemcpy(inferences, d_inferences, sizeof(int) * NUM_TEST, hipMemcpyDeviceToHost); // 9. Scoring for (int i = 0; i < NUM_TEST; i++) { if (inferences[i] == test_label[i]) { (*cnt_correct)++; } } // WARNING: FREE 0 free(inferences); hipFree(d_inferences); hipFree(d_map_spill); hipFree(d_test_data); hipFree(d_c1_results); hipFree(d_s2_results); hipFree(d_c3_results); hipFree(d_s4_results); hipFree(d_f5_results); hipFree(d_f6_results); hipFree(d_output_results); return; }
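The forward pass above keeps all filter banks that fit in __constant__ memory (d_map), spills only the large F5 weights to global memory (d_map_spill), and fills the constant copy with hipMemcpyToSymbol before the main loop. A minimal sketch of that constant-memory pattern in the CUDA spelling; the Params struct, its sizes, and the kernel are illustrative stand-ins, not the LeNet structures above.

#include <cstdio>
#include <cuda_runtime.h>

struct Params {
    float bias[6];
    float filter[6][5][5];
};

__constant__ Params d_params;   // read-only on the device, broadcast-friendly across a warp

__global__ void useParams(float *out) {
    int i = threadIdx.x;
    if (i < 6) out[i] = d_params.bias[i] + d_params.filter[i][0][0];
}

int main() {
    Params h{};                            // host copy, zero-initialized
    for (int i = 0; i < 6; ++i) h.bias[i] = 0.1f * i;
    // CUDA spelling; the hipify output above uses hipMemcpyToSymbol with explicit
    // offset and kind arguments (0, hipMemcpyHostToDevice).
    cudaMemcpyToSymbol(d_params, &h, sizeof(Params));
    float *out;
    cudaMallocManaged(&out, 6 * sizeof(float));
    useParams<<<1, 32>>>(out);
    cudaDeviceSynchronize();
    printf("out[3] = %f\n", out[3]);
    cudaFree(out);
    return 0;
}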
d6a92ab80b93f8bc83421e338fc5181825ccc62b.cu
#include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <string.h> #include <math.h> #include <assert.h> #include "parser.h" #define BATCH_SIZE 20 /* BATCH_SIZE * * Why use BATCH_SIZE ? (Multiple images at once) * * To saturate Streaming Multiprocessors with enough computaion BLOCKS * * CRITERIA: * - Deploy enough blocks (More than n * SM counts) for latency hiding * - Saturate each block with enough threads */ /* NVIDIA GEFORCE GTX1080 * GPU SPEC: * - warp_size: 32 threads * - word_size: 4 Bytes * - SM_count: 20 Streaming Multiprocessors * * SM SPEC: * - max_warps: 64 * - max_thread_blocks : 32 * - max_threads: 2048 * - max_registers: 65536 words * - CUDA_cores: 64 cores * - share_memory: 64 kB * * BLOCK SPEC: * - max_threads: 1024 * - max_registers: 65536 words * * THREAD SPEC: * - max_registers: 255 words * * SHARED MEMORY SPEC: * - 64 kB per SM * - Composed of 32 memory bank hardwares * - Does bank interleaving every word (4 Bytes) * */ /* Memory design * * 0. INPUT image data * => ALL goes into global memory * * 1. Filter map data * => Put as much as we can into constant memory (d_map), but leftover should go to global memory (d_map_spill) * * 2. Result data * => Should go to global memory since write-once * * 3. What to cache into shared memory? * => Bring Filter map data into shared_memory (only necessary part) * => Bring INPUT data into shared_memory (only necessary part) * */ __constant__ int D_BATCH_SIZE = BATCH_SIZE; __constant__ int D_NUM_TEST = NUM_TEST; __constant__ __gpu_map__ d_map; __device__ float sigmoid(float x) { return (1 / (1 + exp(-x))); } /* * ARGUMENTS: * - curr_step: Which step are we in? (In MAIN_LOOP) * - stage: Stage number(ex; 1 means C1 layer, 3 means C3 layer) * - num_output: Number of output maps * - num_input: Number of input maps * - height_input: Height of input maps * - width_input: Width of input maps * - size_filter: Size of filter map, 5 for LeNet-5 * - d_map + d_map_spill: Contains filter maps for all layers * - inputs: Source of input images * - outputs: Destination to store output(computed) images * - size_input: Length of input 1D array, for fully connected layer * - size_output: Length of output 1D array, for fully connected layer */ __global__ void // Convolution computation kernel convolution_kernel( int curr_step, int stage, int num_output, int num_input, int height_input, int width_input, int size_filter, float *inputs, float *outputs ) { // Get index info int BID_x = blockIdx.x; // foreach: output image ~6 or ~16 int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE] int TID_x = threadIdx.x; // foreach: output image row ~28 or ~10 int TID_y = threadIdx.y; // foreach: output image column ~28 or ~10 float acc = 0; // For every kernel launch, all threads of the warp are on the one side of branch if (stage == 1) {// C1_layer convolution: D_BATCH_SIZE * { [1 @ 32 * 32] .X [6 * 1 @ 5 * 5] => [6 @ 28 * 28] } // Get the starting point from entire MNIST data set float *input_start = inputs + (curr_step * D_BATCH_SIZE * (32 * 32)) + (BID_y * 32 * 32); // Load data into shared memory __shared__ float input[32][32]; // Do shared memory access in 32 stride to avoid shared memory bank conflict int myCnt = 28 * TID_x + TID_y; if (myCnt < 32) { for (int i = 0; i < 32; i++) { input[i][myCnt] = input_start[(32 * i) + myCnt]; } } __syncthreads(); __shared__ float filter[5][5]; // Only 25 entries -> No shared memory bank conflict if (TID_x < size_filter && TID_y < size_filter) { filter[TID_x][TID_y] = 
d_map.C1_param[BID_x][0][TID_x][TID_y]; } __syncthreads(); for (int f_row = 0; f_row < size_filter; f_row++) { for (int f_col = 0; f_col < size_filter; f_col++) { acc += input[TID_x + f_row][TID_y + f_col] * filter[f_row][f_col]; } } outputs[(BID_y * 6 * 28 * 28) + (BID_x * 28 * 28) + (TID_x * 28) + TID_y] = acc; } else // Desired stage = 3 {// C3_layer convolution: D_BATCH_SIZE * { [6 @ 14 * 14] .X [16 * 6 @ 5 * 5] => [16 @ 10 * 10] } for (int c = 0; c < num_input; c++) {// For every input channel, which isn't 1 for C3 layer // Get the starting point from d_s2_results[BATCH_SIZE] float *input_start = inputs + (BID_y * 6 * 14 * 14) + (c * 14 * 14); // Load data into shared memory __shared__ float input[14][14]; // Do shared memory access in 14 strides to avoid shared memory bank conflict int myCnt = 10 * TID_x + TID_y; if (myCnt < 14) { for (int i = 0; i < 14; i++) { input[i][myCnt] = input_start[(14 * i) + myCnt]; } } __syncthreads(); __shared__ float filter[5][5]; // Only 25 entries -> No shared memory bank conflict if (TID_x < size_filter && TID_y < size_filter) { filter[TID_x][TID_y] = d_map.C3_param[BID_x][c][TID_x][TID_y]; } __syncthreads(); for (int f_row = 0; f_row < size_filter; f_row++) { for (int f_col = 0; f_col < size_filter; f_col++) { acc += input[TID_x + f_row][TID_y + f_col] * filter[f_row][f_col]; } } __syncthreads(); } outputs[(BID_y * 16 * 10 * 10) + (BID_x * 10 * 10) + (TID_x * 10) + TID_y] = acc; } return; } __global__ void // Pooling computation kernel pooling_kernel( int curr_step, int stage, int num_output, int height_input, int width_input, float *inputs, float *outputs ) { // Get index info int BID_x = blockIdx.x; // foreach: output image ~6 or ~16 int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE] int TID_x = threadIdx.x; // foreach: output image row ~14 or ~5 int TID_y = threadIdx.y; // foreach: output image column ~14 or ~5 float acc = 0; if (stage == 2) {// S2_layer pooling: D_BATCH_SIZE * { Sigmoid([6 @ 28 * 28] + bias[6]) => [6 @ 14 * 14] } // No need to load C1_bias since it will be cached into L1 float *input_start = inputs + (BID_y * 6 * 28 * 28) + (BID_x * 28 * 28); for (int s_row = 0; s_row < 2; s_row++) { for (int s_col = 0; s_col < 2; s_col++) { acc += input_start[(28 * (2 * TID_x + s_row)) + (2 * TID_y + s_col)] / 4; } } outputs[(BID_y * 6 * 14 * 14) + (BID_x * 14 * 14) + (TID_x * 14) + TID_y] = sigmoid(acc + d_map.C1_bias[BID_x]); } else // Desired stage = 4 {// S4_layer pooling: D_BATCH_SIZE * { Sigmoid([16 @ 10 * 10] + bias[16]) => [16 @ 5 * 5] } // No need to load C3_bias since it will be cached into L1 float *input_start = inputs + (BID_y * 16 * 10 * 10) + (BID_x * 10 * 10); for (int s_row = 0; s_row < 2; s_row++) { for (int s_col = 0; s_col < 2; s_col++) { acc += input_start[(10 * (2 * TID_x + s_row)) + (2 * TID_y + s_col)] / 4; } } outputs[(BID_y * 16 * 5 * 5) + (BID_x * 5 * 5) + (TID_x * 5) + TID_y] = sigmoid(acc + d_map.C3_bias[BID_x]); } return; } __global__ void // Fully connecting computation kernel fullyConnect_kernel( int curr_step, int stage, int size_input, int size_output, __gpu_map_spill__ *d_map_spill, float *inputs, float *outputs ) { // This layer is pretty much simple matrix multiplication of (ex [120][400] X [400][1] => [120][1] ) int BID_x = blockIdx.x; // I will divide [120][400] into 4 segments, to acquire more blocks for latency hiding int BID_y = blockIdx.y; // Unit position in BATCH_SIZE int TID_x = threadIdx.x; // Thread ID.
threads ~400 or ~120 if (stage == 5) {// F5_layer full connection: D_BATCH_SIZE * { Sigmoid([120 * 400] X Serial[16 @ 5 * 5] + bias[120 * 1]) => [120 * 1] } // Load input data into shared memory // Loading F5_param is unnecessary, since elements in F5_param are only for one-shot use __shared__ float prod_elementwise[400]; __shared__ float input[400]; if (TID_x < 20) {// Take 20 strides to avoid shared memory bank conflict for (int i = 0; i < (400 / 20); i++) { input[(i * 20) + TID_x] = inputs[(BID_y * 400) + (i * 20) + TID_x]; } } __syncthreads(); for (int i = 0; i < (120 / 4); i++) { prod_elementwise[TID_x] = (*d_map_spill).F5_param[((BID_x * (120 / 4)) + i)][TID_x] * input[TID_x]; __syncthreads(); if (TID_x == 0) { float prod_sum = 0; for (int j = 0; j < 400; j++) { prod_sum += prod_elementwise[j]; } outputs[(BID_y * 120) + (BID_x * (120 / 4)) + i] = sigmoid(prod_sum + d_map.F5_bias[(BID_x * (120 / 4) + i)]); } } } else // Desired stage = 6 {// F6_layer full connection: D_BATCH_SIZE * { Sigmoid([84 * 120] X [120 * 1] + bias[84 * 1]) => [84 * 1] } // Load input data into shared memory // Loading F6_param is unnecessary, since elements in F6_param are only for one-shot use __shared__ float prod_elementwise[120]; __shared__ float input[120]; if (TID_x < 20) {// Take 20 strides to avoid shared memory bank conflict for (int i = 0; i < (120 / 20); i++) { input[(i * 20) + TID_x] = inputs[(BID_y * 120) + (i * 20) + TID_x]; } } __syncthreads(); for (int i = 0; i < (84 / 4); i++) { prod_elementwise[TID_x] = d_map.F6_param[(BID_x * (120 / 4)) + i][TID_x] * input[TID_x]; __syncthreads(); if (TID_x == 0) { float prod_sum = 0; for (int j = 0; j < 120; j++) { prod_sum += prod_elementwise[j]; } outputs[(BID_y * 84) + (BID_x * (84 / 4)) + i] = sigmoid(prod_sum + d_map.F6_bias[(BID_x * (84 / 4)) + i]); } } } return; } __global__ void // Output layer compuation kernel output_kernel( int curr_step, int stage, int size_input, int size_output, __gpu_map_spill__ *d_map_spill, float *inputs, float *outputs ) { // OUTPUT_layer: D_BATCH_SIZE * { [10 * 84] X [84 * 1] + [10 * 1] => [10 * 1] } // Get index info int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE] int TID_x = threadIdx.x; // foreach: elements in a row (84) // Load data into shared memory __shared__ float OUTPUT_param[10][84]; if (TID_x < 21) { for (int i = 0; i < 10; i++) { for (int j = 0; j < 4; j++) { OUTPUT_param[i][(j * 21) + TID_x] = d_map.OUTPUT_param[i][(j * 21) + TID_x]; } } } __syncthreads(); __shared__ float input[84]; if (TID_x < 21) { for (int i = 0; i < 4; i++) { input[(i * 21) + TID_x] = inputs[(BID_y * 84) + (i * 21) + TID_x]; } } __syncthreads(); __shared__ float prod_elementwise[84]; for (int i = 0; i < 10; i++) { prod_elementwise[TID_x] = OUTPUT_param[i][TID_x] * input[TID_x]; __syncthreads(); if (TID_x == 0) { float prod_sum = 0; for (int j = 0; j < 84; j++) { prod_sum += prod_elementwise[j]; } outputs[(curr_step * D_BATCH_SIZE * 10) + (BID_y * 10) + i] = prod_sum + d_map.OUTPUT_bias[i]; } } return; } __global__ void // Number determination kernel numberDetermine_kernel( int curr_step, int stage, float *inputs, int *outputs ) { // NUMBER_layer: D_NUM_TEST * { ReduceMax[10 * 1] => SINGLE_DIGIT } // Get index info int BID_x = blockIdx.x; // 100 int TID_x = threadIdx.x; // 100 int index_image = (BID_x * 100) + TID_x; float highest_prob = inputs[(index_image * 10) + 0]; int ans = 0; for (int i = 1; i < 10; i++) { if (inputs[(index_image * 10) + i] > highest_prob) { highest_prob = inputs[(index_image * 10) + i]; 
ans = i; } } outputs[index_image] = ans; return; } void forward_GPU(float **ptr_test_data, int **ptr_test_label, __map__ *map, int *cnt_correct) {// Deploy forward computation job on GPU float *test_data = *ptr_test_data; int *test_label = *ptr_test_label; // Acquire memory space in GPU // Prefix "d_" means ADDRESS in device memory // Handlers for device memory manipulation int *inferences = (int *) malloc(sizeof(int) * NUM_TEST); int *d_inferences; float *d_test_data; __gpu_map_spill__ *d_map_spill; float *d_c1_results; float *d_s2_results; float *d_c3_results; float *d_s4_results; float *d_f5_results; float *d_f6_results; float *d_output_results; // WARNING: MALLOC 1 __gpu_map__ *tmp_map = (__gpu_map__ *) malloc(sizeof(__gpu_map__)); __gpu_map_spill__ *tmp_map_spill = (__gpu_map_spill__ *) malloc(sizeof(__gpu_map_spill__)); assert(tmp_map != NULL && "MALLOC FAILED!\n"); assert(tmp_map_spill != NULL && "MALLOC FAILED!\n"); // Fill in gpu_map data // tmp_map = map - F5_param memcpy((*tmp_map).C1_param, (*map).C1_param, sizeof(float) * 6 * 1 * 5 * 5); memcpy((*tmp_map).C1_bias, (*map).C1_bias, sizeof(float) * 6); memcpy((*tmp_map).C3_param, (*map).C3_param, sizeof(float) * 16 * 6 * 5 * 5); memcpy((*tmp_map).C3_bias, (*map).C3_bias, sizeof(float) * 16); memcpy((*tmp_map).F5_bias, (*map).F5_bias, sizeof(float) * 120); memcpy((*tmp_map).F6_param, (*map).F6_param, sizeof(float) * 84 * 120); memcpy((*tmp_map).F6_bias, (*map).F6_bias, sizeof(float) * 84); memcpy((*tmp_map).OUTPUT_param, (*map).OUTPUT_param, sizeof(float) * 10 * 84); memcpy((*tmp_map).OUTPUT_bias, (*map).OUTPUT_bias, sizeof(float) * 10); // tmp_map_spill = F5 param memcpy((*tmp_map_spill).F5_param, (*map).F5_param, sizeof(float) * 120 * 400); // Fix NUM_TEST into d_NUM_TEST so d_NUM_TEST can be multiple of BATCH_SIZE, so we can walk in stride int d_NUM_TEST = ((int) ceil((double) ((float) NUM_TEST / (float) BATCH_SIZE))) * BATCH_SIZE; int batch_size = BATCH_SIZE; // WARNING: MALLOC 0 cudaMalloc((void **) &d_inferences, sizeof(int) * NUM_TEST); cudaMalloc((void **) &d_test_data, sizeof(float) * NUM_TEST * 32 * 32); cudaMalloc((void **) &d_map_spill, sizeof(__gpu_map_spill__)); cudaMalloc((void **) &d_c1_results, sizeof(float) * BATCH_SIZE * 6 * 28 * 28); cudaMalloc((void **) &d_s2_results, sizeof(float) * BATCH_SIZE * 6 * 14 * 14); cudaMalloc((void **) &d_c3_results, sizeof(float) * BATCH_SIZE * 16 * 10 * 10); cudaMalloc((void **) &d_s4_results, sizeof(float) * BATCH_SIZE * 16 * 5 * 5); cudaMalloc((void **) &d_f5_results, sizeof(float) * BATCH_SIZE * 120); cudaMalloc((void **) &d_f6_results, sizeof(float) * BATCH_SIZE * 84); cudaMalloc((void **) &d_output_results, sizeof(float) * NUM_TEST * 10); // CUDA memcpy from host to device cudaMemcpyToSymbol(d_map, tmp_map, sizeof(__gpu_map__), 0, cudaMemcpyHostToDevice); cudaMemcpy(d_map_spill, tmp_map_spill, sizeof(__gpu_map_spill__), cudaMemcpyHostToDevice); // WARNING: FREE 1 free(tmp_map); free(tmp_map_spill); // ENTERING MAIN_LOOP dim3 block; dim3 thread; for (int step = 0; (step * BATCH_SIZE) < d_NUM_TEST; step++) {// Advance step by step, with BATCH_SIZE stride (Processing forward chain for "BATCH_SIZE" number of MNIST images) // 0. Convolution layer C1 block.x = 6; block.y = BATCH_SIZE; block.z = 1; thread.x = 28; thread.y = 28; thread.z = 1; convolution_kernel<<<block, thread>>>(step, 1, 6, 1, 32, 32, 5, d_test_data, d_c1_results); // 1. 
Pooling layer S2 block.x = 6; block.y = BATCH_SIZE; block.z = 1; thread.x = 14; thread.y = 14; thread.z = 1; pooling_kernel<<<block, thread>>>(step, 2, 6, 28, 28, d_c1_results, d_s2_results); // 2. Convolution layer C3 block.x = 16; block.y = BATCH_SIZE; block.z = 1; thread.x = 10; thread.y = 10; thread.z = 1; convolution_kernel<<<block, thread>>>(step, 3, 16, 6, 14, 14, 5, d_s2_results, d_c3_results); // 3. Pooling layer S4 block.x = 16; block.y = BATCH_SIZE; block.z = 1; thread.x = 5; thread.y = 5; thread.z = 1; pooling_kernel<<<block, thread>>>(step, 4, 16, 10, 10, d_c3_results, d_s4_results); // 4. Fully connected layer F5 block.x = 4; block.y = BATCH_SIZE; block.z = 1; thread.x = 400; thread.y = 1; thread.z = 1; fullyConnect_kernel<<<block, thread>>>(step, 5, 400, 120, d_map_spill, d_s4_results, d_f5_results); // 5. Fully connected layer F6 block.x = 4; block.y = BATCH_SIZE; block.z = 1; thread.x = 120; thread.y = 1; thread.z = 1; fullyConnect_kernel<<<block, thread>>>(step, 6, 120, 84, d_map_spill, d_f5_results, d_f6_results); // 6. Output layer OUTPUT block.x = 1; block.y = BATCH_SIZE; block.z = 1; thread.x = 84; thread.y = 1; thread.z = 1; output_kernel<<<block, thread>>>(step, 7, 84, 10, d_map_spill, d_f6_results, d_output_results); } // 7. Determine numbers block.x = 100; block.y = 1; block.z = 1; thread.x = 100; thread.y = 1; thread.z = 1; numberDetermine_kernel<<<block, thread>>>(8, 8, d_output_results, d_inferences); // 8. Copy inference answers to Host cudaMemcpy(inferences, d_inferences, sizeof(int) * NUM_TEST, cudaMemcpyDeviceToHost); // 9. Scoring for (int i = 0; i < NUM_TEST; i++) { if (inferences[i] == test_label[i]) { (*cnt_correct)++; } } // WARNING: FREE 0 free(inferences); cudaFree(d_inferences); cudaFree(d_map_spill); cudaFree(d_test_data); cudaFree(d_c1_results); cudaFree(d_s2_results); cudaFree(d_c3_results); cudaFree(d_s4_results); cudaFree(d_f5_results); cudaFree(d_f6_results); cudaFree(d_output_results); return; }
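/* In forward_GPU above, d_test_data is allocated on the device but no copy of the host buffer test_data into it appears before the main loop, so the stage-1 convolution kernel reads uninitialized device memory. A minimal sketch of the missing upload, assuming the same buffer names and the NUM_TEST * 32 * 32 float layout used elsewhere in the file, placed next to the other host-to-device copies (the HIP variant would use hipMemcpy with hipMemcpyHostToDevice): */
cudaMemcpy(d_test_data, test_data,
           sizeof(float) * NUM_TEST * 32 * 32,
           cudaMemcpyHostToDevice);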
34ae9c10725bd986099eeb6e3545a15dfb064b75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <iostream> #include "caffe/common.hpp" #include "caffe/util/gpu_util.cuh" #include "deformable_im2col.hpp" using namespace std; namespace caffe { template <typename Dtype> __device__ Dtype deformable_im2col_bilinear(const Dtype* bottom_data, const int data_width, const int height, const int width, Dtype h, Dtype w) { int h_low = floor(h); int w_low = floor(w); int h_high; int w_high; if (h_low >= height - 1) { h_high = h_low = height - 1; h = (Dtype)h_low; } else { h_high = h_low + 1; } if (w_low >= width - 1) { w_high = w_low = width - 1; w = (Dtype)w_low; } else { w_high = w_low + 1; } Dtype lh = h - h_low; Dtype lw = w - w_low; Dtype hh = 1 - lh, hw = 1 - lw; Dtype v1 = bottom_data[h_low * data_width + w_low]; Dtype v2 = bottom_data[h_low * data_width + w_high]; Dtype v3 = bottom_data[h_high * data_width + w_low]; Dtype v4 = bottom_data[h_high * data_width + w_high]; Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename Dtype> __device__ Dtype get_gradient_weight(Dtype argmax_h, Dtype argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } argmax_h = max(argmax_h, (Dtype)0.0f); argmax_w = max(argmax_w, (Dtype)0.0f); int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (h == argmax_h_low) { if (w == argmax_w_low) { weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); } } else if (h == argmax_h_high) { if (w == argmax_w_low) { weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); } } return weight; } template <typename Dtype> __device__ Dtype get_coordinate_weight(Dtype argmax_h, Dtype argmax_w, const int height, const int width, const Dtype* im_data, const int data_width, const int bp_dir) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } if (argmax_h < 0) argmax_h = 0; if (argmax_w < 0) argmax_w = 0; int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (bp_dir == 0) { weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { weight += -1 * (argmax_h_low + 1 - argmax_h) * 
im_data[argmax_h_low * data_width + argmax_w_low]; weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } /*! * \brief deformable_im2col gpu kernel. * DO NOT call this directly. Use wrapper function im2col() instead; */ template <typename Dtype> __global__ void deformable_im2col_gpu_kernel(const int n, const Dtype* data_im, const Dtype* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col) / height_col; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; Dtype* data_col_ptr = data_col; data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; const Dtype* data_im_ptr = data_im; data_im_ptr += (c_im * height + h_in) * width + w_in;//0 const Dtype* data_offset_ptr = data_offset; data_offset_ptr += deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;//0 for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype val = static_cast<Dtype>(0); const Dtype h_im = h_in + i * dilation_h + offset_h; const Dtype w_im = w_in + j * dilation_w + offset_w; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { const Dtype map_h = i * dilation_h + offset_h; const Dtype map_w = j * dilation_w + offset_w; const int cur_height = height - h_in; const int cur_width = width - w_in; val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); } *data_col_ptr = val; data_col_ptr += height_col * width_col; } } } } template <typename Dtype> void deformable_im2col_gpu(const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col; int channel_per_deformable_group = channels/ deformable_group; hipLaunchKernelGGL(( deformable_im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, data_offset, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; } template void deformable_im2col_gpu<float>(const float* data_im, const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* data_col); template void deformable_im2col_gpu<double>(const double* data_im, const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* data_col); template <typename Dtype> __global__ void deformable_col2im_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, int channel_per_deformable_group, int height_col, int width_col, Dtype* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; const Dtype cur_inv_h_data = h_in + i * dilation_h + offset_h; const Dtype cur_inv_w_data = w_in + j * dilation_w + offset_w; const Dtype cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1 ) { int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx; Dtype weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); caffe_gpu_atomic_add(weight * cur_top_grad, grad_im + cur_bottom_grad_pos); } } } } } template <typename Dtype> void deformable_col2im_gpu(const Dtype* data_col, const Dtype* 
data_offset, const int channels,const int height, const int width,const int num_kernels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* grad_im) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int channel_per_deformable_group = channels / deformable_group; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( deformable_col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_col, data_offset,channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_im); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_gpu<float>(const float* data_col, const float* data_offset, const int channels, const int height, const int width,const int num_kernels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* grad_im); template void deformable_col2im_gpu<double>(const double* data_col, const double* data_offset, const int channels, const int height, const int width, const int num_kernels,const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* grad_im); template <typename Dtype> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* grad_offset) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = index / width_col / height_col; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const Dtype* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col; const Dtype* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = ((col_c * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col) % kernel_w; int i = (col_pos / width_col / height_col / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int 
data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype inv_h = h_in + i * dilation_h + offset_h; Dtype inv_w = w_in + j * dilation_w + offset_w; if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -1; } const Dtype weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } template <typename Dtype> void deformable_col2im_coord_gpu(const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * kernel_h * kernel_h * deformable_group; int channel_per_deformable_group = channels/ deformable_group; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_col, data_im,data_offset, channels,height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_offset); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_coord_gpu<float>(const float* data_col, const float* data_im,const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* grad_offset); template void deformable_col2im_coord_gpu<double>(const double* data_col, const double* data_im,const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* grad_offset); }
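/* deformable_im2col_bilinear above samples the input at a fractional (h, w) position by blending the four surrounding pixels with weights w1 = (1-lh)(1-lw), w2 = (1-lh)lw, w3 = lh(1-lw), w4 = lh*lw, where lh and lw are the fractional parts of h and w. A small CPU sketch of the same logic for a single row-major channel; the function name is an assumption, not from the source. */
#include <cmath>
float bilinear_sample_cpu(const float* img, int height, int width, float h, float w) {
    int h_low = (int)std::floor(h), w_low = (int)std::floor(w);
    int h_high = h_low + 1, w_high = w_low + 1;
    if (h_low >= height - 1) { h_high = h_low = height - 1; h = (float)h_low; } // clamp to last row
    if (w_low >= width - 1)  { w_high = w_low = width - 1;  w = (float)w_low; } // clamp to last column
    float lh = h - h_low, lw = w - w_low;
    float v1 = img[h_low * width + w_low],  v2 = img[h_low * width + w_high];
    float v3 = img[h_high * width + w_low], v4 = img[h_high * width + w_high];
    return (1 - lh) * (1 - lw) * v1 + (1 - lh) * lw * v2
         + lh * (1 - lw) * v3 + lh * lw * v4;
}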
34ae9c10725bd986099eeb6e3545a15dfb064b75.cu
#include <algorithm> #include <iostream> #include "caffe/common.hpp" #include "caffe/util/gpu_util.cuh" #include "deformable_im2col.hpp" using namespace std; namespace caffe { template <typename Dtype> __device__ Dtype deformable_im2col_bilinear(const Dtype* bottom_data, const int data_width, const int height, const int width, Dtype h, Dtype w) { int h_low = floor(h); int w_low = floor(w); int h_high; int w_high; if (h_low >= height - 1) { h_high = h_low = height - 1; h = (Dtype)h_low; } else { h_high = h_low + 1; } if (w_low >= width - 1) { w_high = w_low = width - 1; w = (Dtype)w_low; } else { w_high = w_low + 1; } Dtype lh = h - h_low; Dtype lw = w - w_low; Dtype hh = 1 - lh, hw = 1 - lw; Dtype v1 = bottom_data[h_low * data_width + w_low]; Dtype v2 = bottom_data[h_low * data_width + w_high]; Dtype v3 = bottom_data[h_high * data_width + w_low]; Dtype v4 = bottom_data[h_high * data_width + w_high]; Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename Dtype> __device__ Dtype get_gradient_weight(Dtype argmax_h, Dtype argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } argmax_h = max(argmax_h, (Dtype)0.0f); argmax_w = max(argmax_w, (Dtype)0.0f); int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (h == argmax_h_low) { if (w == argmax_w_low) { weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); } } else if (h == argmax_h_high) { if (w == argmax_w_low) { weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); } } return weight; } template <typename Dtype> __device__ Dtype get_coordinate_weight(Dtype argmax_h, Dtype argmax_w, const int height, const int width, const Dtype* im_data, const int data_width, const int bp_dir) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } if (argmax_h < 0) argmax_h = 0; if (argmax_w < 0) argmax_w = 0; int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (bp_dir == 0) { weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += (argmax_h_low + 1 - argmax_h) * 
im_data[argmax_h_low * data_width + argmax_w_high]; weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } /*! * \brief deformable_im2col gpu kernel. * DO NOT call this directly. Use wrapper function im2col() instead; */ template <typename Dtype> __global__ void deformable_im2col_gpu_kernel(const int n, const Dtype* data_im, const Dtype* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col) / height_col; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; Dtype* data_col_ptr = data_col; data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; const Dtype* data_im_ptr = data_im; data_im_ptr += (c_im * height + h_in) * width + w_in;//0 const Dtype* data_offset_ptr = data_offset; data_offset_ptr += deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;//0 for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype val = static_cast<Dtype>(0); const Dtype h_im = h_in + i * dilation_h + offset_h; const Dtype w_im = w_in + j * dilation_w + offset_w; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { const Dtype map_h = i * dilation_h + offset_h; const Dtype map_w = j * dilation_w + offset_w; const int cur_height = height - h_in; const int cur_width = width - w_in; val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); } *data_col_ptr = val; data_col_ptr += height_col * width_col; } } } } template <typename Dtype> void deformable_im2col_gpu(const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col; int channel_per_deformable_group = channels/ deformable_group; deformable_im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, data_offset, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; } template void deformable_im2col_gpu<float>(const float* data_im, const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* data_col); template void deformable_im2col_gpu<double>(const double* data_im, const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* data_col); template <typename Dtype> __global__ void deformable_col2im_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, int channel_per_deformable_group, int height_col, int width_col, Dtype* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; const Dtype cur_inv_h_data = h_in + i * dilation_h + offset_h; const Dtype cur_inv_w_data = w_in + j * dilation_w + offset_w; const Dtype cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1 ) { int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx; Dtype weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); caffe_gpu_atomic_add(weight * cur_top_grad, grad_im + cur_bottom_grad_pos); } } } } } template <typename Dtype> void deformable_col2im_gpu(const Dtype* data_col, const Dtype* data_offset, const int channels,const 
int height, const int width,const int num_kernels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* grad_im) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int channel_per_deformable_group = channels / deformable_group; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) deformable_col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_col, data_offset,channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_im); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_gpu<float>(const float* data_col, const float* data_offset, const int channels, const int height, const int width,const int num_kernels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* grad_im); template void deformable_col2im_gpu<double>(const double* data_col, const double* data_offset, const int channels, const int height, const int width, const int num_kernels,const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* grad_im); template <typename Dtype> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* grad_offset) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = index / width_col / height_col; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const Dtype* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col; const Dtype* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = ((col_c * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col) % kernel_w; int i = (col_pos / width_col / height_col / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + 
w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype inv_h = h_in + i * dilation_h + offset_h; Dtype inv_w = w_in + j * dilation_w + offset_w; if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -1; } const Dtype weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } template <typename Dtype> void deformable_col2im_coord_gpu(const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * kernel_h * kernel_h * deformable_group; int channel_per_deformable_group = channels/ deformable_group; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) deformable_col2im_coord_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_col, data_im,data_offset, channels,height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_offset); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_coord_gpu<float>(const float* data_col, const float* data_im,const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* grad_offset); template void deformable_col2im_coord_gpu<double>(const double* data_col, const double* data_im,const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* grad_offset); }
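/* Both deformable_im2col_gpu and deformable_col2im_gpu above size their launches from the usual convolution output-extent formula: out = (in + 2*pad - (dilation*(kernel - 1) + 1)) / stride + 1. A small host-side sketch with a hypothetical worked example; the helper name and the concrete sizes are assumptions, not from the source. */
#include <cstdio>
int conv_out_size(int in, int pad, int kernel, int dilation, int stride) {
    return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}
int main() {
    std::printf("%d\n", conv_out_size(56, 1, 3, 1, 1)); // 56: 3x3 kernel, pad 1, stride 1 keeps the size
    std::printf("%d\n", conv_out_size(56, 1, 3, 2, 1)); // 54: dilation 2 makes the effective kernel 5
    return 0;
}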
a18bf93ca1315113043b32802dcd8a2d942f1913.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // -------------------------------------------------------- // Multitask Network Cascade // Written by Haozhi Qi // Copyright (c) 2016, Haozhi Qi // Licensed under The MIT License [see LICENSE for details] // -------------------------------------------------------- #include "fast_rcnn_layers.hpp" #include <iostream> namespace caffe { template <typename Dtype> __global__ void MaskPoolingForward(const int nthreads, const Dtype* bottom_data, const Dtype* bottom_masks, Dtype* top_data, const int channels, const int height, const int width) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the top output int pw = index % width; int ph = (index / width) % height; // int c = (index / width / height) % channels; int n = index / width / height / channels; int mask_index = n * height * width + ph * width + pw; // top feature map has identical shape with bottom feature map, so we reuse index here top_data[index] = bottom_data[index] * bottom_masks[mask_index]; } } template <typename Dtype> void MaskPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // bottom[0] is feature maps, of shape (n x c x h x w) // bottom[1] is masks, of shape (n x 1 x h x w) // output(n, c, h, w) = input_feature(n, c, h, w) * input_mask(n, 1, h, w) const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_masks = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); hipLaunchKernelGGL(( MaskPoolingForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, count, bottom_data, bottom_masks, top_data, channels_, height_, width_); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaskPoolingBackwardFeature(const int nthreads, const Dtype* bottom_data, const Dtype* bottom_masks, Dtype* bottom_diff, const Dtype* top_diff, const int channels, const int height, const int width) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; // int c = (index / width / height) % channels; int n = index / width / height / channels; // output w,h coordinate has the same size with input's w,h coordinate int mask_index = n * height * width + h * width + w; Dtype float_mask = bottom_masks[mask_index]; bottom_diff[index] = top_diff[index] * float_mask; } } template <typename Dtype> __global__ void MaskPoolingBackwardMask(const int nthreads, const Dtype* bottom_data, Dtype* bottom_diff, const Dtype* top_diff, const int channels, const int height, const int width) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, w, h) are index of mask element, with channel dim = 1 int w = index % width; int h = (index / width) % height; int n = index / width / height / 1; Dtype gradient = 0.0; for (int i = 0; i < channels; ++i) { int data_index = ((n * channels + i) * height + h) * width + w; gradient += top_diff[data_index] * bottom_data[data_index]; } int mask_index = ((n * height) + h) * width + w; bottom_diff[mask_index] = gradient; } } template <typename Dtype> void MaskPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_masks = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); int count = 
bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); if (propagate_down[0]) { hipLaunchKernelGGL(( MaskPoolingBackwardFeature<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, count, bottom_data, bottom_masks, bottom_diff, top_diff, channels_, height_, width_); } Dtype* bottom_mask_diff = bottom[1]->mutable_gpu_diff(); count = bottom[1]->count(); if (propagate_down[1]) { hipLaunchKernelGGL(( MaskPoolingBackwardMask<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, count, bottom_data, bottom_mask_diff, top_diff, channels_, height_, width_); } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(MaskPoolingLayer); } // namespace caffe
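/* The kernels in this file and in the deformable_im2col file above are launched through Caffe's CAFFE_GET_BLOCKS / CAFFE_CUDA_NUM_THREADS helpers and iterate with CUDA_KERNEL_LOOP. For reference, a sketch of how these helpers are conventionally defined in Caffe (caffe/util/device_alternate.hpp); the exact thread count is an assumption and differs between Caffe versions and build settings. */
const int CAFFE_CUDA_NUM_THREADS = 512;
// Enough blocks so that gridDim.x * blockDim.x covers all N elements.
inline int CAFFE_GET_BLOCKS(const int N) {
  return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}
// Grid-stride loop: each thread handles indices i, i + stride, i + 2*stride, ...
#define CUDA_KERNEL_LOOP(i, n)                              \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;       \
       i < (n);                                             \
       i += blockDim.x * gridDim.x)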
a18bf93ca1315113043b32802dcd8a2d942f1913.cu
// -------------------------------------------------------- // Multitask Network Cascade // Written by Haozhi Qi // Copyright (c) 2016, Haozhi Qi // Licensed under The MIT License [see LICENSE for details] // -------------------------------------------------------- #include "fast_rcnn_layers.hpp" #include <iostream> namespace caffe { template <typename Dtype> __global__ void MaskPoolingForward(const int nthreads, const Dtype* bottom_data, const Dtype* bottom_masks, Dtype* top_data, const int channels, const int height, const int width) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the top output int pw = index % width; int ph = (index / width) % height; // int c = (index / width / height) % channels; int n = index / width / height / channels; int mask_index = n * height * width + ph * width + pw; // top feature map has identical shape with bottom feature map, so we reuse index here top_data[index] = bottom_data[index] * bottom_masks[mask_index]; } } template <typename Dtype> void MaskPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // bottom[0] is feature maps, of shape (n x c x h x w) // bottom[1] is masks, of shape (n x 1 x h x w) // output(n, c, h, w) = input_feature(n, c, h, w) * input_mask(n, 1, h, w) const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_masks = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); MaskPoolingForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >>> (count, bottom_data, bottom_masks, top_data, channels_, height_, width_); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaskPoolingBackwardFeature(const int nthreads, const Dtype* bottom_data, const Dtype* bottom_masks, Dtype* bottom_diff, const Dtype* top_diff, const int channels, const int height, const int width) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; // int c = (index / width / height) % channels; int n = index / width / height / channels; // output w,h coordinate has the same size with input's w,h coordinate int mask_index = n * height * width + h * width + w; Dtype float_mask = bottom_masks[mask_index]; bottom_diff[index] = top_diff[index] * float_mask; } } template <typename Dtype> __global__ void MaskPoolingBackwardMask(const int nthreads, const Dtype* bottom_data, Dtype* bottom_diff, const Dtype* top_diff, const int channels, const int height, const int width) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, w, h) are index of mask element, with channel dim = 1 int w = index % width; int h = (index / width) % height; int n = index / width / height / 1; Dtype gradient = 0.0; for (int i = 0; i < channels; ++i) { int data_index = ((n * channels + i) * height + h) * width + w; gradient += top_diff[data_index] * bottom_data[data_index]; } int mask_index = ((n * height) + h) * width + w; bottom_diff[mask_index] = gradient; } } template <typename Dtype> void MaskPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_masks = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); if (propagate_down[0]) { MaskPoolingBackwardFeature<Dtype> 
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >>> (count, bottom_data, bottom_masks, bottom_diff, top_diff, channels_, height_, width_); } Dtype* bottom_mask_diff = bottom[1]->mutable_gpu_diff(); count = bottom[1]->count(); if (propagate_down[1]) { MaskPoolingBackwardMask<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >>> (count, bottom_data, bottom_mask_diff, top_diff, channels_, height_, width_); } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(MaskPoolingLayer); } // namespace caffe
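/* A compact CPU sketch of what MaskPoolingForward and the two backward kernels above compute per element: the single-channel mask scales every feature channel in the forward pass, the feature gradient is scaled the same way, and the mask gradient sums the product of top gradient and bottom features over channels. Buffer layout is NCHW as in the layer; the function name is an assumption, not from the source. */
void mask_pool_cpu(const float* bottom, const float* mask, const float* top_diff,
                   float* top, float* bottom_diff, float* mask_diff,
                   int num, int channels, int height, int width) {
  for (int n = 0; n < num; ++n)
    for (int h = 0; h < height; ++h)
      for (int w = 0; w < width; ++w) {
        int m = (n * height + h) * width + w;            // mask index (one channel)
        float grad = 0.f;
        for (int c = 0; c < channels; ++c) {
          int i = ((n * channels + c) * height + h) * width + w;
          top[i] = bottom[i] * mask[m];                  // forward
          bottom_diff[i] = top_diff[i] * mask[m];        // backward w.r.t. features
          grad += top_diff[i] * bottom[i];               // accumulate mask gradient
        }
        mask_diff[m] = grad;                             // backward w.r.t. mask
      }
}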
8440fba50596ef6f37f7417adffc1db9209c5c48.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #ifndef __HIPCC__ #define __HIPCC__ #endif #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #define LEN_M 4 #define LEN_N 4 #define LEN_K 3 #define TILE_WIDTH 2 __global__ void MatrixMulOnDevice(int m, int n, int k, float* A, float* B, float* C) { int Row = blockIdx.y*blockDim.y + threadIdx.y; int Col = blockIdx.x*blockDim.x + threadIdx.x; if ((Row < m) && (Col < k)) { float Cvalue = 0.0; for (int i = 0; i < n; ++i) Cvalue += A[Row*n + i] * B[Col + i*k]; C[Row*k + Col] = Cvalue; } } int main() { // Allocate and initialize the matrices A, B, C float * A, *B, *C; A = (float*)malloc(LEN_M*LEN_N * sizeof(float)); B = (float*)malloc(LEN_N*LEN_K * sizeof(float)); C = (float*)malloc(LEN_M*LEN_K * sizeof(float)); for (int i = 0; i<LEN_M*LEN_N; i++) A[i] = i; for (int i = 0; i<LEN_N*LEN_K; i++) B[i] = i; for (int i = 0; i<LEN_M*LEN_K; i++) C[i] = 0.0; // I/O to read the input matrices A and B float * dev_A, *dev_B, *dev_C; hipMalloc((void**)&dev_A, LEN_M*LEN_N * sizeof(float)); hipMalloc((void**)&dev_B, LEN_N*LEN_K * sizeof(float)); hipMalloc((void**)&dev_C, LEN_M*LEN_K * sizeof(float)); hipMemcpy(dev_A, A, LEN_M*LEN_N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_B, B, LEN_N*LEN_K * sizeof(float), hipMemcpyHostToDevice); // A*B on the device dim3 dimGrid((LEN_K - 1) / TILE_WIDTH + 1, (LEN_M - 1) / TILE_WIDTH + 1); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); MatrixMulOnDevice << <dimGrid, dimBlock >> >(LEN_M, LEN_N, LEN_K, dev_A, dev_B, dev_C); // I/O to write the output matrix C hipMemcpy(C, dev_C, LEN_M*LEN_K * sizeof(float), hipMemcpyDeviceToHost); printf("C:"); for (int i = 0; i<LEN_M*LEN_K; i++) printf(" %6.1f", C[i]); printf("\n"); // Free matrices A, B, C hipFree(dev_A); hipFree(dev_B); hipFree(dev_C); free(A); free(B); free(C); return 0; }
8440fba50596ef6f37f7417adffc1db9209c5c48.cu
#include <cuda_runtime_api.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #ifndef __CUDACC__ #define __CUDACC__ #endif #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda.h> #include <device_functions.h> #include <cuda_runtime_api.h> #define LEN_M 4 #define LEN_N 4 #define LEN_K 3 #define TILE_WIDTH 2 __global__ void MatrixMulOnDevice(int m, int n, int k, float* A, float* B, float* C) { int Row = blockIdx.y*blockDim.y + threadIdx.y; int Col = blockIdx.x*blockDim.x + threadIdx.x; if ((Row < m) && (Col < k)) { float Cvalue = 0.0; for (int i = 0; i < n; ++i) Cvalue += A[Row*n + i] * B[Col + i*k]; C[Row*k + Col] = Cvalue; } } int main() { // Allocate and initialize the matrices A, B, C float * A, *B, *C; A = (float*)malloc(LEN_M*LEN_N * sizeof(float)); B = (float*)malloc(LEN_N*LEN_K * sizeof(float)); C = (float*)malloc(LEN_M*LEN_K * sizeof(float)); for (int i = 0; i<LEN_M*LEN_N; i++) A[i] = i; for (int i = 0; i<LEN_N*LEN_K; i++) B[i] = i; for (int i = 0; i<LEN_M*LEN_K; i++) C[i] = 0.0; // I/O to read the input matrices A and B float * dev_A, *dev_B, *dev_C; cudaMalloc((void**)&dev_A, LEN_M*LEN_N * sizeof(float)); cudaMalloc((void**)&dev_B, LEN_N*LEN_K * sizeof(float)); cudaMalloc((void**)&dev_C, LEN_M*LEN_K * sizeof(float)); cudaMemcpy(dev_A, A, LEN_M*LEN_N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_B, B, LEN_N*LEN_K * sizeof(float), cudaMemcpyHostToDevice); // A*B on the device dim3 dimGrid((LEN_K - 1) / TILE_WIDTH + 1, (LEN_M - 1) / TILE_WIDTH + 1); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); MatrixMulOnDevice << <dimGrid, dimBlock >> >(LEN_M, LEN_N, LEN_K, dev_A, dev_B, dev_C); // I/O to write the output matrix C cudaMemcpy(C, dev_C, LEN_M*LEN_K * sizeof(float), cudaMemcpyDeviceToHost); printf("C:"); for (int i = 0; i<LEN_M*LEN_K; i++) printf(" %6.1f", C[i]); printf("\n"); // Free matrices A, B, C cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C); free(A); free(B); free(C); return 0; }
0d02e2cb03622978c07d1a42732100f6b398044e.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCStorageCopy.cu" #else void THCStorage_(rawCopy)(THCState *state, THCStorage *self, real *src) { THCudaCheck(hipMemcpyAsync(self->data, src, self->size * sizeof(real), hipMemcpyDeviceToDevice, THCState_getCurrentStream(state))); } void THCStorage_(copy)(THCState *state, THCStorage *self, THCStorage *src) { THArgCheck(self->size == src->size, 2, "size does not match"); THCudaCheck(hipMemcpyAsync(self->data, src->data, self->size * sizeof(real), hipMemcpyDeviceToDevice, THCState_getCurrentStream(state))); } void THCStorage_(copyCuda)(THCState *state, THCStorage *self, THCStorage *src) { THArgCheck(self->size == src->size, 2, "size does not match"); THCudaCheck(hipMemcpyAsync(self->data, src->data, self->size * sizeof(real), hipMemcpyDeviceToDevice, THCState_getCurrentStream(state))); } // conversions are mediated by the CPU // yes, this is slow; feel free to write CUDA kernels for this #ifndef THC_REAL_IS_HALF #define THC_CUDA_STORAGE_IMPLEMENT_COPY(TYPEC,TYPECUDA) \ void THCStorage_(copyCuda##TYPEC)(THCState *state, THCStorage *self, struct THCuda##TYPECUDA##Storage *src) \ { \ if(THCTypeIdx_(Real) == THCTypeIdx_(TYPEC)) { \ THCStorage_(copy)(state, self, (THCStorage*) src); /* cast just removes compiler warning */ \ } else { \ THArgCheck(self->size == src->size, 2, "size does not match"); \ TH##TYPEC##Storage *buffer1 = TH##TYPEC##Storage_newWithSize(src->size); \ THStorage *buffer2 = THStorage_(newWithSize)(src->size); \ TH##TYPEC##Storage_copyCuda(state, buffer1, src); \ THStorage_(copy##TYPEC)(buffer2, buffer1); \ THCStorage_(copyCPU)(state, self, buffer2); \ TH##TYPEC##Storage_free(buffer1); \ THStorage_(free)(buffer2); \ } \ } #else #define THC_CUDA_STORAGE_IMPLEMENT_COPY(TYPEC,TYPECUDA) \ void THCStorage_(copyCuda##TYPEC)(THCState *state, THCStorage *self, struct THCuda##TYPECUDA##Storage *src) \ { \ THArgCheck(self->size == src->size, 2, "size does not match"); \ if(THCTypeIdx_(TYPEC) == THCTypeIdxFloat) { \ THCFloat2Half(state, self->data, (float*) src->data, src->size); /* cast removes compiler error */ \ } else { \ THCudaStorage *buffer = THCudaStorage_newWithSize(state, src->size); \ THCudaStorage_copyCuda##TYPEC(state, buffer, src); \ THCFloat2Half(state, self->data, buffer->data, buffer->size); \ THCudaStorage_free(state, buffer); \ } \ } #endif THC_CUDA_STORAGE_IMPLEMENT_COPY(Byte,Byte) THC_CUDA_STORAGE_IMPLEMENT_COPY(Char,Char) THC_CUDA_STORAGE_IMPLEMENT_COPY(Short,Short) THC_CUDA_STORAGE_IMPLEMENT_COPY(Int,Int) THC_CUDA_STORAGE_IMPLEMENT_COPY(Long,Long) THC_CUDA_STORAGE_IMPLEMENT_COPY(Float,) // i.e. float THC_CUDA_STORAGE_IMPLEMENT_COPY(Double,Double) #if TORCH_HIP_VERSION >= 7050 #define FLOAT_COPY(TYPE) TH_CONCAT_3(TH, CReal, Storage_copyCudaFloat) void THCStorage_(copyCudaHalf)(THCState *state, THCStorage *self, struct THCudaHalfStorage *src) { if(THCTypeIdx_(Real) == THCTypeIdxHalf) { THCStorage_(copy)(state, self, (THCStorage*) src); /* cast just removes compiler warning */ } else { THArgCheck(self->size == src->size, 2, "size does not match"); THCudaStorage *buffer = THCudaStorage_newWithSize(state, src->size); THCHalf2Float(state, buffer->data, src->data, src->size); FLOAT_COPY(Real)(state, self, buffer); THCudaStorage_free(state, buffer); } } #undef FLOAT_COPY #endif // TORCH_HIP_VERSION >= 7050 #undef THC_CUDA_STORAGE_IMPLEMENT_COPY #endif
0d02e2cb03622978c07d1a42732100f6b398044e.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCStorageCopy.cu" #else void THCStorage_(rawCopy)(THCState *state, THCStorage *self, real *src) { THCudaCheck(cudaMemcpyAsync(self->data, src, self->size * sizeof(real), cudaMemcpyDeviceToDevice, THCState_getCurrentStream(state))); } void THCStorage_(copy)(THCState *state, THCStorage *self, THCStorage *src) { THArgCheck(self->size == src->size, 2, "size does not match"); THCudaCheck(cudaMemcpyAsync(self->data, src->data, self->size * sizeof(real), cudaMemcpyDeviceToDevice, THCState_getCurrentStream(state))); } void THCStorage_(copyCuda)(THCState *state, THCStorage *self, THCStorage *src) { THArgCheck(self->size == src->size, 2, "size does not match"); THCudaCheck(cudaMemcpyAsync(self->data, src->data, self->size * sizeof(real), cudaMemcpyDeviceToDevice, THCState_getCurrentStream(state))); } // conversions are mediated by the CPU // yes, this is slow; feel free to write CUDA kernels for this #ifndef THC_REAL_IS_HALF #define THC_CUDA_STORAGE_IMPLEMENT_COPY(TYPEC,TYPECUDA) \ void THCStorage_(copyCuda##TYPEC)(THCState *state, THCStorage *self, struct THCuda##TYPECUDA##Storage *src) \ { \ if(THCTypeIdx_(Real) == THCTypeIdx_(TYPEC)) { \ THCStorage_(copy)(state, self, (THCStorage*) src); /* cast just removes compiler warning */ \ } else { \ THArgCheck(self->size == src->size, 2, "size does not match"); \ TH##TYPEC##Storage *buffer1 = TH##TYPEC##Storage_newWithSize(src->size); \ THStorage *buffer2 = THStorage_(newWithSize)(src->size); \ TH##TYPEC##Storage_copyCuda(state, buffer1, src); \ THStorage_(copy##TYPEC)(buffer2, buffer1); \ THCStorage_(copyCPU)(state, self, buffer2); \ TH##TYPEC##Storage_free(buffer1); \ THStorage_(free)(buffer2); \ } \ } #else #define THC_CUDA_STORAGE_IMPLEMENT_COPY(TYPEC,TYPECUDA) \ void THCStorage_(copyCuda##TYPEC)(THCState *state, THCStorage *self, struct THCuda##TYPECUDA##Storage *src) \ { \ THArgCheck(self->size == src->size, 2, "size does not match"); \ if(THCTypeIdx_(TYPEC) == THCTypeIdxFloat) { \ THCFloat2Half(state, self->data, (float*) src->data, src->size); /* cast removes compiler error */ \ } else { \ THCudaStorage *buffer = THCudaStorage_newWithSize(state, src->size); \ THCudaStorage_copyCuda##TYPEC(state, buffer, src); \ THCFloat2Half(state, self->data, buffer->data, buffer->size); \ THCudaStorage_free(state, buffer); \ } \ } #endif THC_CUDA_STORAGE_IMPLEMENT_COPY(Byte,Byte) THC_CUDA_STORAGE_IMPLEMENT_COPY(Char,Char) THC_CUDA_STORAGE_IMPLEMENT_COPY(Short,Short) THC_CUDA_STORAGE_IMPLEMENT_COPY(Int,Int) THC_CUDA_STORAGE_IMPLEMENT_COPY(Long,Long) THC_CUDA_STORAGE_IMPLEMENT_COPY(Float,) // i.e. float THC_CUDA_STORAGE_IMPLEMENT_COPY(Double,Double) #if CUDA_VERSION >= 7050 #define FLOAT_COPY(TYPE) TH_CONCAT_3(TH, CReal, Storage_copyCudaFloat) void THCStorage_(copyCudaHalf)(THCState *state, THCStorage *self, struct THCudaHalfStorage *src) { if(THCTypeIdx_(Real) == THCTypeIdxHalf) { THCStorage_(copy)(state, self, (THCStorage*) src); /* cast just removes compiler warning */ } else { THArgCheck(self->size == src->size, 2, "size does not match"); THCudaStorage *buffer = THCudaStorage_newWithSize(state, src->size); THCHalf2Float(state, buffer->data, src->data, src->size); FLOAT_COPY(Real)(state, self, buffer); THCudaStorage_free(state, buffer); } } #undef FLOAT_COPY #endif // CUDA_VERSION >= 7050 #undef THC_CUDA_STORAGE_IMPLEMENT_COPY #endif
c66ef22cfe0322d5f9051fcf96f90022367c587e.hip
// !!! This is a file automatically generated by hipify!!! /* * Please write your name and net ID below * * Last name: * First name: * Net ID: * */ /* * This file contains the code for doing the heat distribution problem. * You do not need to modify anything except starting gpu_heat_dist() at the bottom * of this file. * In gpu_heat_dist() you can organize your data structure and the call to your * kernel(s) that you need to write too. * * You compile with: * nvcc -o heatdist heatdist.cu */ #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <time.h> /* To index element (i,j) of a 2D array stored as 1D */ #define index(i, j, N) ((i)*(N)) + (j) #include <iostream> using namespace std; /*****************************************************************/ // Function declarations: Feel free to add any functions you want. void seq_heat_dist(float *, unsigned int, unsigned int); void gpu_heat_dist(float *, unsigned int, unsigned int); /*****************************************************************/ int main(int argc, char * argv[]) { // printf("asdfasdfasdfasfd\n"); // std::cout<<"adfasdf============================================"<<std::endl; unsigned int N; /* Dimention of NxN matrix */ int type_of_device = 0; // CPU or GPU int iterations = 0; int i; /* The 2D array of points will be treated as 1D array of NxN elements */ float * playground; // to measure time taken by a specific part of the code double time_taken; clock_t start, end; if(argc != 4) { fprintf(stderr, "usage: heatdist num iterations who\n"); fprintf(stderr, "num = dimension of the square matrix (50 and up)\n"); fprintf(stderr, "iterations = number of iterations till stopping (1 and up)\n"); fprintf(stderr, "who = 0: sequential code on CPU, 1: GPU execution\n"); exit(1); } type_of_device = atoi(argv[3]); N = (unsigned int) atoi(argv[1]); iterations = (unsigned int) atoi(argv[2]); /* Dynamically allocate NxN array of floats */ playground = (float *)calloc(N*N, sizeof(float)); if( !playground ) { fprintf(stderr, " Cannot allocate the %u x %u array\n", N, N); exit(1); } /* Initialize it: calloc already initalized everything to 0 */ // Edge elements to 80F for(i = 0; i < N; i++) playground[index(0,i,N)] = 80; for(i = 0; i < N; i++) playground[index(i,0,N)] = 80; for(i = 0; i < N; i++) playground[index(i,N-1, N)] = 80; for(i = 0; i < N; i++) playground[index(N-1,i,N)] = 80; // from (0,10) to (0,30) inclusive are 150F for(i = 10; i <= 30; i++) playground[index(0,i,N)] = 150; if( !type_of_device ) // The CPU sequential version { start = clock(); seq_heat_dist(playground, N, iterations); end = clock(); } else // The GPU version { start = clock(); gpu_heat_dist(playground, N, iterations); end = clock(); } time_taken = ((double)(end - start))/ CLOCKS_PER_SEC; printf("Time taken for %s is %lf\n", type_of_device == 0? 
"CPU" : "GPU", time_taken); free(playground); return 0; } /***************** The CPU sequential version (DO NOT CHANGE THAT) **************/ void seq_heat_dist(float * playground, unsigned int N, unsigned int iterations) { // Loop indices int i, j, k; int upper = N-1; // number of bytes to be copied between array temp and array playground unsigned int num_bytes = 0; float * temp; /* Dynamically allocate another array for temp values */ /* Dynamically allocate NxN array of floats */ temp = (float *)calloc(N*N, sizeof(float)); if( !temp ) { fprintf(stderr, " Cannot allocate temp %u x %u array\n", N, N); exit(1); } num_bytes = N*N*sizeof(float); /* Copy initial array in temp */ memcpy((void *)temp, (void *) playground, num_bytes); for( k = 0; k < iterations; k++) { /* Calculate new values and store them in temp */ for(i = 1; i < upper; i++) for(j = 1; j < upper; j++) temp[index(i,j,N)] = (playground[index(i-1,j,N)] + playground[index(i+1,j,N)] + playground[index(i,j-1,N)] + playground[index(i,j+1,N)])/4.0; /* Move new values into old values */ memcpy((void *)playground, (void *) temp, num_bytes); } } /***************** The GPU version: Write your code here *********************/ /* This function can call one or more kenels *********************************/ // __global__ void testLoop(float * tempGround, float * playground, unsigned int N, unsigned int iterations); __global__ void testLoop(float * tempGround, int intN); void gpu_heat_dist(float * playground, unsigned int N, unsigned int iterations) { cout<<"~~~in gpu_heat_dist"<<endl; int numElements = N*N; size_t groundSize = numElements * sizeof(float); float *h_temp = (float *) malloc(groundSize); float *d_temp; float *h_tempResult = (float *)malloc(groundSize); hipMalloc((void **)&d_temp, groundSize); hipMemcpy(d_temp, h_temp, groundSize, hipMemcpyHostToDevice); /////////////////////////////////// int threadNum = 256; int blockNum = (N + threadNum -1)/threadNum; //hipLaunchKernelGGL(( testLoop), dim3(blockNum), dim3(threadNum) , 0, 0, d_temp, numElements); hipLaunchKernelGGL(( testLoop), dim3(blockNum), dim3(threadNum) , 0, 0, d_temp, N); hipMemcpy(h_tempResult, d_temp, groundSize, hipMemcpyDeviceToHost); for(int i = 0; i < N*N; i++) { cout<<"i-> "<<i <<" value->"<< h_tempResult[i]<<endl; } cout<<"N is "<<N<<endl; hipFree(d_temp); } __global__ void testLoop(float * tempGround, int intN) { int ix = threadIdx.x + blockDim.x*blockIdx.x; // int iy = threadIdx.y + blockDim.y*blockIdx.y; if(ix < intN) { for(int i = 0; i < intN; i++) { // tempGround[ix*intN + i] = (float) ix+ 0.777; tempGround[ix*intN + i] = (float) ix+ 0.777; } } // tempGround[ix*10+iy] = (float) iy+ 0.777; }
c66ef22cfe0322d5f9051fcf96f90022367c587e.cu
/* * Please write your name and net ID below * * Last name: * First name: * Net ID: * */ /* * This file contains the code for doing the heat distribution problem. * You do not need to modify anything except starting gpu_heat_dist() at the bottom * of this file. * In gpu_heat_dist() you can organize your data structure and the call to your * kernel(s) that you need to write too. * * You compile with: * nvcc -o heatdist heatdist.cu */ #include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <time.h> /* To index element (i,j) of a 2D array stored as 1D */ #define index(i, j, N) ((i)*(N)) + (j) #include <iostream> using namespace std; /*****************************************************************/ // Function declarations: Feel free to add any functions you want. void seq_heat_dist(float *, unsigned int, unsigned int); void gpu_heat_dist(float *, unsigned int, unsigned int); /*****************************************************************/ int main(int argc, char * argv[]) { // printf("asdfasdfasdfasfd\n"); // std::cout<<"adfasdf============================================"<<std::endl; unsigned int N; /* Dimention of NxN matrix */ int type_of_device = 0; // CPU or GPU int iterations = 0; int i; /* The 2D array of points will be treated as 1D array of NxN elements */ float * playground; // to measure time taken by a specific part of the code double time_taken; clock_t start, end; if(argc != 4) { fprintf(stderr, "usage: heatdist num iterations who\n"); fprintf(stderr, "num = dimension of the square matrix (50 and up)\n"); fprintf(stderr, "iterations = number of iterations till stopping (1 and up)\n"); fprintf(stderr, "who = 0: sequential code on CPU, 1: GPU execution\n"); exit(1); } type_of_device = atoi(argv[3]); N = (unsigned int) atoi(argv[1]); iterations = (unsigned int) atoi(argv[2]); /* Dynamically allocate NxN array of floats */ playground = (float *)calloc(N*N, sizeof(float)); if( !playground ) { fprintf(stderr, " Cannot allocate the %u x %u array\n", N, N); exit(1); } /* Initialize it: calloc already initalized everything to 0 */ // Edge elements to 80F for(i = 0; i < N; i++) playground[index(0,i,N)] = 80; for(i = 0; i < N; i++) playground[index(i,0,N)] = 80; for(i = 0; i < N; i++) playground[index(i,N-1, N)] = 80; for(i = 0; i < N; i++) playground[index(N-1,i,N)] = 80; // from (0,10) to (0,30) inclusive are 150F for(i = 10; i <= 30; i++) playground[index(0,i,N)] = 150; if( !type_of_device ) // The CPU sequential version { start = clock(); seq_heat_dist(playground, N, iterations); end = clock(); } else // The GPU version { start = clock(); gpu_heat_dist(playground, N, iterations); end = clock(); } time_taken = ((double)(end - start))/ CLOCKS_PER_SEC; printf("Time taken for %s is %lf\n", type_of_device == 0? 
"CPU" : "GPU", time_taken); free(playground); return 0; } /***************** The CPU sequential version (DO NOT CHANGE THAT) **************/ void seq_heat_dist(float * playground, unsigned int N, unsigned int iterations) { // Loop indices int i, j, k; int upper = N-1; // number of bytes to be copied between array temp and array playground unsigned int num_bytes = 0; float * temp; /* Dynamically allocate another array for temp values */ /* Dynamically allocate NxN array of floats */ temp = (float *)calloc(N*N, sizeof(float)); if( !temp ) { fprintf(stderr, " Cannot allocate temp %u x %u array\n", N, N); exit(1); } num_bytes = N*N*sizeof(float); /* Copy initial array in temp */ memcpy((void *)temp, (void *) playground, num_bytes); for( k = 0; k < iterations; k++) { /* Calculate new values and store them in temp */ for(i = 1; i < upper; i++) for(j = 1; j < upper; j++) temp[index(i,j,N)] = (playground[index(i-1,j,N)] + playground[index(i+1,j,N)] + playground[index(i,j-1,N)] + playground[index(i,j+1,N)])/4.0; /* Move new values into old values */ memcpy((void *)playground, (void *) temp, num_bytes); } } /***************** The GPU version: Write your code here *********************/ /* This function can call one or more kenels *********************************/ // __global__ void testLoop(float * tempGround, float * playground, unsigned int N, unsigned int iterations); __global__ void testLoop(float * tempGround, int intN); void gpu_heat_dist(float * playground, unsigned int N, unsigned int iterations) { cout<<"~~~in gpu_heat_dist"<<endl; int numElements = N*N; size_t groundSize = numElements * sizeof(float); float *h_temp = (float *) malloc(groundSize); float *d_temp; float *h_tempResult = (float *)malloc(groundSize); cudaMalloc((void **)&d_temp, groundSize); cudaMemcpy(d_temp, h_temp, groundSize, cudaMemcpyHostToDevice); /////////////////////////////////// int threadNum = 256; int blockNum = (N + threadNum -1)/threadNum; // testLoop<<<blockNum, threadNum >>>(d_temp, numElements); testLoop<<<blockNum, threadNum >>>(d_temp, N); cudaMemcpy(h_tempResult, d_temp, groundSize, cudaMemcpyDeviceToHost); for(int i = 0; i < N*N; i++) { cout<<"i-> "<<i <<" value->"<< h_tempResult[i]<<endl; } cout<<"N is "<<N<<endl; cudaFree(d_temp); } __global__ void testLoop(float * tempGround, int intN) { int ix = threadIdx.x + blockDim.x*blockIdx.x; // int iy = threadIdx.y + blockDim.y*blockIdx.y; if(ix < intN) { for(int i = 0; i < intN; i++) { // tempGround[ix*intN + i] = (float) ix+ 0.777; tempGround[ix*intN + i] = (float) ix+ 0.777; } } // tempGround[ix*10+iy] = (float) iy+ 0.777; }
e9b72cc5470d5bebba482958b2f6185338942f56.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include <stdint.h> #include <vector> #include "include/hip/hip_fp16.h" #include "accumulate_n_v2_impl_hip.cuh" template <typename T> __global__ void AccumulateNV2(const size_t size, const size_t n, T **inputs, T *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { T temp = 0; for (size_t num = 0; num < n; num++) { temp += inputs[num][pos]; } output[pos] = temp; } return; } template <typename T> hipError_t CalAccumulateNV2(const size_t size, const size_t n, T **inputs, T *output, const uint32_t &device_id, hipStream_t cuda_stream) { hipLaunchKernelGGL(( AccumulateNV2), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, n, inputs, output); CHECK_CUDA_LAUNCH_SUCCESS(); } template CUDA_LIB_EXPORT hipError_t CalAccumulateNV2<uint8_t>(const size_t size, const size_t n, uint8_t **inputs, uint8_t *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalAccumulateNV2<int8_t>(const size_t size, const size_t n, int8_t **inputs, int8_t *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalAccumulateNV2<int32_t>(const size_t size, const size_t n, int32_t **inputs, int32_t *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalAccumulateNV2<half>(const size_t size, const size_t n, half **inputs, half *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalAccumulateNV2<float>(const size_t size, const size_t n, float **inputs, float *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CalAccumulateNV2<double>(const size_t size, const size_t n, double **inputs, double *output, const uint32_t &device_id, hipStream_t cuda_stream);
e9b72cc5470d5bebba482958b2f6185338942f56.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include <stdint.h> #include <vector> #include "include/cuda_fp16.h" #include "accumulate_n_v2_impl.cuh" template <typename T> __global__ void AccumulateNV2(const size_t size, const size_t n, T **inputs, T *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { T temp = 0; for (size_t num = 0; num < n; num++) { temp += inputs[num][pos]; } output[pos] = temp; } return; } template <typename T> cudaError_t CalAccumulateNV2(const size_t size, const size_t n, T **inputs, T *output, const uint32_t &device_id, cudaStream_t cuda_stream) { AccumulateNV2<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, n, inputs, output); CHECK_CUDA_LAUNCH_SUCCESS(); } template CUDA_LIB_EXPORT cudaError_t CalAccumulateNV2<uint8_t>(const size_t size, const size_t n, uint8_t **inputs, uint8_t *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalAccumulateNV2<int8_t>(const size_t size, const size_t n, int8_t **inputs, int8_t *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalAccumulateNV2<int32_t>(const size_t size, const size_t n, int32_t **inputs, int32_t *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalAccumulateNV2<half>(const size_t size, const size_t n, half **inputs, half *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalAccumulateNV2<float>(const size_t size, const size_t n, float **inputs, float *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CalAccumulateNV2<double>(const size_t size, const size_t n, double **inputs, double *output, const uint32_t &device_id, cudaStream_t cuda_stream);
b67127d50af15eb1a8f30570f3958d476c8cd8a2.hip
// !!! This is a file automatically generated by hipify!!! #include "count.cuh" #include <cstdlib> #include <iostream> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/host_vector.h> #include <time.h> using namespace std; int main(int argc, const char *argv[]) { string N; if (argc > 1) { N = string(argv[1]); } int n = atoi(N.c_str()); thrust::host_vector<int> H(n); srand((unsigned)time(NULL)); for (int i = 0; i < n; i++) { H[i] = (rand() % 101); } thrust::device_vector<int> d_in = H; thrust::device_vector<int> values(n); thrust::device_vector<int> counts(n); hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); count(d_in, values, counts); hipEventRecord(stop); hipEventSynchronize(stop); // Get the elapsed time in milliseconds float ms; hipEventElapsedTime(&ms, start, stop); cout << values[values.size() - 1] << endl; cout << counts[counts.size() - 1] << endl; cout << ms << endl; return 0; }
b67127d50af15eb1a8f30570f3958d476c8cd8a2.cu
#include "count.cuh" #include <cstdlib> #include <iostream> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/host_vector.h> #include <time.h> using namespace std; int main(int argc, const char *argv[]) { string N; if (argc > 1) { N = string(argv[1]); } int n = atoi(N.c_str()); thrust::host_vector<int> H(n); srand((unsigned)time(NULL)); for (int i = 0; i < n; i++) { H[i] = (rand() % 101); } thrust::device_vector<int> d_in = H; thrust::device_vector<int> values(n); thrust::device_vector<int> counts(n); cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); count(d_in, values, counts); cudaEventRecord(stop); cudaEventSynchronize(stop); // Get the elapsed time in milliseconds float ms; cudaEventElapsedTime(&ms, start, stop); cout << values[values.size() - 1] << endl; cout << counts[counts.size() - 1] << endl; cout << ms << endl; return 0; }
ee2666ef3cf1b6a652f54676c5bfdeaf23da2517.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "matmul_integer.cuh" #include <hipcub/hipcub.hpp> #include "core/providers/cuda/cu_inc/common.cuh" namespace onnxruntime { namespace cuda { template <int TPB> __global__ void ReduceRowSumOnMatrixAKernel(const int8_t* matrix, int32_t* row_sum, const int8_t offset, int32_t K) { int32_t thread_data = 0; const int8_t* row_ptr = matrix + blockIdx.x * K; for (int i = threadIdx.x; i < K; i += TPB) { thread_data += *(row_ptr + i); } using BlockReduce = hipcub::BlockReduce<int32_t, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; int32_t sum = BlockReduce(temp_storage).Sum(thread_data); if (threadIdx.x == 0) { row_sum[blockIdx.x] = offset * sum; } } Status ReduceRowSumOnMatrixA(hipStream_t stream, const int8_t* matrix, int32_t* row_sum, const int8_t offset, const MatMulComputeHelper& helper) { for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) { hipLaunchKernelGGL(( ReduceRowSumOnMatrixAKernel<static_cast<int>(GridDim::maxThreadsPerBlock)>), dim3(static_cast<int>(helper.M())), dim3(GridDim::maxThreadsPerBlock), 0, stream, matrix + helper.LeftOffsets()[batch], row_sum + batch * helper.M(), offset, static_cast<int>(helper.K())); } return CUDA_CALL(hipGetLastError()); } template <int TPB> __global__ void ReduceColSumOnMatrixBKernel(const int8_t* matrix, int32_t* col_sum, const int8_t offset, int32_t row, int32_t col) { int32_t thread_data = 0; const int8_t* col_ptr = matrix + blockIdx.x; for (int i = threadIdx.x; i < row; i += TPB) { thread_data += *(col_ptr + i * col); } using BlockReduce = hipcub::BlockReduce<int32_t, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; int32_t sum = BlockReduce(temp_storage).Sum(thread_data); if (threadIdx.x == 0) { col_sum[blockIdx.x] = offset * sum; } } Status ReduceColSumOnMatrixB(hipStream_t stream, const int8_t* matrix, int32_t* col_sum, const int8_t offset, const MatMulComputeHelper& helper) { for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) { hipLaunchKernelGGL(( ReduceColSumOnMatrixBKernel<static_cast<int>(GridDim::maxThreadsPerBlock)>), dim3(static_cast<int>(helper.N())), dim3(GridDim::maxThreadsPerBlock), 0, stream, matrix + helper.RightOffsets()[batch], col_sum + batch * helper.N(), offset, static_cast<int32_t>(helper.K()), static_cast<int32_t>(helper.N())); } return CUDA_CALL(hipGetLastError()); } __global__ void ComputeOffsetOfMatrixAB(const int32_t* row_sum, const int32_t* col_sum, int32_t* output, int32_t K_A_B, int32_t N) { for (int32_t i = threadIdx.x; i < N; i += blockDim.x) { *(output + blockIdx.x * N + i) = K_A_B - row_sum[blockIdx.x] - col_sum[i]; } } __global__ void ComputeOffsetOfMatrixA(const int32_t* col_sum, int32_t* output, int32_t N) { for (int32_t i = threadIdx.x; i < N; i += blockDim.x) { *(output + blockIdx.x * N + i) = -col_sum[i]; } } __global__ void ComputeOffsetOfMatrixB(const int32_t* row_sum, int32_t* output, int32_t N) { for (int32_t i = threadIdx.x; i < N; i += blockDim.x) { *(output + blockIdx.x * N + i) = -row_sum[blockIdx.x]; } } Status OffsetOutput(hipStream_t stream, const int32_t* row_sum, const int32_t* col_sum, int32_t* output, const int8_t a_offset, const int8_t b_offset, const MatMulComputeHelper& helper) { if (a_offset && b_offset) { for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) { hipLaunchKernelGGL(( ComputeOffsetOfMatrixAB), 
dim3(static_cast<int>(helper.M())), dim3(GridDim::maxThreadsPerBlock), 0, stream, row_sum + batch * helper.M(), col_sum + batch * helper.N(), output + helper.OutputOffsets()[batch], static_cast<int32_t>(helper.K()) * a_offset * b_offset, static_cast<int32_t>(helper.N())); } } else if (a_offset) { for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) { hipLaunchKernelGGL(( ComputeOffsetOfMatrixA), dim3(static_cast<int>(helper.M())), dim3(GridDim::maxThreadsPerBlock), 0, stream, col_sum + batch * helper.N(), output + helper.OutputOffsets()[batch], static_cast<int32_t>(helper.N())); } } else if (b_offset) { for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) { hipLaunchKernelGGL(( ComputeOffsetOfMatrixB), dim3(static_cast<int>(helper.M())), dim3(GridDim::maxThreadsPerBlock), 0, stream, row_sum + batch * helper.M(), output + helper.OutputOffsets()[batch], static_cast<int32_t>(helper.N())); } } return CUDA_CALL(hipGetLastError()); } } // namespace cuda } // namespace onnxruntime
ee2666ef3cf1b6a652f54676c5bfdeaf23da2517.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "matmul_integer.cuh" #include <cub/cub.cuh> #include "core/providers/cuda/cu_inc/common.cuh" namespace onnxruntime { namespace cuda { template <int TPB> __global__ void ReduceRowSumOnMatrixAKernel(const int8_t* matrix, int32_t* row_sum, const int8_t offset, int32_t K) { int32_t thread_data = 0; const int8_t* row_ptr = matrix + blockIdx.x * K; for (int i = threadIdx.x; i < K; i += TPB) { thread_data += *(row_ptr + i); } using BlockReduce = cub::BlockReduce<int32_t, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; int32_t sum = BlockReduce(temp_storage).Sum(thread_data); if (threadIdx.x == 0) { row_sum[blockIdx.x] = offset * sum; } } Status ReduceRowSumOnMatrixA(cudaStream_t stream, const int8_t* matrix, int32_t* row_sum, const int8_t offset, const MatMulComputeHelper& helper) { for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) { ReduceRowSumOnMatrixAKernel<static_cast<int>(GridDim::maxThreadsPerBlock)><<<static_cast<int>(helper.M()), GridDim::maxThreadsPerBlock, 0, stream>>>(matrix + helper.LeftOffsets()[batch], row_sum + batch * helper.M(), offset, static_cast<int>(helper.K())); } return CUDA_CALL(cudaGetLastError()); } template <int TPB> __global__ void ReduceColSumOnMatrixBKernel(const int8_t* matrix, int32_t* col_sum, const int8_t offset, int32_t row, int32_t col) { int32_t thread_data = 0; const int8_t* col_ptr = matrix + blockIdx.x; for (int i = threadIdx.x; i < row; i += TPB) { thread_data += *(col_ptr + i * col); } using BlockReduce = cub::BlockReduce<int32_t, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; int32_t sum = BlockReduce(temp_storage).Sum(thread_data); if (threadIdx.x == 0) { col_sum[blockIdx.x] = offset * sum; } } Status ReduceColSumOnMatrixB(cudaStream_t stream, const int8_t* matrix, int32_t* col_sum, const int8_t offset, const MatMulComputeHelper& helper) { for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) { ReduceColSumOnMatrixBKernel<static_cast<int>(GridDim::maxThreadsPerBlock)><<<static_cast<int>(helper.N()), GridDim::maxThreadsPerBlock, 0, stream>>>(matrix + helper.RightOffsets()[batch], col_sum + batch * helper.N(), offset, static_cast<int32_t>(helper.K()), static_cast<int32_t>(helper.N())); } return CUDA_CALL(cudaGetLastError()); } __global__ void ComputeOffsetOfMatrixAB(const int32_t* row_sum, const int32_t* col_sum, int32_t* output, int32_t K_A_B, int32_t N) { for (int32_t i = threadIdx.x; i < N; i += blockDim.x) { *(output + blockIdx.x * N + i) = K_A_B - row_sum[blockIdx.x] - col_sum[i]; } } __global__ void ComputeOffsetOfMatrixA(const int32_t* col_sum, int32_t* output, int32_t N) { for (int32_t i = threadIdx.x; i < N; i += blockDim.x) { *(output + blockIdx.x * N + i) = -col_sum[i]; } } __global__ void ComputeOffsetOfMatrixB(const int32_t* row_sum, int32_t* output, int32_t N) { for (int32_t i = threadIdx.x; i < N; i += blockDim.x) { *(output + blockIdx.x * N + i) = -row_sum[blockIdx.x]; } } Status OffsetOutput(cudaStream_t stream, const int32_t* row_sum, const int32_t* col_sum, int32_t* output, const int8_t a_offset, const int8_t b_offset, const MatMulComputeHelper& helper) { if (a_offset && b_offset) { for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) { ComputeOffsetOfMatrixAB<<<static_cast<int>(helper.M()), GridDim::maxThreadsPerBlock, 0, stream>>>( row_sum + batch * helper.M(), col_sum + batch * helper.N(), output + helper.OutputOffsets()[batch], 
static_cast<int32_t>(helper.K()) * a_offset * b_offset, static_cast<int32_t>(helper.N())); } } else if (a_offset) { for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) { ComputeOffsetOfMatrixA<<<static_cast<int>(helper.M()), GridDim::maxThreadsPerBlock, 0, stream>>>( col_sum + batch * helper.N(), output + helper.OutputOffsets()[batch], static_cast<int32_t>(helper.N())); } } else if (b_offset) { for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) { ComputeOffsetOfMatrixB<<<static_cast<int>(helper.M()), GridDim::maxThreadsPerBlock, 0, stream>>>( row_sum + batch * helper.M(), output + helper.OutputOffsets()[batch], static_cast<int32_t>(helper.N())); } } return CUDA_CALL(cudaGetLastError()); } } // namespace cuda } // namespace onnxruntime
67441b217ef8987762a51c440eab683df44208ce.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "MultiplyGPUMult.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; hipMalloc(&a, XSIZE*YSIZE); float *b = NULL; hipMalloc(&b, XSIZE*YSIZE); float *c = NULL; hipMalloc(&c, XSIZE*YSIZE); int t = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( MultiplyGPUMult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,t); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( MultiplyGPUMult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,t); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( MultiplyGPUMult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,t); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
67441b217ef8987762a51c440eab683df44208ce.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "MultiplyGPUMult.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); float *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); float *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); int t = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); MultiplyGPUMult<<<gridBlock,threadBlock>>>(a,b,c,t); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { MultiplyGPUMult<<<gridBlock,threadBlock>>>(a,b,c,t); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { MultiplyGPUMult<<<gridBlock,threadBlock>>>(a,b,c,t); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
2585621c1b9c03115eedd2749136162985524015.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <iomanip> // CPU #include "libCSV/csv.hpp" #include "libalg/CPUMatrix.hpp" #include "libalg/alg.hpp" #include "libalg/print.hpp" #include "libalg/broadcasting.hpp" #include "error.hpp" // GPU #include "error.cuh" #include "libgpualg/ope.cuh" // TODO: export this in static lib, was linking failing or invalid device pointer // MAIN int main(int argc, char **argv) { runtime_assert(argc == 4, "Usage: ./testgpuope file1 meanaxis op"); // reading file, cpu operations std::string h{}; size_t nblines, nbcols; double *h_A = readCSV(argv[1], h, nblines, nbcols); std::cerr << nblines << nbcols << std::endl; auto A = CPUMatrix(h_A, nblines, nbcols); //std::cerr << A << std::endl; int axis = std::stoi(argv[2]); auto cpuMean = A.mean(axis); // transpose if axis is 1 if (axis == 1) cpuMean = cpuMean.transpose(); // left operand double *d_A; size_t d_apitch; unsigned int a_0 = A.getDim0(), a_1 = A.getDim1(); //size_t width = nbcols, height = nblines; hipMallocPitch(&d_A, &d_apitch, a_1 * sizeof(double), a_0 * sizeof(double)); cudaCheckError(); hipMemcpy2D(d_A, d_apitch, A.getArray(), a_1 * sizeof(double), a_1 * sizeof(double), a_0, hipMemcpyHostToDevice); cudaCheckError(); // right operand double *d_B; size_t d_bpitch; unsigned int b_0 = cpuMean.getDim0(), b_1 = cpuMean.getDim1(); hipMallocPitch(&d_B, &d_bpitch, b_1 * sizeof(double), b_0 * sizeof(double)); cudaCheckError(); hipMemcpy2D(d_B, d_bpitch, cpuMean.getArray(), b_1 * sizeof(double), b_1 * sizeof(double), b_0, hipMemcpyHostToDevice); cudaCheckError(); // result double *d_R = d_A; // in place operation size_t d_rpitch = d_apitch; size_t r_0, r_1; runtime_assert(get_broadcastable_size(a_0, a_1, b_0, b_1, &r_0, &r_1), "Invalid size for broadcasting !"); runtime_assert(r_0 == a_0 && r_1 == a_1, "Invalid broadcasting for inplace operation !"); // Launch the kernel dim3 blocksize(32,32); // 1024 threads per block TODO: change to test int nbblocksx = ::ceil((float)r_1 / blocksize.x); int nbblocksy = ::ceil((float)r_0 / blocksize.y); dim3 gridsize(nbblocksx, nbblocksy); runtime_assert(gridsize.x * gridsize.y * blocksize.x * blocksize.y >= r_0 * r_1, "Not enough threads !"); std::cerr << d_apitch << std::endl; std::cerr << d_bpitch << std::endl; std::cerr << b_0 << "," << b_1 << std::endl; enum MatrixOP op = MatrixOP::ADD; if (argv[3][0] == '-') { std::cerr << "SUBTRACT !" << std::endl; A -= cpuMean; op = MatrixOP::SUBTRACT; } else if (argv[3][0] == '+') { std::cerr << "ADD !" << std::endl; A += cpuMean; op = MatrixOP::ADD; } else if (argv[3][0] == 'x') { std::cerr << "MULT !" << std::endl; A *= cpuMean; op = MatrixOP::MULT; } else if (argv[3][0] == '/') { std::cerr << "DIVIDE !" << std::endl; A /= cpuMean; op = MatrixOP::DIVIDE; } else { std::cerr << "Invalid op" << std::endl; return EXIT_FAILURE; } matrix_op<double>(gridsize, blocksize, d_A, d_B, d_R, op, a_0, a_1, d_apitch, b_0, b_1, d_bpitch, r_0, r_1, d_rpitch); std::cerr << "FINISHED !" 
<< std::endl; // host result double *h_r = (double*)malloc(r_0 * d_rpitch); runtime_assert(h_r != nullptr, "Alloc error !"); // copy back to host hipMemcpy(h_r, d_R, r_0 * d_rpitch, hipMemcpyDeviceToHost); cudaCheckError(); // checking result //std::cerr << cpuMean << std::endl; //std::cerr << A << std::endl; double *h_Rcpu = A.getArray(); runtime_assert(r_0 == A.getDim0() && r_1 == A.getDim1(), "Invalid shapes !"); for (size_t i = 0; i < r_0; ++i) { for (size_t j = 0; j < r_1; ++j) { // std::cerr << h_r[i * (d_rpitch / sizeof(double)) + j] << " "; if (h_r[j + i * (d_rpitch / sizeof(double))] != h_Rcpu[j + i * r_1]) { std::cerr << i << "," << j << " : Difference : " << "GPU: " << h_r[j + i * (d_rpitch / sizeof(double))] << std::endl << "CPU: " << h_Rcpu[j + i * r_1] << std::endl; return EXIT_FAILURE; // Free... } } // std::cerr << std::endl; } std::cerr << "SUCCESS !" << std::endl; // free memory hipFree(d_A); cudaCheckError(); hipFree(d_B); cudaCheckError(); free(h_r); }
2585621c1b9c03115eedd2749136162985524015.cu
#include <stdio.h> #include <iostream> #include <iomanip> // CPU #include "libCSV/csv.hpp" #include "libalg/CPUMatrix.hpp" #include "libalg/alg.hpp" #include "libalg/print.hpp" #include "libalg/broadcasting.hpp" #include "error.hpp" // GPU #include "error.cuh" #include "libgpualg/ope.cuh" // TODO: export this in static lib, was linking failing or invalid device pointer // MAIN int main(int argc, char **argv) { runtime_assert(argc == 4, "Usage: ./testgpuope file1 meanaxis op"); // reading file, cpu operations std::string h{}; size_t nblines, nbcols; double *h_A = readCSV(argv[1], h, nblines, nbcols); std::cerr << nblines << nbcols << std::endl; auto A = CPUMatrix(h_A, nblines, nbcols); //std::cerr << A << std::endl; int axis = std::stoi(argv[2]); auto cpuMean = A.mean(axis); // transpose if axis is 1 if (axis == 1) cpuMean = cpuMean.transpose(); // left operand double *d_A; size_t d_apitch; unsigned int a_0 = A.getDim0(), a_1 = A.getDim1(); //size_t width = nbcols, height = nblines; cudaMallocPitch(&d_A, &d_apitch, a_1 * sizeof(double), a_0 * sizeof(double)); cudaCheckError(); cudaMemcpy2D(d_A, d_apitch, A.getArray(), a_1 * sizeof(double), a_1 * sizeof(double), a_0, cudaMemcpyHostToDevice); cudaCheckError(); // right operand double *d_B; size_t d_bpitch; unsigned int b_0 = cpuMean.getDim0(), b_1 = cpuMean.getDim1(); cudaMallocPitch(&d_B, &d_bpitch, b_1 * sizeof(double), b_0 * sizeof(double)); cudaCheckError(); cudaMemcpy2D(d_B, d_bpitch, cpuMean.getArray(), b_1 * sizeof(double), b_1 * sizeof(double), b_0, cudaMemcpyHostToDevice); cudaCheckError(); // result double *d_R = d_A; // in place operation size_t d_rpitch = d_apitch; size_t r_0, r_1; runtime_assert(get_broadcastable_size(a_0, a_1, b_0, b_1, &r_0, &r_1), "Invalid size for broadcasting !"); runtime_assert(r_0 == a_0 && r_1 == a_1, "Invalid broadcasting for inplace operation !"); // Launch the kernel dim3 blocksize(32,32); // 1024 threads per block TODO: change to test int nbblocksx = std::ceil((float)r_1 / blocksize.x); int nbblocksy = std::ceil((float)r_0 / blocksize.y); dim3 gridsize(nbblocksx, nbblocksy); runtime_assert(gridsize.x * gridsize.y * blocksize.x * blocksize.y >= r_0 * r_1, "Not enough threads !"); std::cerr << d_apitch << std::endl; std::cerr << d_bpitch << std::endl; std::cerr << b_0 << "," << b_1 << std::endl; enum MatrixOP op = MatrixOP::ADD; if (argv[3][0] == '-') { std::cerr << "SUBTRACT !" << std::endl; A -= cpuMean; op = MatrixOP::SUBTRACT; } else if (argv[3][0] == '+') { std::cerr << "ADD !" << std::endl; A += cpuMean; op = MatrixOP::ADD; } else if (argv[3][0] == 'x') { std::cerr << "MULT !" << std::endl; A *= cpuMean; op = MatrixOP::MULT; } else if (argv[3][0] == '/') { std::cerr << "DIVIDE !" << std::endl; A /= cpuMean; op = MatrixOP::DIVIDE; } else { std::cerr << "Invalid op" << std::endl; return EXIT_FAILURE; } matrix_op<double>(gridsize, blocksize, d_A, d_B, d_R, op, a_0, a_1, d_apitch, b_0, b_1, d_bpitch, r_0, r_1, d_rpitch); std::cerr << "FINISHED !" 
<< std::endl; // host result double *h_r = (double*)malloc(r_0 * d_rpitch); runtime_assert(h_r != nullptr, "Alloc error !"); // copy back to host cudaMemcpy(h_r, d_R, r_0 * d_rpitch, cudaMemcpyDeviceToHost); cudaCheckError(); // checking result //std::cerr << cpuMean << std::endl; //std::cerr << A << std::endl; double *h_Rcpu = A.getArray(); runtime_assert(r_0 == A.getDim0() && r_1 == A.getDim1(), "Invalid shapes !"); for (size_t i = 0; i < r_0; ++i) { for (size_t j = 0; j < r_1; ++j) { // std::cerr << h_r[i * (d_rpitch / sizeof(double)) + j] << " "; if (h_r[j + i * (d_rpitch / sizeof(double))] != h_Rcpu[j + i * r_1]) { std::cerr << i << "," << j << " : Difference : " << "GPU: " << h_r[j + i * (d_rpitch / sizeof(double))] << std::endl << "CPU: " << h_Rcpu[j + i * r_1] << std::endl; return EXIT_FAILURE; // Free... } } // std::cerr << std::endl; } std::cerr << "SUCCESS !" << std::endl; // free memory cudaFree(d_A); cudaCheckError(); cudaFree(d_B); cudaCheckError(); free(h_r); }
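matrix_op<double>() and the MatrixOP enum come from libgpualg/ope.cuh, which is not included here. The kernel below sketches the kind of pitched, broadcasting element-wise operation this test exercises: each output element (i, j) combines A(i mod a_0, j mod a_1) with B(i mod b_0, j mod b_1), so a mean row or column of size 1 is repeated across the larger operand. The kernel name, the BroadcastOp enum, and the exact argument order are assumptions, not the library's interface.

enum class BroadcastOp { Add, Subtract, Mult, Divide };

__global__ void broadcast_op_kernel(const double* A, const double* B, double* R, BroadcastOp op,
                                    unsigned int a_0, unsigned int a_1, size_t a_pitch,
                                    unsigned int b_0, unsigned int b_1, size_t b_pitch,
                                    unsigned int r_0, unsigned int r_1, size_t r_pitch)
{
  unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; // column
  unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; // row
  if (i >= r_0 || j >= r_1)
    return;
  // Pitches come from cudaMallocPitch and are in bytes, hence the char* row arithmetic.
  const double* a_row = (const double*)((const char*)A + (i % a_0) * a_pitch);
  const double* b_row = (const double*)((const char*)B + (i % b_0) * b_pitch);
  double* r_row = (double*)((char*)R + i * r_pitch);
  double a = a_row[j % a_1];
  double b = b_row[j % b_1];
  switch (op)
  {
    case BroadcastOp::Add:      r_row[j] = a + b; break;
    case BroadcastOp::Subtract: r_row[j] = a - b; break;
    case BroadcastOp::Mult:     r_row[j] = a * b; break;
    case BroadcastOp::Divide:   r_row[j] = a / b; break;
  }
}

Launched with the gridsize/blocksize computed in the test and the same pitches, each thread produces exactly one output element, which matches the in-place R = A op B usage above.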
27f0f95c9d5b633e51c280500a02985e46bd817b.hip
// !!! This is a file automatically generated by hipify!!! // Measure cuda memory use // // photonpy - Single molecule localization microscopy library // Jelmer Cnossen 2018-2021 #include "CudaUtils.h" #include "ThreadUtils.h" void EmptyKernel(hipStream_t s) { LaunchKernel(1, [=]__device__(int i) {}, 0, s); } CDLL_EXPORT int CudaGetNumDevices() { int c; hipGetDeviceCount(&c); return c; } CDLL_EXPORT bool CudaSetDevice(int index) { return hipSetDevice(index) == hipSuccess; } CDLL_EXPORT bool CudaGetDeviceInfo(int index, int& numMultiprocessors, char* name, int namelen) { hipDeviceProp_t prop; if (hipGetDeviceProperties(&prop, index) != hipSuccess) return false; numMultiprocessors = prop.multiProcessorCount; strcpy_s(name, namelen, prop.name); return true; } static std::mutex pinnedMemMutex, devicePitchedMemMutex, deviceMemMutex; static uint64_t pinnedMemAmount=0, devicePitchedMemAmount=0, deviceMemAmount = 0, devicePitchedNumAllocs=0; CDLL_EXPORT void CudaGetMemoryUse(uint64_t& pinnedBytes, uint64_t& devicePitchedBytes, uint64_t& deviceBytes, uint64_t& pitchedNumAllocs) { pinnedBytes = pinnedMemAmount; devicePitchedBytes = devicePitchedMemAmount; deviceBytes = deviceMemAmount; pitchedNumAllocs = devicePitchedNumAllocs; } int CudaMemoryCounter::AddPinnedMemory(int amount) { return LockedFunction(pinnedMemMutex, [&]() { pinnedMemAmount += amount; return pinnedMemAmount; }); } int CudaMemoryCounter::AddDevicePitchedMemory(int amount) { return LockedFunction(devicePitchedMemMutex, [&]() { if (amount > 0) devicePitchedNumAllocs++; else devicePitchedNumAllocs--; devicePitchedMemAmount += amount; return devicePitchedMemAmount; }); } int CudaMemoryCounter::AddDeviceMemory(int amount) { return LockedFunction(deviceMemMutex, [&]() { deviceMemAmount += amount; return deviceMemAmount; }); }
27f0f95c9d5b633e51c280500a02985e46bd817b.cu
// Measure cuda memory use // // photonpy - Single molecule localization microscopy library // © Jelmer Cnossen 2018-2021 #include "CudaUtils.h" #include "ThreadUtils.h" void EmptyKernel(cudaStream_t s) { LaunchKernel(1, [=]__device__(int i) {}, 0, s); } CDLL_EXPORT int CudaGetNumDevices() { int c; cudaGetDeviceCount(&c); return c; } CDLL_EXPORT bool CudaSetDevice(int index) { return cudaSetDevice(index) == cudaSuccess; } CDLL_EXPORT bool CudaGetDeviceInfo(int index, int& numMultiprocessors, char* name, int namelen) { cudaDeviceProp prop; if (cudaGetDeviceProperties(&prop, index) != cudaSuccess) return false; numMultiprocessors = prop.multiProcessorCount; strcpy_s(name, namelen, prop.name); return true; } static std::mutex pinnedMemMutex, devicePitchedMemMutex, deviceMemMutex; static uint64_t pinnedMemAmount=0, devicePitchedMemAmount=0, deviceMemAmount = 0, devicePitchedNumAllocs=0; CDLL_EXPORT void CudaGetMemoryUse(uint64_t& pinnedBytes, uint64_t& devicePitchedBytes, uint64_t& deviceBytes, uint64_t& pitchedNumAllocs) { pinnedBytes = pinnedMemAmount; devicePitchedBytes = devicePitchedMemAmount; deviceBytes = deviceMemAmount; pitchedNumAllocs = devicePitchedNumAllocs; } int CudaMemoryCounter::AddPinnedMemory(int amount) { return LockedFunction(pinnedMemMutex, [&]() { pinnedMemAmount += amount; return pinnedMemAmount; }); } int CudaMemoryCounter::AddDevicePitchedMemory(int amount) { return LockedFunction(devicePitchedMemMutex, [&]() { if (amount > 0) devicePitchedNumAllocs++; else devicePitchedNumAllocs--; devicePitchedMemAmount += amount; return devicePitchedMemAmount; }); } int CudaMemoryCounter::AddDeviceMemory(int amount) { return LockedFunction(deviceMemMutex, [&]() { deviceMemAmount += amount; return deviceMemAmount; }); }
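ThreadUtils.h is not shown in this collection; LockedFunction() used by the counters above is presumably a small helper that runs a callable while a mutex is held and forwards its return value. A plausible definition, assumed rather than copied from the library:

#include <mutex>
#include <utility>

template <typename Fn>
auto LockedFunction(std::mutex& m, Fn&& fn) -> decltype(fn())
{
  std::lock_guard<std::mutex> lock(m); // released when the function returns
  return std::forward<Fn>(fn)();
}

Returning whatever fn() returns lets each Add*Memory() function read the updated total inside the same critical section.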
fca434969a55c11f2390a82092161a3dfb27bf4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "lite/backends/cuda/cuda_utils.h" #include "lite/core/op_registry.h" #include "lite/core/target_wrapper.h" #include "lite/kernels/cuda/sequence_pool_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) template <typename Dtype> __global__ void seq_pool_average_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_slice_num = static_cast<int>(seq_offset[out_batch_id + 1] - seq_offset[out_batch_id]); int in_offset = static_cast<int>(seq_offset[out_batch_id] * slice_size); src_in += in_offset + out_id; Dtype sum = (Dtype)0; for (int i = 0; i < in_slice_num; ++i) { sum += src_in[i * slice_size]; } dst[out_batch_id * slice_size + out_id] = sum / in_slice_num; } } template <typename Dtype> __global__ void seq_pool_sum_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_slice_num = static_cast<int>(seq_offset[out_batch_id + 1] - seq_offset[out_batch_id]); int in_offset = static_cast<int>(seq_offset[out_batch_id] * slice_size); src_in += in_offset + out_id; Dtype sum = (Dtype)0; for (int i = 0; i < in_slice_num; ++i) { sum += src_in[i * slice_size]; } dst[out_batch_id * slice_size + out_id] = sum; } } template <typename Dtype> __global__ void seq_pool_sqrt_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_slice_num = static_cast<int>(seq_offset[out_batch_id + 1] - seq_offset[out_batch_id]); int in_offset = static_cast<int>(seq_offset[out_batch_id] * slice_size); src_in += in_offset + out_id; Dtype sum = (Dtype)0; for (int i = 0; i < in_slice_num; ++i) { sum += src_in[i * slice_size]; } dst[out_batch_id * slice_size + out_id] = sum * rsqrtf(in_slice_num); } } template <typename Dtype> __global__ void seq_pool_max_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_slice_num = static_cast<int>(seq_offset[out_batch_id + 1] - seq_offset[out_batch_id]); int in_offset = static_cast<int>(seq_offset[out_batch_id] * slice_size); 
src_in += in_offset + out_id; Dtype max = src_in[0]; for (int i = 1; i < in_slice_num; ++i) { Dtype val = src_in[i * slice_size]; if (val > max) { max = val; } } dst[out_batch_id * slice_size + out_id] = max; } } template <typename Dtype> __global__ void seq_pool_last_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_offset = (static_cast<int>(seq_offset[out_batch_id + 1]) - 1) * slice_size; dst[tid] = src_in[in_offset + out_id]; } } template <typename Dtype> __global__ void seq_pool_first_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_offset = static_cast<int>(seq_offset[out_batch_id] * slice_size); dst[tid] = src_in[in_offset + out_id]; } } void SequencePoolCompute::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); std::vector<uint64_t> seq_offset = param.X->lod()[0]; int batch_size = param.X->lod()[0].size() - 1; int slice_size = param.Out->dims().production() / batch_size; float* out_data = param.Out->mutable_data<float>(TARGET(kCUDA)); const float* in_data = param.X->data<float>(); seq_offset_D.Resize({static_cast<int64_t>(seq_offset.size())}); TargetWrapperCuda::MemcpyAsync( seq_offset_D.mutable_data<uint64_t>(TARGET(kCUDA)), seq_offset.data(), sizeof(uint64_t) * seq_offset.size(), IoDirection::HtoD, stream); if (param.pool_type == "MAX") { hipLaunchKernelGGL(( seq_pool_max_kernel<float>), dim3(CUDA_GET_BLOCKS(batch_size * slice_size)), dim3(CUDA_NUM_THREADS), 0, stream, out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else if (param.pool_type == "AVERAGE") { hipLaunchKernelGGL(( seq_pool_average_kernel<float>), dim3(CUDA_GET_BLOCKS(batch_size * slice_size)), dim3(CUDA_NUM_THREADS), 0, stream, out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else if (param.pool_type == "SUM") { hipLaunchKernelGGL(( seq_pool_sum_kernel<float>), dim3(CUDA_GET_BLOCKS(batch_size * slice_size)), dim3(CUDA_NUM_THREADS), 0, stream, out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else if (param.pool_type == "SQRT") { hipLaunchKernelGGL(( seq_pool_sqrt_kernel<float>), dim3(CUDA_GET_BLOCKS(batch_size * slice_size)), dim3(CUDA_NUM_THREADS), 0, stream, out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else if (param.pool_type == "FIRST") { hipLaunchKernelGGL(( seq_pool_first_kernel<float>), dim3(CUDA_GET_BLOCKS(batch_size * slice_size)), dim3(CUDA_NUM_THREADS), 0, stream, out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else if (param.pool_type == "LAST") { hipLaunchKernelGGL(( seq_pool_last_kernel<float>), dim3(CUDA_GET_BLOCKS(batch_size * slice_size)), dim3(CUDA_NUM_THREADS), 0, stream, out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else { LOG(ERROR) << "pool type " << param.pool_type << " is not supoorted."; } std::vector<uint64_t> offset_new(static_cast<uint64_t>(batch_size + 1)); for (int i = 0; i <= batch_size; ++i) { offset_new[i] = i; } std::vector<std::vector<uint64_t>> voffset_new; voffset_new.push_back(offset_new); 
param.Out->set_lod(voffset_new); hipError_t error = hipGetLastError(); if (error != hipSuccess) LOG(INFO) << hipGetErrorString(error); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL(sequence_pool, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::SequencePoolCompute, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("MaxIndex", {LiteType::GetTensorTy(TARGET(kCUDA))}) .Finalize();
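Before the CUDA original of this file (below), one note on the indexing scheme shared by all of the pooling kernels above: a flat thread id is split into a (sequence, feature-column) pair, and the LoD vector seq_offset turns the sequence id into a row range of the input tensor. The host-only sketch below walks through that arithmetic with made-up example values; it is illustration only and not part of either file.

// Host-side sketch (not part of the files above) of how the kernels map a
// flat thread id onto (sequence, feature) coordinates via the LoD offsets.
// All values are made-up example data.
#include <cstdio>
#include <cstdint>
#include <vector>

int main() {
  // Example LoD: 3 sequences covering input rows [0,2), [2,5), [5,6).
  std::vector<uint64_t> seq_offset = {0, 2, 5, 6};
  int batch_size = static_cast<int>(seq_offset.size()) - 1;  // 3 output rows
  int slice_size = 4;                                        // features per row

  for (int tid = 0; tid < batch_size * slice_size; ++tid) {
    int out_batch_id = tid / slice_size;  // which sequence this thread pools
    int out_id = tid % slice_size;        // which feature column it pools
    int in_slice_num = static_cast<int>(seq_offset[out_batch_id + 1] -
                                        seq_offset[out_batch_id]);
    int in_offset = static_cast<int>(seq_offset[out_batch_id]) * slice_size;
    std::printf("tid=%2d -> seq=%d col=%d rows_pooled=%d first_input_elem=%d\n",
                tid, out_batch_id, out_id, in_slice_num, in_offset + out_id);
  }
  return 0;
}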
fca434969a55c11f2390a82092161a3dfb27bf4c.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "lite/backends/cuda/cuda_utils.h" #include "lite/core/op_registry.h" #include "lite/core/target_wrapper.h" #include "lite/kernels/cuda/sequence_pool_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) template <typename Dtype> __global__ void seq_pool_average_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_slice_num = static_cast<int>(seq_offset[out_batch_id + 1] - seq_offset[out_batch_id]); int in_offset = static_cast<int>(seq_offset[out_batch_id] * slice_size); src_in += in_offset + out_id; Dtype sum = (Dtype)0; for (int i = 0; i < in_slice_num; ++i) { sum += src_in[i * slice_size]; } dst[out_batch_id * slice_size + out_id] = sum / in_slice_num; } } template <typename Dtype> __global__ void seq_pool_sum_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_slice_num = static_cast<int>(seq_offset[out_batch_id + 1] - seq_offset[out_batch_id]); int in_offset = static_cast<int>(seq_offset[out_batch_id] * slice_size); src_in += in_offset + out_id; Dtype sum = (Dtype)0; for (int i = 0; i < in_slice_num; ++i) { sum += src_in[i * slice_size]; } dst[out_batch_id * slice_size + out_id] = sum; } } template <typename Dtype> __global__ void seq_pool_sqrt_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_slice_num = static_cast<int>(seq_offset[out_batch_id + 1] - seq_offset[out_batch_id]); int in_offset = static_cast<int>(seq_offset[out_batch_id] * slice_size); src_in += in_offset + out_id; Dtype sum = (Dtype)0; for (int i = 0; i < in_slice_num; ++i) { sum += src_in[i * slice_size]; } dst[out_batch_id * slice_size + out_id] = sum * rsqrtf(in_slice_num); } } template <typename Dtype> __global__ void seq_pool_max_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_slice_num = static_cast<int>(seq_offset[out_batch_id + 1] - seq_offset[out_batch_id]); int in_offset = static_cast<int>(seq_offset[out_batch_id] * slice_size); src_in += in_offset + out_id; Dtype max = src_in[0]; for (int i = 1; i < in_slice_num; 
++i) { Dtype val = src_in[i * slice_size]; if (val > max) { max = val; } } dst[out_batch_id * slice_size + out_id] = max; } } template <typename Dtype> __global__ void seq_pool_last_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_offset = (static_cast<int>(seq_offset[out_batch_id + 1]) - 1) * slice_size; dst[tid] = src_in[in_offset + out_id]; } } template <typename Dtype> __global__ void seq_pool_first_kernel(Dtype* dst, const Dtype* src_in, const int batch_size, const uint64_t* seq_offset, const int slice_size) { int total = slice_size * batch_size; CUDA_KERNEL_LOOP(tid, total) { int out_batch_id = tid / slice_size; int out_id = tid % slice_size; int in_offset = static_cast<int>(seq_offset[out_batch_id] * slice_size); dst[tid] = src_in[in_offset + out_id]; } } void SequencePoolCompute::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); std::vector<uint64_t> seq_offset = param.X->lod()[0]; int batch_size = param.X->lod()[0].size() - 1; int slice_size = param.Out->dims().production() / batch_size; float* out_data = param.Out->mutable_data<float>(TARGET(kCUDA)); const float* in_data = param.X->data<float>(); seq_offset_D.Resize({static_cast<int64_t>(seq_offset.size())}); TargetWrapperCuda::MemcpyAsync( seq_offset_D.mutable_data<uint64_t>(TARGET(kCUDA)), seq_offset.data(), sizeof(uint64_t) * seq_offset.size(), IoDirection::HtoD, stream); if (param.pool_type == "MAX") { seq_pool_max_kernel<float><<<CUDA_GET_BLOCKS(batch_size * slice_size), CUDA_NUM_THREADS, 0, stream>>>(out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else if (param.pool_type == "AVERAGE") { seq_pool_average_kernel<float><<<CUDA_GET_BLOCKS(batch_size * slice_size), CUDA_NUM_THREADS, 0, stream>>>(out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else if (param.pool_type == "SUM") { seq_pool_sum_kernel<float><<<CUDA_GET_BLOCKS(batch_size * slice_size), CUDA_NUM_THREADS, 0, stream>>>(out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else if (param.pool_type == "SQRT") { seq_pool_sqrt_kernel<float><<<CUDA_GET_BLOCKS(batch_size * slice_size), CUDA_NUM_THREADS, 0, stream>>>(out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else if (param.pool_type == "FIRST") { seq_pool_first_kernel<float><<<CUDA_GET_BLOCKS(batch_size * slice_size), CUDA_NUM_THREADS, 0, stream>>>(out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else if (param.pool_type == "LAST") { seq_pool_last_kernel<float><<<CUDA_GET_BLOCKS(batch_size * slice_size), CUDA_NUM_THREADS, 0, stream>>>(out_data, in_data, batch_size, seq_offset_D.data<uint64_t>(), slice_size); } else { LOG(ERROR) << "pool type " << param.pool_type << " is not supoorted."; } std::vector<uint64_t> offset_new(static_cast<uint64_t>(batch_size + 1)); for (int i = 0; i <= batch_size; ++i) { offset_new[i] = i; } std::vector<std::vector<uint64_t>> voffset_new; voffset_new.push_back(offset_new); param.Out->set_lod(voffset_new); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) LOG(INFO) << cudaGetErrorString(error); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL(sequence_pool, kCUDA, kFloat, kNCHW, 
paddle::lite::kernels::cuda::SequencePoolCompute, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("MaxIndex", {LiteType::GetTensorTy(TARGET(kCUDA))}) .Finalize();
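Both versions of the file above drive every kernel through CUDA_KERNEL_LOOP, a grid-stride loop, and size each launch with the CUDA_GET_BLOCKS / CUDA_NUM_THREADS helpers that come from the included headers. The standalone sketch below shows the same pattern with hypothetical helper names and an assumed 512-thread block size; nothing in it is taken from those headers.

// Minimal grid-stride-loop sketch. GetBlocks and kNumThreads are illustrative
// stand-ins, not the real CUDA_GET_BLOCKS / CUDA_NUM_THREADS definitions.
#include <cstdio>
#include <cuda_runtime.h>

constexpr int kNumThreads = 512;            // assumed block size
inline int GetBlocks(int n) { return (n + kNumThreads - 1) / kNumThreads; }

__global__ void scale_kernel(float* data, float factor, int n) {
  // Each thread strides across the whole range, so any grid size covers all n.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    data[i] *= factor;
  }
}

int main() {
  const int n = 10000;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));
  scale_kernel<<<GetBlocks(n), kNumThreads>>>(d, 2.0f, n);
  cudaDeviceSynchronize();
  std::printf("last error: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(d);
  return 0;
}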
7f19bcd2b61b74778f75791992d45bbfba5bf584.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cassert>

#include "kernels.h"

namespace lib2
{
    __device__ void some_dev_func() {}

    __global__ void kernel_test()
    {
        printf("hello from lib2 kernel\n");
        some_dev_func();
    }

    void doEntryPoint()
    {
        std::cout << "hello from lib2\n";

        auto check_error = [](auto code) {
            if (code != hipSuccess) {
                std::cout << hipGetErrorString(code) << std::endl;
                assert(false);
            }
        };

        hipLaunchKernelGGL(( kernel_test), dim3(1),dim3(1), 0, 0, );
        check_error(hipGetLastError());
        hipDeviceSynchronize();
    }
}
7f19bcd2b61b74778f75791992d45bbfba5bf584.cu
#include <iostream>
#include <cassert>

#include "kernels.h"

namespace lib2
{
    __device__ void some_dev_func() {}

    __global__ void kernel_test()
    {
        printf("hello from lib2 kernel\n");
        some_dev_func();
    }

    void doEntryPoint()
    {
        std::cout << "hello from lib2\n";

        auto check_error = [](auto code) {
            if (code != cudaSuccess) {
                std::cout << cudaGetErrorString(code) << std::endl;
                assert(false);
            }
        };

        kernel_test<<<1,1>>>();
        check_error(cudaGetLastError());
        cudaDeviceSynchronize();
    }
}
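This small pair mostly illustrates how hipify rewrites a triple-chevron launch into hipLaunchKernelGGL while the error-check lambda survives the translation almost unchanged. A common generalization of that lambda is a macro that also reports file and line; the sketch below is generic boilerplate written under that assumption, not something taken from lib2.

// Generic CUDA error-check macro, illustration only.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      std::fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,         \
                   cudaGetErrorString(err_));                         \
      std::exit(EXIT_FAILURE);                                        \
    }                                                                 \
  } while (0)

__global__ void kernel_test() { printf("hello from kernel\n"); }

int main() {
  kernel_test<<<1, 1>>>();
  CUDA_CHECK(cudaGetLastError());       // catches launch-configuration errors
  CUDA_CHECK(cudaDeviceSynchronize());  // surfaces errors raised during execution
  return 0;
}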
2d71ee8cbf740d57c663312d0baba9c14785c2e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "BoruvkaUMinho_GPU.cuh" texture<unsigned int, 1, hipReadModeElementType> tex_psrc; texture<unsigned int, 1, hipReadModeElementType> tex_outdegree; texture<unsigned int, 1, hipReadModeElementType> tex_edgessrcdst; texture<unsigned int, 1, hipReadModeElementType> tex_edgessrcwt; __global__ void find_min_per_vertex(CSR_Graph g, unsigned int *vertex_minedge){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nnodes) return; unsigned min_edge = 0; unsigned min_weight = UINT_MAX; unsigned min_dst = g.nnodes; unsigned edge = tex1Dfetch(tex_psrc, id); unsigned last_edge = edge + tex1Dfetch(tex_outdegree, id); for(; edge < last_edge; ++edge) { unsigned wt = tex1Dfetch(tex_edgessrcwt, edge); unsigned dst = tex1Dfetch(tex_edgessrcdst, edge); //if(dst != g.nnodes) { if(wt < min_weight || (wt == min_weight && dst < min_dst)) { min_weight = wt; min_edge = edge; min_dst = dst; } } //else if (dst == min_dst && wt >= min_weight) //{ // g.edgessrcdst[edge] = g.nnodes; //} } vertex_minedge[id] = min_edge; } __global__ void initialize_color(unsigned int nnodes, unsigned int *color, unsigned int *vertex_minedge){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= nnodes) return; unsigned edge = vertex_minedge[id]; if(edge == 0) color[id] = id; else color[id] = tex1Dfetch(tex_edgessrcdst, edge); } __global__ void propagate_color(unsigned int nnodes, unsigned int *color, unsigned int *changed){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= nnodes) return; unsigned int my_color = color[id]; unsigned int other_color = color[my_color]; if(my_color != other_color) { color[id] = other_color; *changed = true; } } __global__ void remove_duplicates(CSR_Graph g, unsigned int *vertex_minedge){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nnodes) return; unsigned int edge = vertex_minedge[id]; if(edge == 0) return; unsigned int dst = tex1Dfetch(tex_edgessrcdst, edge); unsigned int other_edge = vertex_minedge[dst]; if(other_edge == 0) return; unsigned int other_dst = tex1Dfetch(tex_edgessrcdst, other_edge); if(id == other_dst && id > dst) // found loop and maintain edge by smaller vertex id { vertex_minedge[dst] = 0; } } __global__ void mark_mst_edges(unsigned int nnodes, unsigned int *selected_edges, unsigned int *vertex_minedge, unsigned int *map_edges){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= nnodes) return; unsigned int edge = vertex_minedge[id]; selected_edges[map_edges[edge]] = 1; } __global__ void create_new_vertex_id(CSR_Graph g, unsigned int *color, unsigned int *new_vertex, unsigned int *next_nnodes){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nnodes) return; if(id == color[id] && tex1Dfetch(tex_outdegree, id) > 0) // representative thread { new_vertex[id] = 1; } else new_vertex[id] = 0; } __global__ void count_new_edges(CSR_Graph g, CSR_Graph next, unsigned int *color, unsigned int *new_vertex){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nnodes) return; unsigned my_color = color[id]; // count how many edges I will be adding to supervertex unsigned new_edges = 0; unsigned edge = tex1Dfetch(tex_psrc, id); unsigned last_edge = edge + tex1Dfetch(tex_outdegree, id); for(; edge < last_edge; ++edge) { unsigned dst = tex1Dfetch(tex_edgessrcdst, edge); if(/*dst != g.nnodes &&*/ my_color != color[dst]) ++new_edges; } unsigned supervertex_id = new_vertex[my_color]; 
atomicAdd(&(next.outdegree[supervertex_id]), new_edges); } __global__ void setup_psrc(CSR_Graph next, unsigned int *next_nedges){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= next.nnodes) return; ++next.psrc[id]; } __global__ void insert_new_edges(CSR_Graph g, unsigned int *next_edgessrcdst, unsigned int *next_edgessrcwt, unsigned int *color, unsigned int *new_vertex, unsigned int *topedge_per_vertex, unsigned int *old_map_edges, unsigned int *new_map_edges){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nnodes) return; unsigned my_color = color[id]; unsigned supervertex_id = new_vertex[my_color]; unsigned edge = tex1Dfetch(tex_psrc, id); unsigned last_edge = edge + tex1Dfetch(tex_outdegree, id); for(; edge < last_edge; ++edge) { unsigned dst = tex1Dfetch(tex_edgessrcdst, edge); if(dst != g.nnodes) { unsigned other_color = color[dst]; if(my_color != other_color) { unsigned top_edge = atomicInc(&(topedge_per_vertex[supervertex_id]), UINT_MAX); //next_edgessrcdst[top_edge] = other_supervertex; next_edgessrcdst[top_edge] = new_vertex[other_color]; next_edgessrcwt[top_edge] = tex1Dfetch(tex_edgessrcwt, edge); new_map_edges[top_edge] = old_map_edges[edge]; } } } } __global__ void load_weights(CSR_Graph g, unsigned int *selected_edges, unsigned int *vertex_minweight){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nedges + 1) return; if(selected_edges[id] == 1) { vertex_minweight[id] = g.edgessrcwt[id]; } } MGPU_MEM(unsigned int) BoruvkaUMinho_GPU(CSR_Graph *h_graph, unsigned block_size){ mgpu::ContextPtr context = mgpu::CreateCudaDevice(0, NULL, true); detect_devices(); hipDeviceSetCacheConfig(hipFuncCachePreferL1); //CSR_Graph *h_graph = new CSR_Graph(argv[1]); std::vector<CSR_Graph*> d_graph; d_graph.push_back(new CSR_Graph(h_graph->nnodes, h_graph->nedges, DEVICE)); d_graph[0]->d_allocate(); unsigned problem_size = h_graph->nnodes; unsigned edges_size = h_graph->nedges; //unsigned block_size = 1024; unsigned int *next_nnodes, *next_nedges; hipMalloc((void **)&next_nnodes, sizeof(unsigned int)); hipMalloc((void **)&next_nedges, sizeof(unsigned int)); unsigned int *d_changed, h_changed; if(hipMalloc((void **)&d_changed, sizeof(unsigned int)) != hipSuccess) { CudaTest(const_cast<char*>("allocating changed failed")); } MGPU_MEM(unsigned int) vertex_minedge = context->Malloc<unsigned int>(problem_size); MGPU_MEM(unsigned int) vertex_minweight = context->Fill<unsigned int>(edges_size+1, 0); MGPU_MEM(unsigned int) color = context->Malloc<unsigned int>(problem_size); MGPU_MEM(unsigned int) new_vertex = context->Malloc<unsigned int>(problem_size); MGPU_MEM(unsigned int) supervertex_flag = context->Malloc<unsigned int>(problem_size); MGPU_MEM(unsigned int) topedge_per_vertex = context->Malloc<unsigned int>(problem_size); MGPU_MEM(unsigned int) map_edges = context->FillAscending<unsigned int>(edges_size + 1, 0, 1); MGPU_MEM(unsigned int) selected_edges = context->Fill<unsigned int>(edges_size + 1, 0); MGPU_MEM(unsigned int) new_map_edges = context->FillAscending<unsigned int>(edges_size + 1, 0, 1); double starttime, endtime; float time; float timings[19]; for(unsigned j = 0; j < 19; ++j) { timings[j] = 0.0f; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); unsigned int iteration = 0; long unsigned int total_weight = 0; starttime = rtclock(); hipEventRecord(start, 0); h_graph->copyHostToDevice(d_graph[0]); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[18] += time; 
CudaTest(const_cast<char*>("copy CSR_Graph host to device failed")); do{ //toString<<<1,1>>>(*d_graph[iteration]); unsigned n_blocks = compute_n_blocks(problem_size, block_size); printf("Graph has %u nodes and %u edges\n", problem_size, edges_size); //SegSortPairsFromIndices(d_graph[iteration]->edgessrcwt, d_graph[iteration]->edgessrcdst, edges_size+1, d_graph[iteration]->psrc, problem_size+1, *context); hipBindTexture(0, tex_psrc, d_graph[iteration]->psrc, sizeof(unsigned int) * problem_size); CudaTest(const_cast<char*>("bind tex_psrc failed")); hipBindTexture(0, tex_outdegree, d_graph[iteration]->outdegree, sizeof(unsigned int) * problem_size); CudaTest(const_cast<char*>("bind tex_outdegree failed")); hipBindTexture(0, tex_edgessrcdst, d_graph[iteration]->edgessrcdst, sizeof(unsigned int) * (edges_size + 1)); CudaTest(const_cast<char*>("bind tex_edgessrcdst failed")); hipBindTexture(0, tex_edgessrcwt, d_graph[iteration]->edgessrcwt, sizeof(unsigned int) * (edges_size + 1)); CudaTest(const_cast<char*>("bind tex_edgessrcwt failed")); hipEventRecord(start, 0); hipLaunchKernelGGL(( find_min_per_vertex), dim3(n_blocks), dim3(block_size), 0, 0, *d_graph[iteration], vertex_minedge->get()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[0] += time; CudaTest(const_cast<char*>("find_min_per_vertex failed")); // depends on find_min_per_vertex hipEventRecord(start, 0); hipLaunchKernelGGL(( remove_duplicates), dim3(n_blocks), dim3(block_size), 0, 0, *d_graph[iteration], vertex_minedge->get()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[16] += time; CudaTest(const_cast<char*>("remove_duplicates failed")); hipEventRecord(start, 0); hipLaunchKernelGGL(( initialize_color), dim3(n_blocks), dim3(block_size), 0, 0, d_graph[iteration]->nnodes, color->get(), vertex_minedge->get()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[1] += time; CudaTest(const_cast<char*>("initialize_color color failed")); do{ hipEventRecord(start, 0); hipMemset(d_changed, 0, sizeof(unsigned int)); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[2] += time; CudaTest(const_cast<char*>("memset d_changed failed")); // depends on initialize color // depends on find_min_per_vertex hipEventRecord(start, 0); hipLaunchKernelGGL(( propagate_color), dim3(n_blocks), dim3(block_size), 0, 0, d_graph[iteration]->nnodes, color->get(), d_changed); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[3] += time; CudaTest(const_cast<char*>("propagate_color failed")); hipEventRecord(start, 0); hipMemcpy(&h_changed, d_changed, sizeof(h_changed), hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[4] += time; CudaTest(const_cast<char*>("copy d_changed failed")); } while(h_changed); /////////////////////// // saving selected edges and weights /////////////////////// hipEventRecord(start, 0); hipLaunchKernelGGL(( mark_mst_edges), dim3(n_blocks), dim3(block_size), 0, 0, d_graph[iteration]->nnodes, selected_edges->get(), vertex_minedge->get(), map_edges->get()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[17] += time; CudaTest(const_cast<char*>("mark_mst_edges failed")); /////////////////////// // allocate new device graph /////////////////////// 
hipEventRecord(start, 0); d_graph.push_back(new CSR_Graph(0, 0, DEVICE)); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[7] += time; CudaTest(const_cast<char*>("push_back failed")); /////////////////////// // creating supervertices /////////////////////// //depends on propagate colors hipEventRecord(start, 0); hipMemset(next_nnodes, 0, sizeof(unsigned int)); hipLaunchKernelGGL(( create_new_vertex_id), dim3(n_blocks), dim3(block_size), 0, 0, *d_graph[iteration], color->get(), supervertex_flag->get(), next_nnodes); CudaTest(const_cast<char*>("create_new_vertex_id failed")); mgpu::Scan<mgpu::MgpuScanTypeExc>(supervertex_flag->get(), problem_size, (unsigned int)0, mgpu::plus<unsigned int>(), (unsigned int*)0, &(d_graph[iteration+1]->nnodes), new_vertex->get(), *context); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); //hipMemcpy(&(d_graph[iteration+1]->nnodes), next_nnodes, sizeof(unsigned int), hipMemcpyDeviceToHost); timings[10] += time; CudaTest(const_cast<char*>("mgpu::scan failed")); unsigned new_nnodes; new_nnodes = d_graph[iteration+1]->nnodes; if(unlikely(new_nnodes <= 1)) { hipUnbindTexture(tex_psrc); hipUnbindTexture(tex_outdegree); hipUnbindTexture(tex_edgessrcdst); hipUnbindTexture(tex_edgessrcwt); //if(iteration > 0) d_graph[iteration]->d_deallocate(); d_graph[iteration]->d_deallocate(); break; } d_graph[iteration+1]->d_allocate_nodes(); hipDeviceSynchronize(); /////////////////////// // inserting new contracted edges /////////////////////// // depends on propagate colors // depends on create_new_vertex_id hipEventRecord(start, 0); hipLaunchKernelGGL(( count_new_edges), dim3(n_blocks), dim3(block_size), 0, 0, *d_graph[iteration], *d_graph[iteration+1], color->get(), new_vertex->get()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[12] += time; CudaTest(const_cast<char*>("count_new_edges failed")); // depends on count_new_edges hipEventRecord(start, 0); hipMemset(next_nedges, 0, sizeof(unsigned int)); mgpu::Scan<mgpu::MgpuScanTypeExc>(d_graph[iteration+1]->outdegree, new_nnodes, (unsigned int)0, mgpu::plus<unsigned int>(), (unsigned int*)0, &(d_graph[iteration+1]->nedges), d_graph[iteration+1]->psrc, *context); CudaTest(const_cast<char*>("mgpu::Scan failed")); hipLaunchKernelGGL(( setup_psrc), dim3(compute_n_blocks(new_nnodes, block_size)), dim3(block_size), 0, 0, *d_graph[iteration+1], next_nedges); CudaTest(const_cast<char*>("setup_psrc failed")); //hipMemcpy(&(d_graph[iteration+1]->nedges), next_nedges, sizeof(unsigned int), hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[13] += time; d_graph[iteration+1]->d_allocate_edges(); hipDeviceSynchronize(); hipEventRecord(start, 0); hipMemcpy(topedge_per_vertex->get(), d_graph[iteration+1]->psrc, sizeof(unsigned int) * new_nnodes, hipMemcpyDeviceToDevice); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[14] += time; CudaTest(const_cast<char*>("copy topedge_per_vertex failed")); // depends on topedge_per_vertex memcpy // depends on setup_psrc hipEventRecord(start, 0); hipLaunchKernelGGL(( insert_new_edges), dim3(n_blocks), dim3(block_size), 0, 0, *d_graph[iteration], d_graph[iteration+1]->edgessrcdst, d_graph[iteration+1]->edgessrcwt, color->get(), new_vertex->get(), topedge_per_vertex->get(), map_edges->get(), new_map_edges->get()); hipEventRecord(stop, 0); 
hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[15] += time; CudaTest(const_cast<char*>("insert_new_edges failed")); edges_size = d_graph[iteration+1]->nedges; hipEventRecord(start, 0); hipMemcpy(map_edges->get(), new_map_edges->get(), sizeof(unsigned int) * (edges_size + 1), hipMemcpyDeviceToDevice); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); timings[6] += time; CudaTest(const_cast<char*>("copy map_edges failed")); problem_size = new_nnodes; hipUnbindTexture(tex_psrc); hipUnbindTexture(tex_outdegree); hipUnbindTexture(tex_edgessrcdst); hipUnbindTexture(tex_edgessrcwt); if(iteration > 0) { //hipDeviceSynchronize(); d_graph[iteration]->d_deallocate(); //hipDeviceSynchronize(); } ++iteration; } while(true); endtime = rtclock(); // the selected MST edges are in the selected_edges array // hipEventRecord(start, 0); // load_weights<<<compute_n_blocks(h_graph->nedges + 1, block_size), block_size>>>(*d_graph[0], selected_edges->get(), vertex_minweight->get()); // mgpu::Reduce(vertex_minweight->get(), h_graph->nedges + 1, (long unsigned int)0, mgpu::plus<long unsigned int>(), (long unsigned int*)0, &total_weight, *context); // hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); // timings[5] += time; // CudaTest(const_cast<char*>("mgpu::Reduce failed")); // unsigned int mst_edges = 0; // mgpu::Reduce(selected_edges->get(), h_graph->nedges + 1, (unsigned int)0, mgpu::plus<unsigned int>(), (unsigned int*)0, &mst_edges, *context); printf("%.1f\t ms on copying source graph to GPU\n", timings[18]); printf("%.1f\t ms on find_min_per_vertex\n", timings[0]); printf("%.1f\t ms on initialize_color\n", timings[1]); printf("%.1f\t ms on memset d_changed\n", timings[2]); printf("%.1f\t ms on propagate_color\n", timings[3]); printf("%.1f\t ms on copy d_changed\n", timings[4]); printf("%.1f\t ms on remove_duplicates\n", timings[16]); printf("%.1f\t ms on mark mst edges\n", timings[17]); printf("%.1f\t ms on push_back\n", timings[7]); printf("%.1f\t ms on create_new_vertex_id\n", timings[10]); printf("%.1f\t ms on count_new_edges\n", timings[12]); printf("%.1f\t ms on setup_psrc\n", timings[13]); printf("%.1f\t ms on copy topedge_per_vertex\n", timings[14]); printf("%.1f\t ms on insert_new_edges\n", timings[15]); printf("%.1f\t ms on copy map_edges\n", timings[6]); printf("%.3lf\t ms total execution time\n", 1000 * (endtime - starttime)); //printf("\t%.1f ms on weight computation\n", timings[5]); //printf("total mst weight %lu (not counting mirrored edges (/2): %lu) and %u edges\n", total_weight*2, total_weight, mst_edges-1); return selected_edges; }
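The CUDA original of this file follows. One step worth isolating is propagate_color, which does pointer jumping: every vertex repeatedly replaces its color with its color's color until the array stops changing, so each vertex ends up labelled with its component representative. The host-side reference below reproduces that fixpoint on a tiny made-up example, purely for illustration.

// Host-side pointer-jumping reference (illustration only).
#include <cstdio>
#include <vector>

int main() {
  // Made-up successor array: 0->1->3 (root 3), 2 is its own root, 4->3.
  std::vector<unsigned> color = {1, 3, 2, 3, 3};
  bool changed = true;
  while (changed) {
    changed = false;
    for (size_t v = 0; v < color.size(); ++v) {
      unsigned other = color[color[v]];
      if (color[v] != other) {   // hop one level closer to the root
        color[v] = other;
        changed = true;
      }
    }
  }
  for (size_t v = 0; v < color.size(); ++v)
    std::printf("vertex %zu -> representative %u\n", v, color[v]);
  return 0;
}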
2d71ee8cbf740d57c663312d0baba9c14785c2e2.cu
#include "BoruvkaUMinho_GPU.cuh" texture<unsigned int, 1, cudaReadModeElementType> tex_psrc; texture<unsigned int, 1, cudaReadModeElementType> tex_outdegree; texture<unsigned int, 1, cudaReadModeElementType> tex_edgessrcdst; texture<unsigned int, 1, cudaReadModeElementType> tex_edgessrcwt; __global__ void find_min_per_vertex(CSR_Graph g, unsigned int *vertex_minedge){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nnodes) return; unsigned min_edge = 0; unsigned min_weight = UINT_MAX; unsigned min_dst = g.nnodes; unsigned edge = tex1Dfetch(tex_psrc, id); unsigned last_edge = edge + tex1Dfetch(tex_outdegree, id); for(; edge < last_edge; ++edge) { unsigned wt = tex1Dfetch(tex_edgessrcwt, edge); unsigned dst = tex1Dfetch(tex_edgessrcdst, edge); //if(dst != g.nnodes) { if(wt < min_weight || (wt == min_weight && dst < min_dst)) { min_weight = wt; min_edge = edge; min_dst = dst; } } //else if (dst == min_dst && wt >= min_weight) //{ // g.edgessrcdst[edge] = g.nnodes; //} } vertex_minedge[id] = min_edge; } __global__ void initialize_color(unsigned int nnodes, unsigned int *color, unsigned int *vertex_minedge){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= nnodes) return; unsigned edge = vertex_minedge[id]; if(edge == 0) color[id] = id; else color[id] = tex1Dfetch(tex_edgessrcdst, edge); } __global__ void propagate_color(unsigned int nnodes, unsigned int *color, unsigned int *changed){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= nnodes) return; unsigned int my_color = color[id]; unsigned int other_color = color[my_color]; if(my_color != other_color) { color[id] = other_color; *changed = true; } } __global__ void remove_duplicates(CSR_Graph g, unsigned int *vertex_minedge){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nnodes) return; unsigned int edge = vertex_minedge[id]; if(edge == 0) return; unsigned int dst = tex1Dfetch(tex_edgessrcdst, edge); unsigned int other_edge = vertex_minedge[dst]; if(other_edge == 0) return; unsigned int other_dst = tex1Dfetch(tex_edgessrcdst, other_edge); if(id == other_dst && id > dst) // found loop and maintain edge by smaller vertex id { vertex_minedge[dst] = 0; } } __global__ void mark_mst_edges(unsigned int nnodes, unsigned int *selected_edges, unsigned int *vertex_minedge, unsigned int *map_edges){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= nnodes) return; unsigned int edge = vertex_minedge[id]; selected_edges[map_edges[edge]] = 1; } __global__ void create_new_vertex_id(CSR_Graph g, unsigned int *color, unsigned int *new_vertex, unsigned int *next_nnodes){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nnodes) return; if(id == color[id] && tex1Dfetch(tex_outdegree, id) > 0) // representative thread { new_vertex[id] = 1; } else new_vertex[id] = 0; } __global__ void count_new_edges(CSR_Graph g, CSR_Graph next, unsigned int *color, unsigned int *new_vertex){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nnodes) return; unsigned my_color = color[id]; // count how many edges I will be adding to supervertex unsigned new_edges = 0; unsigned edge = tex1Dfetch(tex_psrc, id); unsigned last_edge = edge + tex1Dfetch(tex_outdegree, id); for(; edge < last_edge; ++edge) { unsigned dst = tex1Dfetch(tex_edgessrcdst, edge); if(/*dst != g.nnodes &&*/ my_color != color[dst]) ++new_edges; } unsigned supervertex_id = new_vertex[my_color]; atomicAdd(&(next.outdegree[supervertex_id]), new_edges); } __global__ void setup_psrc(CSR_Graph next, unsigned int 
*next_nedges){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= next.nnodes) return; ++next.psrc[id]; } __global__ void insert_new_edges(CSR_Graph g, unsigned int *next_edgessrcdst, unsigned int *next_edgessrcwt, unsigned int *color, unsigned int *new_vertex, unsigned int *topedge_per_vertex, unsigned int *old_map_edges, unsigned int *new_map_edges){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nnodes) return; unsigned my_color = color[id]; unsigned supervertex_id = new_vertex[my_color]; unsigned edge = tex1Dfetch(tex_psrc, id); unsigned last_edge = edge + tex1Dfetch(tex_outdegree, id); for(; edge < last_edge; ++edge) { unsigned dst = tex1Dfetch(tex_edgessrcdst, edge); if(dst != g.nnodes) { unsigned other_color = color[dst]; if(my_color != other_color) { unsigned top_edge = atomicInc(&(topedge_per_vertex[supervertex_id]), UINT_MAX); //next_edgessrcdst[top_edge] = other_supervertex; next_edgessrcdst[top_edge] = new_vertex[other_color]; next_edgessrcwt[top_edge] = tex1Dfetch(tex_edgessrcwt, edge); new_map_edges[top_edge] = old_map_edges[edge]; } } } } __global__ void load_weights(CSR_Graph g, unsigned int *selected_edges, unsigned int *vertex_minweight){ unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if(id >= g.nedges + 1) return; if(selected_edges[id] == 1) { vertex_minweight[id] = g.edgessrcwt[id]; } } MGPU_MEM(unsigned int) BoruvkaUMinho_GPU(CSR_Graph *h_graph, unsigned block_size){ mgpu::ContextPtr context = mgpu::CreateCudaDevice(0, NULL, true); detect_devices(); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); //CSR_Graph *h_graph = new CSR_Graph(argv[1]); std::vector<CSR_Graph*> d_graph; d_graph.push_back(new CSR_Graph(h_graph->nnodes, h_graph->nedges, DEVICE)); d_graph[0]->d_allocate(); unsigned problem_size = h_graph->nnodes; unsigned edges_size = h_graph->nedges; //unsigned block_size = 1024; unsigned int *next_nnodes, *next_nedges; cudaMalloc((void **)&next_nnodes, sizeof(unsigned int)); cudaMalloc((void **)&next_nedges, sizeof(unsigned int)); unsigned int *d_changed, h_changed; if(cudaMalloc((void **)&d_changed, sizeof(unsigned int)) != cudaSuccess) { CudaTest(const_cast<char*>("allocating changed failed")); } MGPU_MEM(unsigned int) vertex_minedge = context->Malloc<unsigned int>(problem_size); MGPU_MEM(unsigned int) vertex_minweight = context->Fill<unsigned int>(edges_size+1, 0); MGPU_MEM(unsigned int) color = context->Malloc<unsigned int>(problem_size); MGPU_MEM(unsigned int) new_vertex = context->Malloc<unsigned int>(problem_size); MGPU_MEM(unsigned int) supervertex_flag = context->Malloc<unsigned int>(problem_size); MGPU_MEM(unsigned int) topedge_per_vertex = context->Malloc<unsigned int>(problem_size); MGPU_MEM(unsigned int) map_edges = context->FillAscending<unsigned int>(edges_size + 1, 0, 1); MGPU_MEM(unsigned int) selected_edges = context->Fill<unsigned int>(edges_size + 1, 0); MGPU_MEM(unsigned int) new_map_edges = context->FillAscending<unsigned int>(edges_size + 1, 0, 1); double starttime, endtime; float time; float timings[19]; for(unsigned j = 0; j < 19; ++j) { timings[j] = 0.0f; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); unsigned int iteration = 0; long unsigned int total_weight = 0; starttime = rtclock(); cudaEventRecord(start, 0); h_graph->copyHostToDevice(d_graph[0]); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[18] += time; CudaTest(const_cast<char*>("copy CSR_Graph host to device failed")); do{ 
//toString<<<1,1>>>(*d_graph[iteration]); unsigned n_blocks = compute_n_blocks(problem_size, block_size); printf("Graph has %u nodes and %u edges\n", problem_size, edges_size); //SegSortPairsFromIndices(d_graph[iteration]->edgessrcwt, d_graph[iteration]->edgessrcdst, edges_size+1, d_graph[iteration]->psrc, problem_size+1, *context); cudaBindTexture(0, tex_psrc, d_graph[iteration]->psrc, sizeof(unsigned int) * problem_size); CudaTest(const_cast<char*>("bind tex_psrc failed")); cudaBindTexture(0, tex_outdegree, d_graph[iteration]->outdegree, sizeof(unsigned int) * problem_size); CudaTest(const_cast<char*>("bind tex_outdegree failed")); cudaBindTexture(0, tex_edgessrcdst, d_graph[iteration]->edgessrcdst, sizeof(unsigned int) * (edges_size + 1)); CudaTest(const_cast<char*>("bind tex_edgessrcdst failed")); cudaBindTexture(0, tex_edgessrcwt, d_graph[iteration]->edgessrcwt, sizeof(unsigned int) * (edges_size + 1)); CudaTest(const_cast<char*>("bind tex_edgessrcwt failed")); cudaEventRecord(start, 0); find_min_per_vertex<<<n_blocks, block_size>>>(*d_graph[iteration], vertex_minedge->get()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[0] += time; CudaTest(const_cast<char*>("find_min_per_vertex failed")); // depends on find_min_per_vertex cudaEventRecord(start, 0); remove_duplicates<<<n_blocks, block_size>>>(*d_graph[iteration], vertex_minedge->get()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[16] += time; CudaTest(const_cast<char*>("remove_duplicates failed")); cudaEventRecord(start, 0); initialize_color<<<n_blocks, block_size>>>(d_graph[iteration]->nnodes, color->get(), vertex_minedge->get()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[1] += time; CudaTest(const_cast<char*>("initialize_color color failed")); do{ cudaEventRecord(start, 0); cudaMemset(d_changed, 0, sizeof(unsigned int)); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[2] += time; CudaTest(const_cast<char*>("memset d_changed failed")); // depends on initialize color // depends on find_min_per_vertex cudaEventRecord(start, 0); propagate_color<<<n_blocks, block_size>>>(d_graph[iteration]->nnodes, color->get(), d_changed); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[3] += time; CudaTest(const_cast<char*>("propagate_color failed")); cudaEventRecord(start, 0); cudaMemcpy(&h_changed, d_changed, sizeof(h_changed), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[4] += time; CudaTest(const_cast<char*>("copy d_changed failed")); } while(h_changed); /////////////////////// // saving selected edges and weights /////////////////////// cudaEventRecord(start, 0); mark_mst_edges<<<n_blocks, block_size>>>(d_graph[iteration]->nnodes, selected_edges->get(), vertex_minedge->get(), map_edges->get()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[17] += time; CudaTest(const_cast<char*>("mark_mst_edges failed")); /////////////////////// // allocate new device graph /////////////////////// cudaEventRecord(start, 0); d_graph.push_back(new CSR_Graph(0, 0, DEVICE)); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[7] += time; CudaTest(const_cast<char*>("push_back failed")); 
/////////////////////// // creating supervertices /////////////////////// //depends on propagate colors cudaEventRecord(start, 0); cudaMemset(next_nnodes, 0, sizeof(unsigned int)); create_new_vertex_id<<<n_blocks, block_size>>>(*d_graph[iteration], color->get(), supervertex_flag->get(), next_nnodes); CudaTest(const_cast<char*>("create_new_vertex_id failed")); mgpu::Scan<mgpu::MgpuScanTypeExc>(supervertex_flag->get(), problem_size, (unsigned int)0, mgpu::plus<unsigned int>(), (unsigned int*)0, &(d_graph[iteration+1]->nnodes), new_vertex->get(), *context); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); //cudaMemcpy(&(d_graph[iteration+1]->nnodes), next_nnodes, sizeof(unsigned int), cudaMemcpyDeviceToHost); timings[10] += time; CudaTest(const_cast<char*>("mgpu::scan failed")); unsigned new_nnodes; new_nnodes = d_graph[iteration+1]->nnodes; if(unlikely(new_nnodes <= 1)) { cudaUnbindTexture(tex_psrc); cudaUnbindTexture(tex_outdegree); cudaUnbindTexture(tex_edgessrcdst); cudaUnbindTexture(tex_edgessrcwt); //if(iteration > 0) d_graph[iteration]->d_deallocate(); d_graph[iteration]->d_deallocate(); break; } d_graph[iteration+1]->d_allocate_nodes(); cudaDeviceSynchronize(); /////////////////////// // inserting new contracted edges /////////////////////// // depends on propagate colors // depends on create_new_vertex_id cudaEventRecord(start, 0); count_new_edges<<<n_blocks, block_size>>>(*d_graph[iteration], *d_graph[iteration+1], color->get(), new_vertex->get()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[12] += time; CudaTest(const_cast<char*>("count_new_edges failed")); // depends on count_new_edges cudaEventRecord(start, 0); cudaMemset(next_nedges, 0, sizeof(unsigned int)); mgpu::Scan<mgpu::MgpuScanTypeExc>(d_graph[iteration+1]->outdegree, new_nnodes, (unsigned int)0, mgpu::plus<unsigned int>(), (unsigned int*)0, &(d_graph[iteration+1]->nedges), d_graph[iteration+1]->psrc, *context); CudaTest(const_cast<char*>("mgpu::Scan failed")); setup_psrc<<<compute_n_blocks(new_nnodes, block_size), block_size>>>(*d_graph[iteration+1], next_nedges); CudaTest(const_cast<char*>("setup_psrc failed")); //cudaMemcpy(&(d_graph[iteration+1]->nedges), next_nedges, sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[13] += time; d_graph[iteration+1]->d_allocate_edges(); cudaDeviceSynchronize(); cudaEventRecord(start, 0); cudaMemcpy(topedge_per_vertex->get(), d_graph[iteration+1]->psrc, sizeof(unsigned int) * new_nnodes, cudaMemcpyDeviceToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[14] += time; CudaTest(const_cast<char*>("copy topedge_per_vertex failed")); // depends on topedge_per_vertex memcpy // depends on setup_psrc cudaEventRecord(start, 0); insert_new_edges<<<n_blocks, block_size>>>(*d_graph[iteration], d_graph[iteration+1]->edgessrcdst, d_graph[iteration+1]->edgessrcwt, color->get(), new_vertex->get(), topedge_per_vertex->get(), map_edges->get(), new_map_edges->get()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[15] += time; CudaTest(const_cast<char*>("insert_new_edges failed")); edges_size = d_graph[iteration+1]->nedges; cudaEventRecord(start, 0); cudaMemcpy(map_edges->get(), new_map_edges->get(), sizeof(unsigned int) * (edges_size + 1), cudaMemcpyDeviceToDevice); 
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timings[6] += time; CudaTest(const_cast<char*>("copy map_edges failed")); problem_size = new_nnodes; cudaUnbindTexture(tex_psrc); cudaUnbindTexture(tex_outdegree); cudaUnbindTexture(tex_edgessrcdst); cudaUnbindTexture(tex_edgessrcwt); if(iteration > 0) { //cudaDeviceSynchronize(); d_graph[iteration]->d_deallocate(); //cudaDeviceSynchronize(); } ++iteration; } while(true); endtime = rtclock(); // the selected MST edges are in the selected_edges array // cudaEventRecord(start, 0); // load_weights<<<compute_n_blocks(h_graph->nedges + 1, block_size), block_size>>>(*d_graph[0], selected_edges->get(), vertex_minweight->get()); // mgpu::Reduce(vertex_minweight->get(), h_graph->nedges + 1, (long unsigned int)0, mgpu::plus<long unsigned int>(), (long unsigned int*)0, &total_weight, *context); // cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); // timings[5] += time; // CudaTest(const_cast<char*>("mgpu::Reduce failed")); // unsigned int mst_edges = 0; // mgpu::Reduce(selected_edges->get(), h_graph->nedges + 1, (unsigned int)0, mgpu::plus<unsigned int>(), (unsigned int*)0, &mst_edges, *context); printf("%.1f\t ms on copying source graph to GPU\n", timings[18]); printf("%.1f\t ms on find_min_per_vertex\n", timings[0]); printf("%.1f\t ms on initialize_color\n", timings[1]); printf("%.1f\t ms on memset d_changed\n", timings[2]); printf("%.1f\t ms on propagate_color\n", timings[3]); printf("%.1f\t ms on copy d_changed\n", timings[4]); printf("%.1f\t ms on remove_duplicates\n", timings[16]); printf("%.1f\t ms on mark mst edges\n", timings[17]); printf("%.1f\t ms on push_back\n", timings[7]); printf("%.1f\t ms on create_new_vertex_id\n", timings[10]); printf("%.1f\t ms on count_new_edges\n", timings[12]); printf("%.1f\t ms on setup_psrc\n", timings[13]); printf("%.1f\t ms on copy topedge_per_vertex\n", timings[14]); printf("%.1f\t ms on insert_new_edges\n", timings[15]); printf("%.1f\t ms on copy map_edges\n", timings[6]); printf("%.3lf\t ms total execution time\n", 1000 * (endtime - starttime)); //printf("\t%.1f ms on weight computation\n", timings[5]); //printf("total mst weight %lu (not counting mirrored edges (/2): %lu) and %u edges\n", total_weight*2, total_weight, mst_edges-1); return selected_edges; }
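Nearly every stage in the routine above is bracketed by cudaEventRecord / cudaEventElapsedTime and accumulated into a per-stage slot of the timings array. The self-contained sketch below shows that measurement pattern in isolation; the kernel, problem size, and loop count are placeholders.

// Event-based per-stage timing sketch, illustration only.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(int* data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] += 1;
}

int main() {
  const int n = 1 << 20;
  int* d = nullptr;
  cudaMalloc(&d, n * sizeof(int));
  cudaMemset(d, 0, n * sizeof(int));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  float stage_ms = 0.0f, time = 0.0f;
  for (int iter = 0; iter < 10; ++iter) {
    cudaEventRecord(start, 0);
    dummy_kernel<<<(n + 255) / 256, 256>>>(d, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);          // wait so the elapsed time is valid
    cudaEventElapsedTime(&time, start, stop);
    stage_ms += time;                    // accumulate, like timings[i] above
  }
  std::printf("%.3f ms over 10 launches\n", stage_ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d);
  return 0;
}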
a6c1b3713c7896f627ad32e9fbb47476029d2885.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" #define blockSize 256 #define blockSizeHalf 128 namespace StreamCompaction { namespace Efficient { //code from http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html __global__ void prescan(int n, int *odata, const int *idata) { extern __shared__ int temp[]; int tid = threadIdx.x; int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = 1; temp[2 * tid] = idata[2 * index ]; temp[2 * tid + 1] = idata[2 * index + 1 ]; for (int d = n >> 1; d > 0; d >>= 1) { __syncthreads(); if (tid < d) { int ai = offset * (2 * tid + 1) - 1; int bi = offset * (2 * tid + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (tid == 0) { temp[n - 1] = 0; } for (int d = 1; d < n; d *= 2) { offset >>= 1; __syncthreads(); if (tid < d) { int ai = offset * (2 * tid + 1) - 1; int bi = offset * (2 * tid + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); odata[2 * index ] = temp[2 * tid]; odata[2 * index + 1 ] = temp[2 * tid + 1]; } __global__ void sumEachBlock(int n, int *datasum, int *idata, int *odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if( index < n ) { datasum[index] = idata[(index + 1) * blockSize - 1] + odata[(index + 1) * blockSize - 1]; } } __global__ void addIncrements(int n, int *data, int *increments) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if( index < n ) { data[index] = data[index] + increments[blockIdx.x]; } } // scan on multiple blocks, algorithm from CIS565 lecture slides void scan(int n, int *odata, int *idata) { int blocksPerGrid = (n + blockSize - 1) / blockSize; int n_new = blocksPerGrid * blockSize; int *dev_idata; int *dev_odata = odata; hipMalloc((void**)&dev_idata, n_new * sizeof(int)); hipMemset(dev_idata, 0, n_new * sizeof(int)); hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyDeviceToDevice); //prescan<<<blocksPerGrid, blockSize>>>(blockSize, dev_odata, dev_idata); hipLaunchKernelGGL(( prescan), dim3(blocksPerGrid), dim3(blockSizeHalf), blockSize * sizeof(int), 0, blockSize, dev_odata, dev_idata); if( blocksPerGrid > 1) { int *dev_sum, *dev_sum_scan; hipMalloc((void**)&dev_sum, blocksPerGrid * sizeof(int)); hipMalloc((void**)&dev_sum_scan, blocksPerGrid * sizeof(int)); int blocksPerGrid_new = (blocksPerGrid + blockSize - 1) / blockSize; hipLaunchKernelGGL(( sumEachBlock), dim3(blocksPerGrid_new), dim3(blockSize), 0, 0, blocksPerGrid, dev_sum, dev_odata, dev_idata); scan(blocksPerGrid, dev_sum_scan, dev_sum); hipLaunchKernelGGL(( addIncrements), dim3(blocksPerGrid), dim3(blockSize), 0, 0, n_new, dev_odata, dev_sum_scan); hipFree(dev_sum); hipFree(dev_sum_scan); } hipFree(dev_idata); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, Ray *odata, Ray *idata) { Ray *dev_idata = idata; Ray *dev_odata = odata; int *dev_bools; int *dev_indices; int hst_bools[n]; int hst_indices[n]; dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); //hipMalloc((void**)&dev_idata, n * sizeof(Ray)); //hipMalloc((void**)&dev_odata, n * sizeof(Ray)); hipMalloc((void**)&dev_bools, n * sizeof(int)); hipMalloc((void**)&dev_indices, n * sizeof(int)); hipMemset(dev_indices, 0, n * sizeof(int)); //hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( Common::kernMapToBoolean), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, n, dev_bools, dev_idata); hipMemcpy(hst_bools, dev_bools, n * sizeof(int), hipMemcpyDeviceToHost); scan(n, dev_indices, dev_bools); //scan(n, hst_indices, hst_bools); //printf("n is %d \n", n); hipMemcpy(hst_indices, dev_indices, n * sizeof(int), hipMemcpyDeviceToHost); hipLaunchKernelGGL(( Common::kernScatter), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, n, dev_odata, dev_idata, dev_bools, dev_indices); //hipFree(dev_idata); //hipFree(dev_odata); hipFree(dev_bools); hipFree(dev_indices); if(hst_bools[n-1] == 0) { return hst_indices[n-1]; } else { return hst_indices[n-1] + 1; } //return n; } } }
a6c1b3713c7896f627ad32e9fbb47476029d2885.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" #define blockSize 256 #define blockSizeHalf 128 namespace StreamCompaction { namespace Efficient { //code from http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html __global__ void prescan(int n, int *odata, const int *idata) { extern __shared__ int temp[]; int tid = threadIdx.x; int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = 1; temp[2 * tid] = idata[2 * index ]; temp[2 * tid + 1] = idata[2 * index + 1 ]; for (int d = n >> 1; d > 0; d >>= 1) { __syncthreads(); if (tid < d) { int ai = offset * (2 * tid + 1) - 1; int bi = offset * (2 * tid + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (tid == 0) { temp[n - 1] = 0; } for (int d = 1; d < n; d *= 2) { offset >>= 1; __syncthreads(); if (tid < d) { int ai = offset * (2 * tid + 1) - 1; int bi = offset * (2 * tid + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); odata[2 * index ] = temp[2 * tid]; odata[2 * index + 1 ] = temp[2 * tid + 1]; } __global__ void sumEachBlock(int n, int *datasum, int *idata, int *odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if( index < n ) { datasum[index] = idata[(index + 1) * blockSize - 1] + odata[(index + 1) * blockSize - 1]; } } __global__ void addIncrements(int n, int *data, int *increments) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if( index < n ) { data[index] = data[index] + increments[blockIdx.x]; } } // scan on multiple blocks, algorithm from CIS565 lecture slides void scan(int n, int *odata, int *idata) { int blocksPerGrid = (n + blockSize - 1) / blockSize; int n_new = blocksPerGrid * blockSize; int *dev_idata; int *dev_odata = odata; cudaMalloc((void**)&dev_idata, n_new * sizeof(int)); cudaMemset(dev_idata, 0, n_new * sizeof(int)); cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyDeviceToDevice); //prescan<<<blocksPerGrid, blockSize>>>(blockSize, dev_odata, dev_idata); prescan<<<blocksPerGrid, blockSizeHalf, blockSize * sizeof(int)>>>(blockSize, dev_odata, dev_idata); if( blocksPerGrid > 1) { int *dev_sum, *dev_sum_scan; cudaMalloc((void**)&dev_sum, blocksPerGrid * sizeof(int)); cudaMalloc((void**)&dev_sum_scan, blocksPerGrid * sizeof(int)); int blocksPerGrid_new = (blocksPerGrid + blockSize - 1) / blockSize; sumEachBlock<<<blocksPerGrid_new, blockSize>>>(blocksPerGrid, dev_sum, dev_odata, dev_idata); scan(blocksPerGrid, dev_sum_scan, dev_sum); addIncrements<<<blocksPerGrid, blockSize>>>(n_new, dev_odata, dev_sum_scan); cudaFree(dev_sum); cudaFree(dev_sum_scan); } cudaFree(dev_idata); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, Ray *odata, Ray *idata) { Ray *dev_idata = idata; Ray *dev_odata = odata; int *dev_bools; int *dev_indices; int hst_bools[n]; int hst_indices[n]; dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); //cudaMalloc((void**)&dev_idata, n * sizeof(Ray)); //cudaMalloc((void**)&dev_odata, n * sizeof(Ray)); cudaMalloc((void**)&dev_bools, n * sizeof(int)); cudaMalloc((void**)&dev_indices, n * sizeof(int)); cudaMemset(dev_indices, 0, n * sizeof(int)); //cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice); Common::kernMapToBoolean<<<fullBlocksPerGrid, blockSize>>>(n, dev_bools, dev_idata); cudaMemcpy(hst_bools, dev_bools, n * sizeof(int), cudaMemcpyDeviceToHost); scan(n, dev_indices, dev_bools); //scan(n, hst_indices, hst_bools); //printf("n is %d \n", n); cudaMemcpy(hst_indices, dev_indices, n * sizeof(int), cudaMemcpyDeviceToHost); Common::kernScatter<<<fullBlocksPerGrid, blockSize>>>(n, dev_odata, dev_idata, dev_bools, dev_indices); //cudaFree(dev_idata); //cudaFree(dev_odata); cudaFree(dev_bools); cudaFree(dev_indices); if(hst_bools[n-1] == 0) { return hst_indices[n-1]; } else { return hst_indices[n-1] + 1; } //return n; } } }
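The compact() routine above gives each kept ray its output slot through an exclusive scan of 0/1 flags and derives the surviving count from the last scanned index plus the last flag. The host-side reference below reproduces that bookkeeping on a small made-up array, as a way to sanity-check the GPU path; it is illustration only.

// Host reference for exclusive-scan compaction bookkeeping, illustration only.
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> bools = {1, 0, 1, 1, 0, 1};  // 1 = keep this element
  std::vector<int> indices(bools.size(), 0);

  for (size_t i = 1; i < bools.size(); ++i)     // exclusive prefix sum
    indices[i] = indices[i - 1] + bools[i - 1];

  int count = indices.back() + bools.back();    // surviving element count
  std::printf("compacted count = %d\n", count); // prints 4

  for (size_t i = 0; i < bools.size(); ++i)
    if (bools[i])                               // scatter: input i -> slot indices[i]
      std::printf("input %zu -> output slot %d\n", i, indices[i]);
  return 0;
}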
3a153e1879c7bb8880522db5df4b9533cc5b7ff4.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <math.h> #include <stdlib.h> #include <string.h> #include "parboil.h" #include "UDTypes.h" #include "scanLargeArray.h" #include "GPU_kernels.cu_include" #include "CPU_kernels.h" #define USE_CUDPP 0 #if USE_CUDPP #include "cudpp.h" #else #include "sort.h" #include "scanLargeArray.h" #endif #define BLOCKSIZE 512 #define PI 3.14159265359 #define CUERR \ do { \ hipError_t err; \ if ((err = hipGetLastError()) != hipSuccess) { \ printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \ return; \ } \ } while (0) // Compare function used for Qsort for CPU computation int compare (const void * a, const void * b) { return ( *(int*)a - *(int*)b ); } /*********************************************************************** * CUDA_interface is the main function for GPU execution. This * implementation uses compact binning to distribute input elements * into unit-cubed sized bins. The bins are then visited by GPU * threads, where every thread computes the value of one (or small set) * of output elements by computing the contributions of elements in * neighboring bins to these output elements. * * The bins have a limited bin size and everything beyond that bin size * is offloaded to the CPU to be computed in parallel with the GPU * gridding. ***********************************************************************/ void CUDA_interface ( struct pb_TimerSet* timers, unsigned int n, // Number of input elements parameters params, // Parameter struct which defines output gridSize, cutoff distance, etc. ReconstructionSample* sample, // Array of input elements float* LUT, // Precomputed LUT table of Kaiser-Bessel function. // Used for computation on CPU instead of using the function every time int sizeLUT, // Size of LUT cmplx* gridData, // Array of output grid points. Each element has a real and imaginary component float* sampleDensity // Array of same size as gridData couting the number of contributions // to each grid point in the gridData array ){ /* Initializing all variables */ dim3 dims (8,4,2); //size of a gridding block on the GPU /* x, y, z dimensions of the output grid (gridData) */ int size_x = params.gridSize[0]; int size_y = params.gridSize[1]; int size_z = params.gridSize[2]; int size_xy = size_y*size_x; int gridNumElems = size_x * size_y * size_z; // Total number of grid points float beta = PI * sqrt(4*params.kernelWidth*params.kernelWidth/(params.oversample*params.oversample) * (params.oversample-.5)*(params.oversample-.5)-.8); float cutoff = float(params.kernelWidth)/2.0; // cutoff radius float cutoff2 = cutoff*cutoff; // square of cutoff radius float _1overCutoff2 = 1/cutoff2; // 1 over square of cutoff radius // Padding used to align the structure of arrays used for the sorted input elements int npad = 0; if (n % 64 != 0){ npad = 64 - (n%64); } /* Declarations of host data structures */ cmplx* gridData_CPU; float* sampleDensity_CPU; int* indices_CPU; /* Declarations of device data structures */ ReconstructionSample* sample_d = NULL; // Device array for original input array float* sortedSample_d = NULL; // Device array of the sorted (into bins) input elements. 
// This array is accessed by sortedSampleSoA_d in a structure // of arrays manner. float2* gridData_d = NULL; // Device array for output grid float* sampleDensity_d = NULL; // Device array for output sample density unsigned int* idxKey_d = NULL; // Array of bin indeces generated in the binning kernel // and used to sort the input elements into their // corresponding bins unsigned int* idxValue_d = NULL; // This array holds the indices of input elements in the // the original array. This array is sorted using the // the idxKey_d array, and once sorted, it is used in // the reorder kernel to move the actual elements into // their corresponding bins. sampleArrayStruct sortedSampleSoA_d; // Structure of Arrays which holds the sorted input elements. // Uses sortedSample_d as the underlying physical data // structures unsigned int* binCount_d = NULL; // Zero-initialized array which counts the number of elements // put in each bin. Based on this array, we determine which // elements get offloaded to the CPU unsigned int* binStartAddr_d = NULL; // Array of start offset of each of the compact bins /* Allocating device memory */ //pb_SwitchToTimer(timers, pb_TimerID_COPY); hipMalloc((void**)&sortedSample_d, (n+npad)*sizeof(ReconstructionSample)); hipMalloc((void**)&binStartAddr_d, (gridNumElems+1)*sizeof(unsigned int)); hipMalloc((void**)&sample_d, n*sizeof(ReconstructionSample)); hipMalloc((void**)&idxKey_d, (((n+3)/4)*4)*sizeof(unsigned int)); //Pad to nearest multiple of 4 to hipMalloc((void**)&idxValue_d, (((n+3)/4)*4)*sizeof(unsigned int)); //satisfy a property of the sorting kernel. /*The CUDPP library features highly optimizes implementations for radix sort and prefix sum. However for portability reasons, we implemented our own, slightly less optimized versions of these operations. When performing prefix sum using CUDPP, the output array has to be different from the input array, which is why we would allocate an array for binCount_d. For our implementation, we allow the input and output arrays to be the same, therefore we reuse the binCount_d array to get the starting offset of each bin. */ #if USE_CUDPP hipMalloc((void**)&binCount_d, (gridNumElems+1)*sizeof(unsigned int)); #else binCount_d = binStartAddr_d; #endif CUERR; /* Transfering data from Host to Device */ hipMemcpyToSymbol(cutoff2_c, &cutoff2, sizeof(float), 0); hipMemcpyToSymbol(cutoff_c, &cutoff, sizeof(float), 0); hipMemcpyToSymbol(gridSize_c, params.gridSize, 3*sizeof(int), 0); hipMemcpyToSymbol(size_xy_c, &size_xy, sizeof(int), 0); hipMemcpyToSymbol(_1overCutoff2_c, &_1overCutoff2, sizeof(float), 0); hipMemcpy(sample_d, sample, n*sizeof(ReconstructionSample), hipMemcpyHostToDevice); hipMemset(binCount_d, 0, (gridNumElems+1)*sizeof(unsigned int)); // Initialize padding to max integer value, so that when sorted, // these elements get pushed to the end of the array. hipMemset(idxKey_d+n, 0xFF, (((n+3)&~(3))-n)*sizeof(unsigned int)); sortedSampleSoA_d.data = (float2*)(sortedSample_d); sortedSampleSoA_d.loc = (float4*)(sortedSample_d+2*(n+npad)); //pb_SwitchToTimer(timers, pb_TimerID_KERNEL); /* STEP 1: Perform binning. This kernel determines which output bin each input element * goes into. 
Any excess (beyond binsize) is put in the CPU bin */ dim3 block1 (BLOCKSIZE); dim3 grid1 ((n+BLOCKSIZE-1)/BLOCKSIZE); #ifdef _BINNING #ifdef _SYM klee_make_symbolic(sample_d, n*sizeof(ReconstructionSample), "sample_d_input"); #endif hipLaunchKernelGGL(( binning_kernel), dim3(grid1), dim3(block1), 0, 0, n, sample_d, idxKey_d, idxValue_d, binCount_d, params.binsize, gridNumElems); #endif /* STEP 2: Sort the index-value pair generate in the binning kernel */ #if USE_CUDPP CUDPPConfiguration config; config.datatype = CUDPP_UINT; config.algorithm = CUDPP_SORT_RADIX; config.options = CUDPP_OPTION_KEY_VALUE_PAIRS; CUDPPHandle sortplan = 0; CUDPPResult result = cudppPlan(&sortplan, config, n, 1, 0); int precision = 0; int numElems = gridNumElems; while (numElems > 0){ numElems >>= 1; precision++; } cudppSort(sortplan, idxKey_d, idxValue_d, int(precision), n); result = cudppDestroyPlan(sortplan); #else //sort(n, gridNumElems+1, idxKey_d, idxValue_d); #endif /* STEP 3: Reorder the input data, based on the sorted values from Step 2. * this step also involves changing the data from array of structs to a struct * of arrays. Also in this kernel, we populate an array with the starting index * of every output bin features in the input array, based on the sorted indices * from Step 2. * At the end of this step, we copy the start address and list of input elements * that will be computed on the CPU. */ #ifdef _REORDER hipLaunchKernelGGL(( reorder_kernel), dim3(grid1),dim3(block1), 0, 0, n, idxValue_d, sample_d, sortedSampleSoA_d); #endif //pb_SwitchToTimer(timers, pb_TimerID_COPY); hipFree(idxKey_d); hipFree(sample_d); //pb_SwitchToTimer(timers, pb_TimerID_KERNEL); /* STEP 4: In this step we generate the ADD scan of the array of starting indices * of the output bins. The result is an array that contains the starting address of * every output bin. */ #if USE_CUDPP config.datatype = CUDPP_UINT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_EXCLUSIVE; config.op=CUDPP_ADD; CUDPPHandle scanplan = 0; result = cudppPlan(&scanplan, config, gridNumElems+1, 1, 0); cudppScan(scanplan, binCount_d, binStartAddr_d, gridNumElems+1); result = cudppDestroyPlan(scanplan); #else //scanLargeArray(gridNumElems+1, binCount_d); #endif //pb_SwitchToTimer(timers, pb_TimerID_COPY); // Copy back to the CPU the indices of the input elements that will be processed on the CPU int cpuStart; hipMemcpy(&cpuStart, binCount_d+gridNumElems, sizeof(unsigned int), hipMemcpyDeviceToHost); int CPUbin_size = int(n)-int(cpuStart); int* CPUbin; hipHostMalloc((void**)&CPUbin,CPUbin_size*sizeof(unsigned int)); hipMemcpy(CPUbin, idxValue_d+cpuStart, CPUbin_size*sizeof(unsigned int), hipMemcpyDeviceToHost); hipFree(idxValue_d); #if USE_CUDPP hipFree(binCount_d); #endif /* STEP 5: Perform the binning on the GPU. The results are computed in a gather fashion * where each thread computes the value of one output element by reading the relevant * bins. 
*/ hipMalloc((void**)&gridData_d, gridNumElems*sizeof(float2)); hipMalloc((void**)&sampleDensity_d, gridNumElems*sizeof(float)); CUERR; hipMemset(gridData_d, 0, gridNumElems*sizeof(float2)); hipMemset(sampleDensity_d, 0, gridNumElems*sizeof(float)); //pb_SwitchToTimer(timers, pb_TimerID_KERNEL); #ifdef _GRIDDING dim3 block2 (dims.x,dims.y,dims.z); dim3 grid2 (size_x/dims.x, (size_y*size_z)/(4*dims.y*dims.z)); hipLaunchKernelGGL(( gridding_GPU), dim3(grid2), dim3(block2), 0, 0, sortedSampleSoA_d, binStartAddr_d, gridData_d, sampleDensity_d, beta); #endif //pb_SwitchToTimer(timers, pb_TimerID_COMPUTE); #ifndef _SYM qsort(CPUbin, CPUbin_size, sizeof(int), compare); //Sorting helps cache locality of input element array int num = gridding_CPU(n, params, sample, CPUbin, CPUbin_size, LUT, sizeLUT, &gridData_CPU, &sampleDensity_CPU, &indices_CPU); //pb_SwitchToTimer(timers, pb_TimerID_COPY); /* Copying the results from the Device to the Host */ hipMemcpy(sampleDensity, sampleDensity_d, gridNumElems*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(gridData, gridData_d, gridNumElems*sizeof(float2),hipMemcpyDeviceToHost); //pb_SwitchToTimer(timers, pb_TimerID_COMPUTE); /* STEP 6: Computing the contributions of the sample points handled by the Host * and adding those to the GPU results. */ for (int i=0; i< num; i++){ gridData[indices_CPU[i]].real += gridData_CPU[i].real; gridData[indices_CPU[i]].imag += gridData_CPU[i].imag; sampleDensity[indices_CPU[i]] += sampleDensity_CPU[i]; } if (gridData_CPU != NULL){ free(indices_CPU); free(gridData_CPU); free(sampleDensity_CPU); } #endif //pb_SwitchToTimer(timers, pb_TimerID_COPY); hipHostFree(CPUbin); hipFree(gridData_d); hipFree(sampleDensity_d); hipFree(binCount_d); hipFree(sortedSample_d); //pb_SwitchToTimer(timers, pb_TimerID_NONE); return; }
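The .hip listing above was produced from the .cu file that follows; apart from the API renames (hipMalloc, hipMemcpyToSymbol, hipHostMalloc, and so on), the main mechanical rewrite is the kernel-launch syntax. A minimal sketch of that mapping follows; the scale kernel and the buffer size are hypothetical, only the two launch forms are taken from the pair.

#include <hip/hip_runtime.h>
#include <cstdio>

// Hypothetical kernel, present only to show the launch-syntax rewrite.
__global__ void scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main() {
    const int n = 1024;
    float* d_x = nullptr;
    hipMalloc(&d_x, n * sizeof(float));
    hipMemset(d_x, 0, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA source form:   scale<<<grid, block>>>(d_x, 2.0f, n);
    // hipify output form: the macro below, with explicit shared-mem bytes and stream.
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_x, 2.0f, n);

    hipDeviceSynchronize();
    hipFree(d_x);
    printf("launched scale on %d elements\n", n);
    return 0;
}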
3a153e1879c7bb8880522db5df4b9533cc5b7ff4.cu
/*************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <stdio.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <math.h> #include <stdlib.h> #include <string.h> #include "parboil.h" #include "UDTypes.h" #include "scanLargeArray.h" #include "GPU_kernels.cu_include" #include "CPU_kernels.h" #define USE_CUDPP 0 #if USE_CUDPP #include "cudpp.h" #else #include "sort.h" #include "scanLargeArray.h" #endif #define BLOCKSIZE 512 #define PI 3.14159265359 #define CUERR \ do { \ cudaError_t err; \ if ((err = cudaGetLastError()) != cudaSuccess) { \ printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \ return; \ } \ } while (0) // Compare function used for Qsort for CPU computation int compare (const void * a, const void * b) { return ( *(int*)a - *(int*)b ); } /*********************************************************************** * CUDA_interface is the main function for GPU execution. This * implementation uses compact binning to distribute input elements * into unit-cubed sized bins. The bins are then visited by GPU * threads, where every thread computes the value of one (or small set) * of output elements by computing the contributions of elements in * neighboring bins to these output elements. * * The bins have a limited bin size and everything beyond that bin size * is offloaded to the CPU to be computed in parallel with the GPU * gridding. ***********************************************************************/ void CUDA_interface ( struct pb_TimerSet* timers, unsigned int n, // Number of input elements parameters params, // Parameter struct which defines output gridSize, cutoff distance, etc. ReconstructionSample* sample, // Array of input elements float* LUT, // Precomputed LUT table of Kaiser-Bessel function. // Used for computation on CPU instead of using the function every time int sizeLUT, // Size of LUT cmplx* gridData, // Array of output grid points. Each element has a real and imaginary component float* sampleDensity // Array of same size as gridData couting the number of contributions // to each grid point in the gridData array ){ /* Initializing all variables */ dim3 dims (8,4,2); //size of a gridding block on the GPU /* x, y, z dimensions of the output grid (gridData) */ int size_x = params.gridSize[0]; int size_y = params.gridSize[1]; int size_z = params.gridSize[2]; int size_xy = size_y*size_x; int gridNumElems = size_x * size_y * size_z; // Total number of grid points float beta = PI * sqrt(4*params.kernelWidth*params.kernelWidth/(params.oversample*params.oversample) * (params.oversample-.5)*(params.oversample-.5)-.8); float cutoff = float(params.kernelWidth)/2.0; // cutoff radius float cutoff2 = cutoff*cutoff; // square of cutoff radius float _1overCutoff2 = 1/cutoff2; // 1 over square of cutoff radius // Padding used to align the structure of arrays used for the sorted input elements int npad = 0; if (n % 64 != 0){ npad = 64 - (n%64); } /* Declarations of host data structures */ cmplx* gridData_CPU; float* sampleDensity_CPU; int* indices_CPU; /* Declarations of device data structures */ ReconstructionSample* sample_d = NULL; // Device array for original input array float* sortedSample_d = NULL; // Device array of the sorted (into bins) input elements. // This array is accessed by sortedSampleSoA_d in a structure // of arrays manner. 
float2* gridData_d = NULL; // Device array for output grid float* sampleDensity_d = NULL; // Device array for output sample density unsigned int* idxKey_d = NULL; // Array of bin indeces generated in the binning kernel // and used to sort the input elements into their // corresponding bins unsigned int* idxValue_d = NULL; // This array holds the indices of input elements in the // the original array. This array is sorted using the // the idxKey_d array, and once sorted, it is used in // the reorder kernel to move the actual elements into // their corresponding bins. sampleArrayStruct sortedSampleSoA_d; // Structure of Arrays which holds the sorted input elements. // Uses sortedSample_d as the underlying physical data // structures unsigned int* binCount_d = NULL; // Zero-initialized array which counts the number of elements // put in each bin. Based on this array, we determine which // elements get offloaded to the CPU unsigned int* binStartAddr_d = NULL; // Array of start offset of each of the compact bins /* Allocating device memory */ //pb_SwitchToTimer(timers, pb_TimerID_COPY); cudaMalloc((void**)&sortedSample_d, (n+npad)*sizeof(ReconstructionSample)); cudaMalloc((void**)&binStartAddr_d, (gridNumElems+1)*sizeof(unsigned int)); cudaMalloc((void**)&sample_d, n*sizeof(ReconstructionSample)); cudaMalloc((void**)&idxKey_d, (((n+3)/4)*4)*sizeof(unsigned int)); //Pad to nearest multiple of 4 to cudaMalloc((void**)&idxValue_d, (((n+3)/4)*4)*sizeof(unsigned int)); //satisfy a property of the sorting kernel. /*The CUDPP library features highly optimizes implementations for radix sort and prefix sum. However for portability reasons, we implemented our own, slightly less optimized versions of these operations. When performing prefix sum using CUDPP, the output array has to be different from the input array, which is why we would allocate an array for binCount_d. For our implementation, we allow the input and output arrays to be the same, therefore we reuse the binCount_d array to get the starting offset of each bin. */ #if USE_CUDPP cudaMalloc((void**)&binCount_d, (gridNumElems+1)*sizeof(unsigned int)); #else binCount_d = binStartAddr_d; #endif CUERR; /* Transfering data from Host to Device */ cudaMemcpyToSymbol(cutoff2_c, &cutoff2, sizeof(float), 0); cudaMemcpyToSymbol(cutoff_c, &cutoff, sizeof(float), 0); cudaMemcpyToSymbol(gridSize_c, params.gridSize, 3*sizeof(int), 0); cudaMemcpyToSymbol(size_xy_c, &size_xy, sizeof(int), 0); cudaMemcpyToSymbol(_1overCutoff2_c, &_1overCutoff2, sizeof(float), 0); cudaMemcpy(sample_d, sample, n*sizeof(ReconstructionSample), cudaMemcpyHostToDevice); cudaMemset(binCount_d, 0, (gridNumElems+1)*sizeof(unsigned int)); // Initialize padding to max integer value, so that when sorted, // these elements get pushed to the end of the array. cudaMemset(idxKey_d+n, 0xFF, (((n+3)&~(3))-n)*sizeof(unsigned int)); sortedSampleSoA_d.data = (float2*)(sortedSample_d); sortedSampleSoA_d.loc = (float4*)(sortedSample_d+2*(n+npad)); //pb_SwitchToTimer(timers, pb_TimerID_KERNEL); /* STEP 1: Perform binning. This kernel determines which output bin each input element * goes into. 
Any excess (beyond binsize) is put in the CPU bin */ dim3 block1 (BLOCKSIZE); dim3 grid1 ((n+BLOCKSIZE-1)/BLOCKSIZE); #ifdef _BINNING #ifdef _SYM klee_make_symbolic(sample_d, n*sizeof(ReconstructionSample), "sample_d_input"); #endif binning_kernel<<<grid1, block1>>>(n, sample_d, idxKey_d, idxValue_d, binCount_d, params.binsize, gridNumElems); #endif /* STEP 2: Sort the index-value pair generate in the binning kernel */ #if USE_CUDPP CUDPPConfiguration config; config.datatype = CUDPP_UINT; config.algorithm = CUDPP_SORT_RADIX; config.options = CUDPP_OPTION_KEY_VALUE_PAIRS; CUDPPHandle sortplan = 0; CUDPPResult result = cudppPlan(&sortplan, config, n, 1, 0); int precision = 0; int numElems = gridNumElems; while (numElems > 0){ numElems >>= 1; precision++; } cudppSort(sortplan, idxKey_d, idxValue_d, int(precision), n); result = cudppDestroyPlan(sortplan); #else //sort(n, gridNumElems+1, idxKey_d, idxValue_d); #endif /* STEP 3: Reorder the input data, based on the sorted values from Step 2. * this step also involves changing the data from array of structs to a struct * of arrays. Also in this kernel, we populate an array with the starting index * of every output bin features in the input array, based on the sorted indices * from Step 2. * At the end of this step, we copy the start address and list of input elements * that will be computed on the CPU. */ #ifdef _REORDER reorder_kernel<<<grid1,block1>>>(n, idxValue_d, sample_d, sortedSampleSoA_d); #endif //pb_SwitchToTimer(timers, pb_TimerID_COPY); cudaFree(idxKey_d); cudaFree(sample_d); //pb_SwitchToTimer(timers, pb_TimerID_KERNEL); /* STEP 4: In this step we generate the ADD scan of the array of starting indices * of the output bins. The result is an array that contains the starting address of * every output bin. */ #if USE_CUDPP config.datatype = CUDPP_UINT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_EXCLUSIVE; config.op=CUDPP_ADD; CUDPPHandle scanplan = 0; result = cudppPlan(&scanplan, config, gridNumElems+1, 1, 0); cudppScan(scanplan, binCount_d, binStartAddr_d, gridNumElems+1); result = cudppDestroyPlan(scanplan); #else //scanLargeArray(gridNumElems+1, binCount_d); #endif //pb_SwitchToTimer(timers, pb_TimerID_COPY); // Copy back to the CPU the indices of the input elements that will be processed on the CPU int cpuStart; cudaMemcpy(&cpuStart, binCount_d+gridNumElems, sizeof(unsigned int), cudaMemcpyDeviceToHost); int CPUbin_size = int(n)-int(cpuStart); int* CPUbin; cudaMallocHost((void**)&CPUbin,CPUbin_size*sizeof(unsigned int)); cudaMemcpy(CPUbin, idxValue_d+cpuStart, CPUbin_size*sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaFree(idxValue_d); #if USE_CUDPP cudaFree(binCount_d); #endif /* STEP 5: Perform the binning on the GPU. The results are computed in a gather fashion * where each thread computes the value of one output element by reading the relevant * bins. 
*/ cudaMalloc((void**)&gridData_d, gridNumElems*sizeof(float2)); cudaMalloc((void**)&sampleDensity_d, gridNumElems*sizeof(float)); CUERR; cudaMemset(gridData_d, 0, gridNumElems*sizeof(float2)); cudaMemset(sampleDensity_d, 0, gridNumElems*sizeof(float)); //pb_SwitchToTimer(timers, pb_TimerID_KERNEL); #ifdef _GRIDDING dim3 block2 (dims.x,dims.y,dims.z); dim3 grid2 (size_x/dims.x, (size_y*size_z)/(4*dims.y*dims.z)); gridding_GPU<<<grid2, block2>>>(sortedSampleSoA_d, binStartAddr_d, gridData_d, sampleDensity_d, beta); #endif //pb_SwitchToTimer(timers, pb_TimerID_COMPUTE); #ifndef _SYM qsort(CPUbin, CPUbin_size, sizeof(int), compare); //Sorting helps cache locality of input element array int num = gridding_CPU(n, params, sample, CPUbin, CPUbin_size, LUT, sizeLUT, &gridData_CPU, &sampleDensity_CPU, &indices_CPU); //pb_SwitchToTimer(timers, pb_TimerID_COPY); /* Copying the results from the Device to the Host */ cudaMemcpy(sampleDensity, sampleDensity_d, gridNumElems*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(gridData, gridData_d, gridNumElems*sizeof(float2),cudaMemcpyDeviceToHost); //pb_SwitchToTimer(timers, pb_TimerID_COMPUTE); /* STEP 6: Computing the contributions of the sample points handled by the Host * and adding those to the GPU results. */ for (int i=0; i< num; i++){ gridData[indices_CPU[i]].real += gridData_CPU[i].real; gridData[indices_CPU[i]].imag += gridData_CPU[i].imag; sampleDensity[indices_CPU[i]] += sampleDensity_CPU[i]; } if (gridData_CPU != NULL){ free(indices_CPU); free(gridData_CPU); free(sampleDensity_CPU); } #endif //pb_SwitchToTimer(timers, pb_TimerID_COPY); cudaFreeHost(CPUbin); cudaFree(gridData_d); cudaFree(sampleDensity_d); cudaFree(binCount_d); cudaFree(sortedSample_d); //pb_SwitchToTimer(timers, pb_TimerID_NONE); return; }
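Both versions implement the pipeline described in the header comment: bin the input elements, sort the (bin index, element index) pairs, reorder into a structure of arrays, scan the per-bin counts, then grid in a gather fashion. STEP 4's exclusive ADD scan is what turns per-bin counts into bin start offsets; its last entry marks where the CPU-bound overflow begins in the sorted array. A host-side sketch of that result, independent of the scanLargeArray/CUDPP implementations:

#include <cstdio>
#include <vector>

// Exclusive scan over per-bin counts: start[i] is the first slot of bin i,
// start.back() is the total number of binned elements.
std::vector<unsigned> exclusiveScan(const std::vector<unsigned>& binCount) {
    std::vector<unsigned> start(binCount.size() + 1, 0);
    for (size_t i = 0; i < binCount.size(); ++i)
        start[i + 1] = start[i] + binCount[i];
    return start;
}

int main() {
    std::vector<unsigned> counts = {3, 0, 5, 2};   // hypothetical bin occupancy
    for (unsigned s : exclusiveScan(counts))
        std::printf("%u ", s);                     // prints: 0 3 3 8 10
    std::printf("\n");
    return 0;
}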
33b19d21975f7ad4aacd019f830ad065aa5acc01.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>

// Fill the buffer with idx - 3000 so some values fall outside [-1000, 1000].
__global__ void initialDataGPU(float *ip, int nx, int ny){
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    int size = nx * ny;
    if (idx < size){
        ip[idx] = (float)(idx) - 3000.0f;
    }
}

// Clamp every element to the range [-1000, 1000].
__global__ void del_fake_shadow(float *ip, int nx, int ny){
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    int size = nx * ny;
    if (idx < size){
        if (ip[idx] < -1000.0f){
            ip[idx] = -1000.0f;
        }
        else if (ip[idx] > 1000.0f){
            ip[idx] = 1000.0f;
        }
    }
}

double cpuSecond(){
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec*1e-6);
}

void printMatrix(float *C, const int nx, const int ny){
    float *ic = C;
    printf("\nMatrix: (%d,%d)\n", nx, ny);
    for (int iy = 0; iy < 10; ++iy)
    {
        for (int ix = 0; ix < nx; ++ix)
        {
            printf("%.2f\n", ic[ix]);
        }
        ic += nx;
    }
}

int main()
{
    //prepare part
    int dev = 0;
    hipSetDevice(dev);

    int nx = 512;
    int ny = 512;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(float);

    float *h_A, *hostRef, *gpuRef;
    h_A = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);

    float *d_A, *d_B;
    hipMalloc((void **)&d_A, nBytes);
    hipMalloc((void **)&d_B, nBytes);

    int dimx = 1;
    int dimy = 1;
    dim3 block(dimx, dimy);
    dim3 grid((nx+block.x-1)/block.x, ny);

    hipLaunchKernelGGL(( initialDataGPU), dim3(grid), dim3(block), 0, 0, d_A, nx, ny);
    // copy all nxy floats back (nBytes = nxy * sizeof(float))
    hipMemcpy(h_A, d_A, nBytes, hipMemcpyDeviceToHost);

    double iStart = cpuSecond();
    hipLaunchKernelGGL(( del_fake_shadow), dim3(grid), dim3(block), 0, 0, d_A, nx, ny);
    hipMemcpy(gpuRef, d_A, nBytes, hipMemcpyDeviceToHost);
    double iElapse = cpuSecond() - iStart;
    printf("del fake use %f\n", iElapse);

    hipFree(d_A);
    hipFree(d_B);
    free(h_A);
    free(hostRef);
    free(gpuRef);

    return 0;
}
33b19d21975f7ad4aacd019f830ad065aa5acc01.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>

// Fill the buffer with idx - 3000 so some values fall outside [-1000, 1000].
__global__ void initialDataGPU(float *ip, int nx, int ny){
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    int size = nx * ny;
    if (idx < size){
        ip[idx] = (float)(idx) - 3000.0f;
    }
}

// Clamp every element to the range [-1000, 1000].
__global__ void del_fake_shadow(float *ip, int nx, int ny){
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    int size = nx * ny;
    if (idx < size){
        if (ip[idx] < -1000.0f){
            ip[idx] = -1000.0f;
        }
        else if (ip[idx] > 1000.0f){
            ip[idx] = 1000.0f;
        }
    }
}

double cpuSecond(){
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec*1e-6);
}

void printMatrix(float *C, const int nx, const int ny){
    float *ic = C;
    printf("\nMatrix: (%d,%d)\n", nx, ny);
    for (int iy = 0; iy < 10; ++iy)
    {
        for (int ix = 0; ix < nx; ++ix)
        {
            printf("%.2f\n", ic[ix]);
        }
        ic += nx;
    }
}

int main()
{
    //prepare part
    int dev = 0;
    cudaSetDevice(dev);

    int nx = 512;
    int ny = 512;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(float);

    float *h_A, *hostRef, *gpuRef;
    h_A = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);

    float *d_A, *d_B;
    cudaMalloc((void **)&d_A, nBytes);
    cudaMalloc((void **)&d_B, nBytes);

    int dimx = 1;
    int dimy = 1;
    dim3 block(dimx, dimy);
    dim3 grid((nx+block.x-1)/block.x, ny);

    initialDataGPU<<<grid, block>>>(d_A, nx, ny);
    // copy all nxy floats back (nBytes = nxy * sizeof(float))
    cudaMemcpy(h_A, d_A, nBytes, cudaMemcpyDeviceToHost);

    double iStart = cpuSecond();
    del_fake_shadow<<<grid, block>>>(d_A, nx, ny);
    cudaMemcpy(gpuRef, d_A, nBytes, cudaMemcpyDeviceToHost);
    double iElapse = cpuSecond() - iStart;
    printf("del fake use %f\n", iElapse);

    cudaFree(d_A);
    cudaFree(d_B);
    free(h_A);
    free(hostRef);
    free(gpuRef);

    return 0;
}
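The pair above launches its kernels with a 1x1 thread block (dimx = dimy = 1), so every block holds a single thread. A sketch of a fuller 2D configuration for the same iy*nx + ix indexing follows; clamp_kernel and the chosen block shape are hypothetical stand-ins, not part of the original files.

#include <cuda_runtime.h>

// Stand-in for del_fake_shadow: clamp each (ix, iy) element to [-1000, 1000].
__global__ void clamp_kernel(float *ip, int nx, int ny) {
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix < (unsigned)nx && iy < (unsigned)ny) {
        unsigned int idx = iy * nx + ix;
        ip[idx] = fminf(fmaxf(ip[idx], -1000.0f), 1000.0f);
    }
}

int main() {
    int nx = 512, ny = 512;
    float *d_A = nullptr;
    cudaMalloc(&d_A, nx * ny * sizeof(float));
    cudaMemset(d_A, 0, nx * ny * sizeof(float));

    dim3 block(32, 8);                                 // 256 threads per block
    dim3 grid((nx + block.x - 1) / block.x,
              (ny + block.y - 1) / block.y);           // grid covers the full nx x ny image
    clamp_kernel<<<grid, block>>>(d_A, nx, ny);
    cudaDeviceSynchronize();

    cudaFree(d_A);
    return 0;
}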
9387d8cc797957eb702579fcf1b07795d2b34b8c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "brick-cuda.h" #include "head.h" #include "headcu.h" #define out(i, j) out_arr[j][i] #define in(i, j) in_arr[j][i] __global__ void arr_kernel(bElem *in_ptr, bElem *out_ptr, bElem *c) { auto in_arr = (bElem (*)[STRIDE]) in_ptr; auto out_arr = (bElem (*)[STRIDE]) out_ptr; #include "arrcusched.h" { #include "kernel.h" } } #undef out #undef in __global__ void brick_kernel(unsigned (*grid)[STRIDE/TILEI], Brick2D in, Brick2D out, bElem *c) { #include "bricusched.h" brick("kernel.py", BVEC, (TILEJ, TILEI), (BFOLD), b); } int main() { // allocations bElem *c = randomArray({17}); bElem *c_dev; copyToDevice({17}, c_dev, c); auto in_arr = randomArray({STRIDE, STRIDE}); bElem *in_dev; copyToDevice({STRIDE, STRIDE}, in_dev, in_arr); auto out_arr = zeroArray({STRIDE, STRIDE}); bElem *out_dev; copyToDevice({STRIDE, STRIDE}, out_dev, out_arr); { auto compute = [&]() -> void { dim3 block(N/TILEI, N/TILEJ), thread(_TILEI, _TILEJ); hipLaunchKernelGGL(( arr_kernel), dim3(block), dim3(thread) , 0, 0, in_dev, out_dev, c_dev); }; #ifndef TYPE #include "cutiming.h" #else compute(); #endif copyFromDevice({STRIDE, STRIDE}, out_arr, out_dev); } #if TYPE == 1 { unsigned *grid_ptr; unsigned bSize = TILEJ * TILEI; auto bInfo = init_grid<2>(grid_ptr, {STRIDE/TILEJ, STRIDE/TILEI}); unsigned *grid_dev; copyToDevice({STRIDE/TILEJ, STRIDE/TILEI}, grid_dev, grid_ptr); auto bStorage = BrickStorage::allocate(bInfo.nbricks, bSize * 2); Brick<Dim<TILEJ, TILEI>, Dim<BFOLD>> in_bri(&bInfo, &bStorage, 0); Brick<Dim<TILEJ, TILEI>, Dim<BFOLD>> out_bri(&bInfo, &bStorage, bSize); BrickInfo<2> *bInfo_dev; auto _bInfo_dev = movBrickInfo(bInfo, hipMemcpyHostToDevice); { unsigned size = sizeof(BrickInfo<2>); hipMalloc(&bInfo_dev, size); hipMemcpy(bInfo_dev, &_bInfo_dev, size, hipMemcpyHostToDevice); } copyBrick<2>({STRIDE, STRIDE}, in_arr, grid_ptr, in_bri); BrickStorage *bStorage_dev; BrickStorage _bStorage_dev = movBrickStorage(bStorage, hipMemcpyHostToDevice); { unsigned size = sizeof(BrickStorage); hipMalloc(&bStorage_dev, size); hipMemcpy(bStorage_dev, &_bStorage_dev, size, hipMemcpyHostToDevice); } auto compute = [&]() -> void { Brick2D bIn(bInfo_dev, &_bStorage_dev, 0); Brick2D bOut(bInfo_dev, &_bStorage_dev, bSize); bIn.bStorage = bStorage_dev; bOut.bStorage = bStorage_dev; auto grid = (unsigned (*)[STRIDE/TILEI]) grid_dev; dim3 block(N/TILEI, N/TILEJ), thread(32); hipLaunchKernelGGL(( brick_kernel), dim3(block), dim3(thread) , 0, 0, grid, bIn, bOut, c_dev); }; #include "cutiming.h" hipDeviceSynchronize(); hipMemcpy(bStorage.dat, _bStorage_dev.dat, bStorage.chunks * bStorage.step * sizeof(bElem), hipMemcpyDeviceToHost); if (!compareBrick<2>({STRIDE, STRIDE}, out_arr, grid_ptr, out_bri)) return 1; } #endif return 0; }
9387d8cc797957eb702579fcf1b07795d2b34b8c.cu
#include "brick-cuda.h" #include "head.h" #include "headcu.h" #define out(i, j) out_arr[j][i] #define in(i, j) in_arr[j][i] __global__ void arr_kernel(bElem *in_ptr, bElem *out_ptr, bElem *c) { auto in_arr = (bElem (*)[STRIDE]) in_ptr; auto out_arr = (bElem (*)[STRIDE]) out_ptr; #include "arrcusched.h" { #include "kernel.h" } } #undef out #undef in __global__ void brick_kernel(unsigned (*grid)[STRIDE/TILEI], Brick2D in, Brick2D out, bElem *c) { #include "bricusched.h" brick("kernel.py", BVEC, (TILEJ, TILEI), (BFOLD), b); } int main() { // allocations bElem *c = randomArray({17}); bElem *c_dev; copyToDevice({17}, c_dev, c); auto in_arr = randomArray({STRIDE, STRIDE}); bElem *in_dev; copyToDevice({STRIDE, STRIDE}, in_dev, in_arr); auto out_arr = zeroArray({STRIDE, STRIDE}); bElem *out_dev; copyToDevice({STRIDE, STRIDE}, out_dev, out_arr); { auto compute = [&]() -> void { dim3 block(N/TILEI, N/TILEJ), thread(_TILEI, _TILEJ); arr_kernel<<< block, thread >>>(in_dev, out_dev, c_dev); }; #ifndef TYPE #include "cutiming.h" #else compute(); #endif copyFromDevice({STRIDE, STRIDE}, out_arr, out_dev); } #if TYPE == 1 { unsigned *grid_ptr; unsigned bSize = TILEJ * TILEI; auto bInfo = init_grid<2>(grid_ptr, {STRIDE/TILEJ, STRIDE/TILEI}); unsigned *grid_dev; copyToDevice({STRIDE/TILEJ, STRIDE/TILEI}, grid_dev, grid_ptr); auto bStorage = BrickStorage::allocate(bInfo.nbricks, bSize * 2); Brick<Dim<TILEJ, TILEI>, Dim<BFOLD>> in_bri(&bInfo, &bStorage, 0); Brick<Dim<TILEJ, TILEI>, Dim<BFOLD>> out_bri(&bInfo, &bStorage, bSize); BrickInfo<2> *bInfo_dev; auto _bInfo_dev = movBrickInfo(bInfo, cudaMemcpyHostToDevice); { unsigned size = sizeof(BrickInfo<2>); cudaMalloc(&bInfo_dev, size); cudaMemcpy(bInfo_dev, &_bInfo_dev, size, cudaMemcpyHostToDevice); } copyBrick<2>({STRIDE, STRIDE}, in_arr, grid_ptr, in_bri); BrickStorage *bStorage_dev; BrickStorage _bStorage_dev = movBrickStorage(bStorage, cudaMemcpyHostToDevice); { unsigned size = sizeof(BrickStorage); cudaMalloc(&bStorage_dev, size); cudaMemcpy(bStorage_dev, &_bStorage_dev, size, cudaMemcpyHostToDevice); } auto compute = [&]() -> void { Brick2D bIn(bInfo_dev, &_bStorage_dev, 0); Brick2D bOut(bInfo_dev, &_bStorage_dev, bSize); bIn.bStorage = bStorage_dev; bOut.bStorage = bStorage_dev; auto grid = (unsigned (*)[STRIDE/TILEI]) grid_dev; dim3 block(N/TILEI, N/TILEJ), thread(32); brick_kernel<<< block, thread >>>(grid, bIn, bOut, c_dev); }; #include "cutiming.h" cudaDeviceSynchronize(); cudaMemcpy(bStorage.dat, _bStorage_dev.dat, bStorage.chunks * bStorage.step * sizeof(bElem), cudaMemcpyDeviceToHost); if (!compareBrick<2>({STRIDE, STRIDE}, out_arr, grid_ptr, out_bri)) return 1; } #endif return 0; }
7c1f095d955a18dbc22d943d726464102a15d790.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * SmoothL1LossLayer.cpp * * Created on: Nov 23, 2016 * Author: jkim */ #include <vector> #include "SmoothL1LossLayer.h" #include "MathFunctions.h" #include "PropMgmt.h" #include "SysLog.h" #include "MemoryMgmt.h" #define SMOOTHL1LOSSLAYER_LOG 0 using namespace std; template <typename Dtype> int SmoothL1LossLayer<Dtype>::INNER_ID = 11010; template <typename Dtype> SmoothL1LossLayer<Dtype>::SmoothL1LossLayer() : SmoothL1LossLayer(NULL) {} template <typename Dtype> SmoothL1LossLayer<Dtype>::SmoothL1LossLayer(_SmoothL1LossPropLayer* prop) : LossLayer<Dtype>(), diff("diff"), errors("errors"), ones("ones") { this->type = Layer<Dtype>::SmoothL1Loss; if (prop) { this->prop = NULL; SNEW(this->prop, _SmoothL1LossPropLayer); SASSUME0(this->prop != NULL); *(this->prop) = *(prop); } else { this->prop = NULL; } } template <typename Dtype> SmoothL1LossLayer<Dtype>::~SmoothL1LossLayer() { if (this->prop != NULL) SFREE(this->prop); } template <typename Dtype> void SmoothL1LossLayer<Dtype>::reshape() { bool adjusted = Layer<Dtype>::_adjustInputShape(); if (adjusted) { this->hasWeights = (this->_inputData.size() >= 3); if (this->hasWeights) { SASSERT(this->_inputData.size() == 4, "If weights are used, must specify both inside and outside weights"); } this->_outputData[0]->reshape({1, 1, 1, 1}); this->_outputData[0]->mutable_host_grad()[0] = GET_PROP(prop, Loss, lossWeight); #if SMOOTHL1LOSSLAYER_LOG printf("<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n", GET_PROP(prop, Loss, name).c_str(), 1, 1, 1, 1); #endif } const uint32_t inputSize = this->_inputData.size(); for (uint32_t i = 0; i < inputSize; i++) { if (!Layer<Dtype>::_isInputShapeChanged(i)) continue; const vector<uint32_t>& inputDataShape = this->_inputData[i]->getShape(); this->_inputShape[i] = inputDataShape; // rpn_bbox_pred if (i == 0) { this->diff.reshape(inputDataShape); this->errors.reshape(inputDataShape); // vector of ones used to sum this->ones.reshape(inputDataShape); this->ones.reset_host_data(false, 1.0f); } // rpn_bbox_targets else if (i == 1) { // XXX: FullyConnectedLayer output (batches, 1, rows, 1) , // bbox_targets shape if (this->_inputData[0]->getShape() != this->_inputData[1]->getShape()) { this->_inputData[1]->reshape({this->_inputData[1]->getShape(2), 1, this->_inputData[1]->getShape(3), 1}); assert(this->_inputData[0]->getShape() == this->_inputData[1]->getShape()); } //assert(this->_inputData[0]->channels() == this->_inputData[1]->channels()); //assert(this->_inputData[0]->height() == this->_inputData[1]->height()); //assert(this->_inputData[0]->width() == this->_inputData[1]->width()); } // rpn_bbox_inside_weights else if (i == 2) { if (this->hasWeights) { if (this->_inputData[0]->getShape() != this->_inputData[2]->getShape()) { this->_inputData[2]->reshape({this->_inputData[2]->getShape(2), 1, this->_inputData[2]->getShape(3), 1}); assert(this->_inputData[0]->getShape() == this->_inputData[2]->getShape()); } //assert(this->_inputData[0]->channels() == this->_inputData[2]->channels()); //assert(this->_inputData[0]->height() == this->_inputData[2]->height()); //assert(this->_inputData[0]->width() == this->_inputData[2]->width()); } } // rpn_bbox_outside_weights else if (i == 3) { if (this->hasWeights) { if (this->_inputData[0]->getShape() != this->_inputData[3]->getShape()) { this->_inputData[3]->reshape({this->_inputData[3]->getShape(2), 1, this->_inputData[3]->getShape(3), 1}); assert(this->_inputData[0]->getShape() == 
this->_inputData[3]->getShape()); } //assert(this->_inputData[0]->channels() == this->_inputData[3]->channels()); //assert(this->_inputData[0]->height() == this->_inputData[3]->height()); //assert(this->_inputData[0]->width() == this->_inputData[3]->width()); } } } } template <typename Dtype> __global__ void SmoothL1Forward(const uint32_t n, const Dtype* in, Dtype* out, Dtype sigma2) { // f(x) = 0.5 * (sigma2 * x)^2 if |x| < 1 / sigma2 / sigma2 // |x| - 0.5 / sigma2 / sigma2 otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1.0 / sigma2) { out[index] = 0.5 * val * val * sigma2; } else { out[index] = abs_val - 0.5 / sigma2; } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::feedforward() { reshape(); const uint32_t count = this->_inputData[0]->getCount(); // prediction (inputData[0]) - target (inputData[1]) => diff soooa_gpu_sub( count, this->_inputData[0]->device_data(), this->_inputData[1]->device_data(), this->diff.mutable_device_data()); // d := b0 - b1 #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->_inputData[0]->print_data(); this->_inputData[1]->print_data(); this->diff.print_data(); this->_printOff(); #endif if (this->hasWeights) { #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->_inputData[2]->print_data(); this->diff.print_data(); this->_printOff(); #endif // apply "inside" weights soooa_gpu_mul( count, this->_inputData[2]->device_data(), this->diff.device_data(), this->diff.mutable_device_data()); // d := w_in * (b0 - b1) #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->diff.print_data(); this->_printOff(); #endif } // smoothL1Forward const float sigma2 = GET_PROP(prop, SmoothL1Loss, sigma) * GET_PROP(prop, SmoothL1Loss, sigma); hipLaunchKernelGGL(( SmoothL1Forward<Dtype>), dim3(SOOOA_GET_BLOCKS(count)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0, count, this->diff.device_data(), this->errors.mutable_device_data(), sigma2); CUDA_POST_KERNEL_CHECK; #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->diff.print_data(); this->errors.print_data(); this->_printOff(); #endif if (this->hasWeights) { #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->_inputData[3]->print_data(); this->errors.print_data(); this->_printOff(); #endif // apply "outside" weights soooa_gpu_mul( count, this->_inputData[3]->device_data(), this->errors.device_data(), this->errors.mutable_device_data()); // d := w_out * SmoothL1(w_in * (b0 - b1)) #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->errors.print_data(); this->_printOff(); #endif } const uint32_t firstAxis = GET_PROP(prop, SmoothL1Loss, firstAxis); const float lossWeight = GET_PROP(prop, Loss, lossWeight); Dtype loss; soooa_gpu_dot(count, this->ones.device_data(), this->errors.device_data(), &loss); this->_outputData[0]->mutable_host_data()[0] = loss * Dtype(lossWeight) / this->_inputData[0]->getShape(firstAxis); //this->_outputData[0]->mutable_host_data()[0] = loss * Dtype(this->lossWeight); //cout << "smoothl1loss: " << this->_outputData[0]->host_data()[0] << endl; } template <typename Dtype> __global__ void SmoothL1Backward(const uint32_t n, const Dtype* in, Dtype* out, Dtype sigma2) { // f'(x) = sigma2 * sigma2 * x if |x| < 1 / sigma2 / sigma2 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1.0 / sigma2) { out[index] = sigma2 * val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::backpropagation() { // after forwards, diff holds w_in * (b0 - b1) const 
float sigma2 = GET_PROP(prop, SmoothL1Loss, sigma) * GET_PROP(prop, SmoothL1Loss, sigma); const uint32_t count = this->diff.getCount(); hipLaunchKernelGGL(( SmoothL1Backward<Dtype>), dim3(SOOOA_GET_BLOCKS(count)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0, count, this->diff.device_data(), this->diff.mutable_device_data(), sigma2); CUDA_POST_KERNEL_CHECK; const vector<bool> propDown = GET_PROP(prop, SmoothL1Loss, propDown); const uint32_t firstAxis = GET_PROP(prop, SmoothL1Loss, firstAxis); for (uint32_t i = 0; i < 2; i++) { if (propDown[i]) { const Dtype sign = (i == 0) ? 1 : -1; // XXX: caffe, top[0]->cpu_diff()[0] set // 1 1.0f //const Dtype alpha = sign * this->_outputData[0]->host_grad()[0] / // this->_inputData[i]->batches(); const Dtype alpha = sign * GET_PROP(prop, Loss, lossWeight) / this->_inputData[i]->getShape(firstAxis); soooa_gpu_axpby( count, alpha, this->diff.device_data(), Dtype(0), this->_inputData[i]->mutable_device_grad()); //this->_printOn(); //this->_inputData[i]->print_grad({}, false, -1); //this->_printOff(); if (this->hasWeights) { // Scale by "inside" weight soooa_gpu_mul( count, this->_inputData[2]->device_data(), this->_inputData[i]->device_grad(), this->_inputData[i]->mutable_device_grad()); // Scale by "outside" weight soooa_gpu_mul( count, this->_inputData[3]->device_data(), this->_inputData[i]->device_grad(), this->_inputData[i]->mutable_device_grad()); } } } /* if (GET_PROP(prop, SmoothL1Loss, name) == "rpn_loss_bbox") { this->_printOn(); this->_inputData[i]->print_grad({}, false); this->_printOff(); } */ } template <typename Dtype> Dtype SmoothL1LossLayer<Dtype>::cost() { return this->_outputData[0]->host_data()[0]; } /**************************************************************************** * layer callback functions ****************************************************************************/ template<typename Dtype> void* SmoothL1LossLayer<Dtype>::initLayer() { SmoothL1LossLayer* layer = NULL; SNEW(layer, SmoothL1LossLayer<Dtype>); SASSUME0(layer != NULL); return (void*)layer; } template<typename Dtype> void SmoothL1LossLayer<Dtype>::destroyLayer(void* instancePtr) { SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr; SDELETE(layer); } template<typename Dtype> void SmoothL1LossLayer<Dtype>::setInOutTensor(void* instancePtr, void* tensorPtr, bool isInput, int index) { if (isInput) { SASSERT0(index < 4); } else { SASSERT0(index == 0); } SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr; if (isInput) { SASSERT0(layer->_inputData.size() == index); layer->_inputData.push_back((Data<Dtype>*)tensorPtr); } else { SASSERT0(layer->_outputData.size() == index); layer->_outputData.push_back((Data<Dtype>*)tensorPtr); } } template<typename Dtype> bool SmoothL1LossLayer<Dtype>::allocLayerTensors(void* instancePtr) { SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr; layer->reshape(); return true; } template<typename Dtype> void SmoothL1LossLayer<Dtype>::forwardTensor(void* instancePtr, int miniBatchIdx) { SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr; layer->feedforward(); } template<typename Dtype> void SmoothL1LossLayer<Dtype>::backwardTensor(void* instancePtr) { SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr; layer->backpropagation(); } template<typename Dtype> void SmoothL1LossLayer<Dtype>::learnTensor(void* instancePtr) { SASSERT0(false); } template class SmoothL1LossLayer<float>;
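feedforward() above reduces the per-element errors with soooa_gpu_dot against the ones buffer, i.e. it computes sum(errors) as a dot product so the reduction can reuse a BLAS-style primitive. A trivial host-side sketch of the same idea, with hypothetical names and data:

#include <cstdio>
#include <numeric>
#include <vector>

// sum(errors) expressed as dot(ones, errors), mirroring the soooa_gpu_dot call.
float sumViaDot(const std::vector<float>& errors) {
    std::vector<float> ones(errors.size(), 1.0f);
    return std::inner_product(ones.begin(), ones.end(), errors.begin(), 0.0f);
}

int main() {
    std::vector<float> errors = {0.5f, 0.25f, 1.0f};     // hypothetical per-element errors
    std::printf("loss sum = %f\n", sumViaDot(errors));   // prints 1.750000
    return 0;
}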
7c1f095d955a18dbc22d943d726464102a15d790.cu
/* * SmoothL1LossLayer.cpp * * Created on: Nov 23, 2016 * Author: jkim */ #include <vector> #include "SmoothL1LossLayer.h" #include "MathFunctions.h" #include "PropMgmt.h" #include "SysLog.h" #include "MemoryMgmt.h" #define SMOOTHL1LOSSLAYER_LOG 0 using namespace std; template <typename Dtype> int SmoothL1LossLayer<Dtype>::INNER_ID = 11010; template <typename Dtype> SmoothL1LossLayer<Dtype>::SmoothL1LossLayer() : SmoothL1LossLayer(NULL) {} template <typename Dtype> SmoothL1LossLayer<Dtype>::SmoothL1LossLayer(_SmoothL1LossPropLayer* prop) : LossLayer<Dtype>(), diff("diff"), errors("errors"), ones("ones") { this->type = Layer<Dtype>::SmoothL1Loss; if (prop) { this->prop = NULL; SNEW(this->prop, _SmoothL1LossPropLayer); SASSUME0(this->prop != NULL); *(this->prop) = *(prop); } else { this->prop = NULL; } } template <typename Dtype> SmoothL1LossLayer<Dtype>::~SmoothL1LossLayer() { if (this->prop != NULL) SFREE(this->prop); } template <typename Dtype> void SmoothL1LossLayer<Dtype>::reshape() { bool adjusted = Layer<Dtype>::_adjustInputShape(); if (adjusted) { this->hasWeights = (this->_inputData.size() >= 3); if (this->hasWeights) { SASSERT(this->_inputData.size() == 4, "If weights are used, must specify both inside and outside weights"); } this->_outputData[0]->reshape({1, 1, 1, 1}); this->_outputData[0]->mutable_host_grad()[0] = GET_PROP(prop, Loss, lossWeight); #if SMOOTHL1LOSSLAYER_LOG printf("<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n", GET_PROP(prop, Loss, name).c_str(), 1, 1, 1, 1); #endif } const uint32_t inputSize = this->_inputData.size(); for (uint32_t i = 0; i < inputSize; i++) { if (!Layer<Dtype>::_isInputShapeChanged(i)) continue; const vector<uint32_t>& inputDataShape = this->_inputData[i]->getShape(); this->_inputShape[i] = inputDataShape; // rpn_bbox_pred if (i == 0) { this->diff.reshape(inputDataShape); this->errors.reshape(inputDataShape); // vector of ones used to sum this->ones.reshape(inputDataShape); this->ones.reset_host_data(false, 1.0f); } // rpn_bbox_targets else if (i == 1) { // XXX: FullyConnectedLayer의 output이 (batches, 1, rows, 1)의 현 구조를 반영, // 강제로 bbox_targets의 shape를 조정 if (this->_inputData[0]->getShape() != this->_inputData[1]->getShape()) { this->_inputData[1]->reshape({this->_inputData[1]->getShape(2), 1, this->_inputData[1]->getShape(3), 1}); assert(this->_inputData[0]->getShape() == this->_inputData[1]->getShape()); } //assert(this->_inputData[0]->channels() == this->_inputData[1]->channels()); //assert(this->_inputData[0]->height() == this->_inputData[1]->height()); //assert(this->_inputData[0]->width() == this->_inputData[1]->width()); } // rpn_bbox_inside_weights else if (i == 2) { if (this->hasWeights) { if (this->_inputData[0]->getShape() != this->_inputData[2]->getShape()) { this->_inputData[2]->reshape({this->_inputData[2]->getShape(2), 1, this->_inputData[2]->getShape(3), 1}); assert(this->_inputData[0]->getShape() == this->_inputData[2]->getShape()); } //assert(this->_inputData[0]->channels() == this->_inputData[2]->channels()); //assert(this->_inputData[0]->height() == this->_inputData[2]->height()); //assert(this->_inputData[0]->width() == this->_inputData[2]->width()); } } // rpn_bbox_outside_weights else if (i == 3) { if (this->hasWeights) { if (this->_inputData[0]->getShape() != this->_inputData[3]->getShape()) { this->_inputData[3]->reshape({this->_inputData[3]->getShape(2), 1, this->_inputData[3]->getShape(3), 1}); assert(this->_inputData[0]->getShape() == this->_inputData[3]->getShape()); } //assert(this->_inputData[0]->channels() 
== this->_inputData[3]->channels()); //assert(this->_inputData[0]->height() == this->_inputData[3]->height()); //assert(this->_inputData[0]->width() == this->_inputData[3]->width()); } } } } template <typename Dtype> __global__ void SmoothL1Forward(const uint32_t n, const Dtype* in, Dtype* out, Dtype sigma2) { // f(x) = 0.5 * (sigma2 * x)^2 if |x| < 1 / sigma2 / sigma2 // |x| - 0.5 / sigma2 / sigma2 otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1.0 / sigma2) { out[index] = 0.5 * val * val * sigma2; } else { out[index] = abs_val - 0.5 / sigma2; } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::feedforward() { reshape(); const uint32_t count = this->_inputData[0]->getCount(); // prediction (inputData[0]) - target (inputData[1]) => diff soooa_gpu_sub( count, this->_inputData[0]->device_data(), this->_inputData[1]->device_data(), this->diff.mutable_device_data()); // d := b0 - b1 #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->_inputData[0]->print_data(); this->_inputData[1]->print_data(); this->diff.print_data(); this->_printOff(); #endif if (this->hasWeights) { #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->_inputData[2]->print_data(); this->diff.print_data(); this->_printOff(); #endif // apply "inside" weights soooa_gpu_mul( count, this->_inputData[2]->device_data(), this->diff.device_data(), this->diff.mutable_device_data()); // d := w_in * (b0 - b1) #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->diff.print_data(); this->_printOff(); #endif } // smoothL1Forward const float sigma2 = GET_PROP(prop, SmoothL1Loss, sigma) * GET_PROP(prop, SmoothL1Loss, sigma); SmoothL1Forward<Dtype><<<SOOOA_GET_BLOCKS(count), SOOOA_CUDA_NUM_THREADS>>>( count, this->diff.device_data(), this->errors.mutable_device_data(), sigma2); CUDA_POST_KERNEL_CHECK; #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->diff.print_data(); this->errors.print_data(); this->_printOff(); #endif if (this->hasWeights) { #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->_inputData[3]->print_data(); this->errors.print_data(); this->_printOff(); #endif // apply "outside" weights soooa_gpu_mul( count, this->_inputData[3]->device_data(), this->errors.device_data(), this->errors.mutable_device_data()); // d := w_out * SmoothL1(w_in * (b0 - b1)) #if SMOOTHL1LOSSLAYER_LOG this->_printOn(); this->errors.print_data(); this->_printOff(); #endif } const uint32_t firstAxis = GET_PROP(prop, SmoothL1Loss, firstAxis); const float lossWeight = GET_PROP(prop, Loss, lossWeight); Dtype loss; soooa_gpu_dot(count, this->ones.device_data(), this->errors.device_data(), &loss); this->_outputData[0]->mutable_host_data()[0] = loss * Dtype(lossWeight) / this->_inputData[0]->getShape(firstAxis); //this->_outputData[0]->mutable_host_data()[0] = loss * Dtype(this->lossWeight); //cout << "smoothl1loss: " << this->_outputData[0]->host_data()[0] << endl; } template <typename Dtype> __global__ void SmoothL1Backward(const uint32_t n, const Dtype* in, Dtype* out, Dtype sigma2) { // f'(x) = sigma2 * sigma2 * x if |x| < 1 / sigma2 / sigma2 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1.0 / sigma2) { out[index] = sigma2 * val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::backpropagation() { // after forwards, diff holds w_in * (b0 - b1) const float sigma2 = GET_PROP(prop, SmoothL1Loss, sigma) * GET_PROP(prop, SmoothL1Loss, sigma); const uint32_t count = 
this->diff.getCount(); SmoothL1Backward<Dtype><<<SOOOA_GET_BLOCKS(count), SOOOA_CUDA_NUM_THREADS>>>( count, this->diff.device_data(), this->diff.mutable_device_data(), sigma2); CUDA_POST_KERNEL_CHECK; const vector<bool> propDown = GET_PROP(prop, SmoothL1Loss, propDown); const uint32_t firstAxis = GET_PROP(prop, SmoothL1Loss, firstAxis); for (uint32_t i = 0; i < 2; i++) { if (propDown[i]) { const Dtype sign = (i == 0) ? 1 : -1; // XXX: caffe, top[0]->cpu_diff()[0]에 대해서 set하는 부분을 찾을 수 없고 // 현재 특수한 값이 들어 있는 것이 아닌 1의 값이 들어있어 상수 1.0f으로 대체 //const Dtype alpha = sign * this->_outputData[0]->host_grad()[0] / // this->_inputData[i]->batches(); const Dtype alpha = sign * GET_PROP(prop, Loss, lossWeight) / this->_inputData[i]->getShape(firstAxis); soooa_gpu_axpby( count, alpha, this->diff.device_data(), Dtype(0), this->_inputData[i]->mutable_device_grad()); //this->_printOn(); //this->_inputData[i]->print_grad({}, false, -1); //this->_printOff(); if (this->hasWeights) { // Scale by "inside" weight soooa_gpu_mul( count, this->_inputData[2]->device_data(), this->_inputData[i]->device_grad(), this->_inputData[i]->mutable_device_grad()); // Scale by "outside" weight soooa_gpu_mul( count, this->_inputData[3]->device_data(), this->_inputData[i]->device_grad(), this->_inputData[i]->mutable_device_grad()); } } } /* if (GET_PROP(prop, SmoothL1Loss, name) == "rpn_loss_bbox") { this->_printOn(); this->_inputData[i]->print_grad({}, false); this->_printOff(); } */ } template <typename Dtype> Dtype SmoothL1LossLayer<Dtype>::cost() { return this->_outputData[0]->host_data()[0]; } /**************************************************************************** * layer callback functions ****************************************************************************/ template<typename Dtype> void* SmoothL1LossLayer<Dtype>::initLayer() { SmoothL1LossLayer* layer = NULL; SNEW(layer, SmoothL1LossLayer<Dtype>); SASSUME0(layer != NULL); return (void*)layer; } template<typename Dtype> void SmoothL1LossLayer<Dtype>::destroyLayer(void* instancePtr) { SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr; SDELETE(layer); } template<typename Dtype> void SmoothL1LossLayer<Dtype>::setInOutTensor(void* instancePtr, void* tensorPtr, bool isInput, int index) { if (isInput) { SASSERT0(index < 4); } else { SASSERT0(index == 0); } SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr; if (isInput) { SASSERT0(layer->_inputData.size() == index); layer->_inputData.push_back((Data<Dtype>*)tensorPtr); } else { SASSERT0(layer->_outputData.size() == index); layer->_outputData.push_back((Data<Dtype>*)tensorPtr); } } template<typename Dtype> bool SmoothL1LossLayer<Dtype>::allocLayerTensors(void* instancePtr) { SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr; layer->reshape(); return true; } template<typename Dtype> void SmoothL1LossLayer<Dtype>::forwardTensor(void* instancePtr, int miniBatchIdx) { SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr; layer->feedforward(); } template<typename Dtype> void SmoothL1LossLayer<Dtype>::backwardTensor(void* instancePtr) { SmoothL1LossLayer<Dtype>* layer = (SmoothL1LossLayer<Dtype>*)instancePtr; layer->backpropagation(); } template<typename Dtype> void SmoothL1LossLayer<Dtype>::learnTensor(void* instancePtr) { SASSERT0(false); } template class SmoothL1LossLayer<float>;
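SmoothL1Forward and SmoothL1Backward implement a Huber-style piecewise function of the (weighted) prediction/target difference, with sigma2 = sigma * sigma taken from the layer properties. A host-side reference of the value and derivative as the kernels compute them; the sample inputs and sigma value are arbitrary.

#include <cmath>
#include <cstdio>

// Value of the piecewise function used in SmoothL1Forward.
float smoothL1(float x, float sigma2) {
    float ax = std::fabs(x);
    return (ax < 1.0f / sigma2) ? 0.5f * x * x * sigma2
                                : ax - 0.5f / sigma2;
}

// Derivative used in SmoothL1Backward: sigma2 * x in the quadratic zone, sign(x) outside.
float smoothL1Grad(float x, float sigma2) {
    float ax = std::fabs(x);
    if (ax < 1.0f / sigma2) return sigma2 * x;
    return (x > 0.0f) - (x < 0.0f);
}

int main() {
    const float sigma2 = 9.0f;                                   // arbitrary example (sigma = 3)
    const float xs[] = {-0.5f, -0.05f, 0.0f, 0.05f, 0.5f};
    for (float x : xs)
        std::printf("x=% .2f  f=% .5f  f'=% .3f\n",
                    x, smoothL1(x, sigma2), smoothL1Grad(x, sigma2));
    return 0;
}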
5ea73172ee7225d89ca38ea134b689b23d803f08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <dslash_quda.h> #include <read_gauge.h> #include <gauge_field.h> #include <clover_field.h> #include <fermion_force_quda.h> #include <force_common.h> #include <hw_quda.h> #if defined(GPU_FERMION_FORCE) namespace quda { namespace fermionforce { #include <dslash_constants.h> #include <dslash_textures.h> } using namespace fermionforce; #define BLOCK_DIM 64 #define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, Vh) #define LOAD_HW_SINGLE(hw_even, hw_odd, idx, var, oddness) do{ \ Float2* hw = (oddness)?hw_odd:hw_even; \ var##0 = hw[idx + 0*Vh]; \ var##1 = hw[idx + 1*Vh]; \ var##2 = hw[idx + 2*Vh]; \ var##3 = hw[idx + 3*Vh]; \ var##4 = hw[idx + 4*Vh]; \ var##5 = hw[idx + 5*Vh]; \ }while(0) #define WRITE_HW_SINGLE(hw_even, hw_odd, idx, var, oddness) do{ \ Float2* hw = (oddness)?hw_odd:hw_even; \ hw[idx + 0*Vh] = var##0; \ hw[idx + 1*Vh] = var##1; \ hw[idx + 2*Vh] = var##2; \ hw[idx + 3*Vh] = var##3; \ hw[idx + 4*Vh] = var##4; \ hw[idx + 5*Vh] = var##5; \ }while(0) #define LOAD_HW(hw_eve, hw_odd, idx, var, oddness) LOAD_HW_SINGLE(hw_eve, hw_odd, idx, var, oddness) #define WRITE_HW(hw_even, hw_odd, idx, var, oddness) WRITE_HW_SINGLE(hw_even, hw_odd, idx, var, oddness) #define LOAD_MATRIX(src, dir, idx, var) LOAD_MATRIX_12_SINGLE(src, dir, idx, var, Vh) #define FF_SITE_MATRIX_LOAD_TEX 1 #define linkEvenTex siteLink0TexSingle_recon #define linkOddTex siteLink1TexSingle_recon #if (FF_SITE_MATRIX_LOAD_TEX == 1) #define FF_LOAD_MATRIX(dir, idx, var, oddness) LOAD_MATRIX_12_SINGLE_TEX(((oddness)?linkOddTex:linkEvenTex), dir, idx, var, Vh) #else #define FF_LOAD_MATRIX(dir, idx, var, oddness) LOAD_MATRIX_12_SINGLE(((oddness)?linkOdd:linkEven), dir, idx, var, Vh) #endif #define linka00_re LINKA0.x #define linka00_im LINKA0.y #define linka01_re LINKA0.z #define linka01_im LINKA0.w #define linka02_re LINKA1.x #define linka02_im LINKA1.y #define linka10_re LINKA1.z #define linka10_im LINKA1.w #define linka11_re LINKA2.x #define linka11_im LINKA2.y #define linka12_re LINKA2.z #define linka12_im LINKA2.w #define linka20_re LINKA3.x #define linka20_im LINKA3.y #define linka21_re LINKA3.z #define linka21_im LINKA3.w #define linka22_re LINKA4.x #define linka22_im LINKA4.y #define linkb00_re LINKB0.x #define linkb00_im LINKB0.y #define linkb01_re LINKB0.z #define linkb01_im LINKB0.w #define linkb02_re LINKB1.x #define linkb02_im LINKB1.y #define linkb10_re LINKB1.z #define linkb10_im LINKB1.w #define linkb11_re LINKB2.x #define linkb11_im LINKB2.y #define linkb12_re LINKB2.z #define linkb12_im LINKB2.w #define linkb20_re LINKB3.x #define linkb20_im LINKB3.y #define linkb21_re LINKB3.z #define linkb21_im LINKB3.w #define linkb22_re LINKB4.x #define linkb22_im LINKB4.y #define MAT_MUL_HW(M, HW, HWOUT) \ HWOUT##00_re = (M##00_re * HW##00_re - M##00_im * HW##00_im) \ + (M##01_re * HW##01_re - M##01_im * HW##01_im) \ + (M##02_re * HW##02_re - M##02_im * HW##02_im); \ HWOUT##00_im = (M##00_re * HW##00_im + M##00_im * HW##00_re) \ + (M##01_re * HW##01_im + M##01_im * HW##01_re) \ + (M##02_re * HW##02_im + M##02_im * HW##02_re); \ HWOUT##01_re = (M##10_re * HW##00_re - M##10_im * HW##00_im) \ + (M##11_re * HW##01_re - M##11_im * HW##01_im) \ + (M##12_re * HW##02_re - M##12_im * HW##02_im); \ HWOUT##01_im = (M##10_re * HW##00_im + M##10_im * HW##00_re) \ + (M##11_re * HW##01_im + M##11_im * HW##01_re) \ + (M##12_re * HW##02_im + M##12_im * HW##02_re); \ HWOUT##02_re = (M##20_re * HW##00_re - 
M##20_im * HW##00_im) \ + (M##21_re * HW##01_re - M##21_im * HW##01_im) \ + (M##22_re * HW##02_re - M##22_im * HW##02_im); \ HWOUT##02_im = (M##20_re * HW##00_im + M##20_im * HW##00_re) \ + (M##21_re * HW##01_im + M##21_im * HW##01_re) \ + (M##22_re * HW##02_im + M##22_im * HW##02_re); \ HWOUT##10_re = (M##00_re * HW##10_re - M##00_im * HW##10_im) \ + (M##01_re * HW##11_re - M##01_im * HW##11_im) \ + (M##02_re * HW##12_re - M##02_im * HW##12_im); \ HWOUT##10_im = (M##00_re * HW##10_im + M##00_im * HW##10_re) \ + (M##01_re * HW##11_im + M##01_im * HW##11_re) \ + (M##02_re * HW##12_im + M##02_im * HW##12_re); \ HWOUT##11_re = (M##10_re * HW##10_re - M##10_im * HW##10_im) \ + (M##11_re * HW##11_re - M##11_im * HW##11_im) \ + (M##12_re * HW##12_re - M##12_im * HW##12_im); \ HWOUT##11_im = (M##10_re * HW##10_im + M##10_im * HW##10_re) \ + (M##11_re * HW##11_im + M##11_im * HW##11_re) \ + (M##12_re * HW##12_im + M##12_im * HW##12_re); \ HWOUT##12_re = (M##20_re * HW##10_re - M##20_im * HW##10_im) \ + (M##21_re * HW##11_re - M##21_im * HW##11_im) \ + (M##22_re * HW##12_re - M##22_im * HW##12_im); \ HWOUT##12_im = (M##20_re * HW##10_im + M##20_im * HW##10_re) \ + (M##21_re * HW##11_im + M##21_im * HW##11_re) \ + (M##22_re * HW##12_im + M##22_im * HW##12_re); #define ADJ_MAT_MUL_HW(M, HW, HWOUT) \ HWOUT##00_re = (M##00_re * HW##00_re + M##00_im * HW##00_im) \ + (M##10_re * HW##01_re + M##10_im * HW##01_im) \ + (M##20_re * HW##02_re + M##20_im * HW##02_im); \ HWOUT##00_im = (M##00_re * HW##00_im - M##00_im * HW##00_re) \ + (M##10_re * HW##01_im - M##10_im * HW##01_re) \ + (M##20_re * HW##02_im - M##20_im * HW##02_re); \ HWOUT##01_re = (M##01_re * HW##00_re + M##01_im * HW##00_im) \ + (M##11_re * HW##01_re + M##11_im * HW##01_im) \ + (M##21_re * HW##02_re + M##21_im * HW##02_im); \ HWOUT##01_im = (M##01_re * HW##00_im - M##01_im * HW##00_re) \ + (M##11_re * HW##01_im - M##11_im * HW##01_re) \ + (M##21_re * HW##02_im - M##21_im * HW##02_re); \ HWOUT##02_re = (M##02_re * HW##00_re + M##02_im * HW##00_im) \ + (M##12_re * HW##01_re + M##12_im * HW##01_im) \ + (M##22_re * HW##02_re + M##22_im * HW##02_im); \ HWOUT##02_im = (M##02_re * HW##00_im - M##02_im * HW##00_re) \ + (M##12_re * HW##01_im - M##12_im * HW##01_re) \ + (M##22_re * HW##02_im - M##22_im * HW##02_re); \ HWOUT##10_re = (M##00_re * HW##10_re + M##00_im * HW##10_im) \ + (M##10_re * HW##11_re + M##10_im * HW##11_im) \ + (M##20_re * HW##12_re + M##20_im * HW##12_im); \ HWOUT##10_im = (M##00_re * HW##10_im - M##00_im * HW##10_re) \ + (M##10_re * HW##11_im - M##10_im * HW##11_re) \ + (M##20_re * HW##12_im - M##20_im * HW##12_re); \ HWOUT##11_re = (M##01_re * HW##10_re + M##01_im * HW##10_im) \ + (M##11_re * HW##11_re + M##11_im * HW##11_im) \ + (M##21_re * HW##12_re + M##21_im * HW##12_im); \ HWOUT##11_im = (M##01_re * HW##10_im - M##01_im * HW##10_re) \ + (M##11_re * HW##11_im - M##11_im * HW##11_re) \ + (M##21_re * HW##12_im - M##21_im * HW##12_re); \ HWOUT##12_re = (M##02_re * HW##10_re + M##02_im * HW##10_im) \ + (M##12_re * HW##11_re + M##12_im * HW##11_im) \ + (M##22_re * HW##12_re + M##22_im * HW##12_im); \ HWOUT##12_im = (M##02_re * HW##10_im - M##02_im * HW##10_re) \ + (M##12_re * HW##11_im - M##12_im * HW##11_re) \ + (M##22_re * HW##12_im - M##22_im * HW##12_re); #define SU3_PROJECTOR(va, vb, m) \ m##00_re = va##0_re * vb##0_re + va##0_im * vb##0_im; \ m##00_im = va##0_im * vb##0_re - va##0_re * vb##0_im; \ m##01_re = va##0_re * vb##1_re + va##0_im * vb##1_im; \ m##01_im = va##0_im * vb##1_re - va##0_re * vb##1_im; \ m##02_re = va##0_re 
* vb##2_re + va##0_im * vb##2_im; \ m##02_im = va##0_im * vb##2_re - va##0_re * vb##2_im; \ m##10_re = va##1_re * vb##0_re + va##1_im * vb##0_im; \ m##10_im = va##1_im * vb##0_re - va##1_re * vb##0_im; \ m##11_re = va##1_re * vb##1_re + va##1_im * vb##1_im; \ m##11_im = va##1_im * vb##1_re - va##1_re * vb##1_im; \ m##12_re = va##1_re * vb##2_re + va##1_im * vb##2_im; \ m##12_im = va##1_im * vb##2_re - va##1_re * vb##2_im; \ m##20_re = va##2_re * vb##0_re + va##2_im * vb##0_im; \ m##20_im = va##2_im * vb##0_re - va##2_re * vb##0_im; \ m##21_re = va##2_re * vb##1_re + va##2_im * vb##1_im; \ m##21_im = va##2_im * vb##1_re - va##2_re * vb##1_im; \ m##22_re = va##2_re * vb##2_re + va##2_im * vb##2_im; \ m##22_im = va##2_im * vb##2_re - va##2_re * vb##2_im; //vc = va + vb*s #define SCALAR_MULT_ADD_SU3_VECTOR(va, vb, s, vc) do { \ vc##0_re = va##0_re + vb##0_re * s; \ vc##0_im = va##0_im + vb##0_im * s; \ vc##1_re = va##1_re + vb##1_re * s; \ vc##1_im = va##1_im + vb##1_im * s; \ vc##2_re = va##2_re + vb##2_re * s; \ vc##2_im = va##2_im + vb##2_im * s; \ }while (0) #define FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mydir, idx, new_idx) do { \ switch(mydir){ \ case 0: \ new_idx = ( (new_x1==X1m1)?idx-X1m1:idx+1); \ new_x1 = (new_x1==X1m1)?0:new_x1+1; \ break; \ case 1: \ new_idx = ( (new_x2==X2m1)?idx-X2X1mX1:idx+X1); \ new_x2 = (new_x2==X2m1)?0:new_x2+1; \ break; \ case 2: \ new_idx = ( (new_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \ new_x3 = (new_x3==X3m1)?0:new_x3+1; \ break; \ case 3: \ new_idx = ( (new_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \ new_x4 = (new_x4==X4m1)?0:new_x4+1; \ break; \ } \ }while(0) #define FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mydir, idx, new_idx) do { \ switch(mydir){ \ case 0: \ new_idx = ( (new_x1==0)?idx+X1m1:idx-1); \ new_x1 = (new_x1==0)?X1m1:new_x1 - 1; \ break; \ case 1: \ new_idx = ( (new_x2==0)?idx+X2X1mX1:idx-X1); \ new_x2 = (new_x2==0)?X2m1:new_x2 - 1; \ break; \ case 2: \ new_idx = ( (new_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \ new_x3 = (new_x3==0)?X3m1:new_x3 - 1; \ break; \ case 3: \ new_idx = ( (new_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \ new_x4 = (new_x4==0)?X4m1:new_x4 - 1; \ break; \ } \ }while(0) #define FF_COMPUTE_NEW_FULL_IDX_PLUS(old_x1, old_x2, old_x3, old_x4, idx, mydir, new_idx) do { \ switch(mydir){ \ case 0: \ new_idx = ( (old_x1==X1m1)?idx-X1m1:idx+1); \ break; \ case 1: \ new_idx = ( (old_x2==X2m1)?idx-X2X1mX1:idx+X1); \ break; \ case 2: \ new_idx = ( (old_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \ break; \ case 3: \ new_idx = ( (old_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \ break; \ } \ }while(0) #define FF_COMPUTE_NEW_FULL_IDX_MINUS(old_x1, old_x2, old_x3, old_x4, idx, mydir, new_idx) do { \ switch(mydir){ \ case 0: \ new_idx = ( (old_x1==0)?idx+X1m1:idx-1); \ break; \ case 1: \ new_idx = ( (old_x2==0)?idx+X2X1mX1:idx-X1); \ break; \ case 2: \ new_idx = ( (old_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \ break; \ case 3: \ new_idx = ( (old_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \ break; \ } \ }while(0) //this macro require linka, linkb, and ah variables defined #define ADD_FORCE_TO_MOM(hw1, hw2, idx, dir, cf,oddness) do{ \ Float2 my_coeff; \ int mydir; \ if (GOES_BACKWARDS(dir)){ \ mydir=OPP_DIR(dir); \ my_coeff.x = -cf.x; \ my_coeff.y = -cf.y; \ }else{ \ mydir=dir; \ my_coeff.x = cf.x; \ my_coeff.y = cf.y; \ } \ Float2 tmp_coeff; \ tmp_coeff.x = my_coeff.x; \ tmp_coeff.y = my_coeff.y; \ if(oddness){ \ tmp_coeff.x = - my_coeff.x; \ tmp_coeff.y = - my_coeff.y; \ } \ Float2* mom = oddness?momOdd:momEven; \ LOAD_ANTI_HERMITIAN(mom, mydir, idx, AH); \ 
UNCOMPRESS_ANTI_HERMITIAN(ah, linka); \ SU3_PROJECTOR(hw1##0, hw2##0, linkb); \ SCALAR_MULT_ADD_SU3_MATRIX(linka, linkb, tmp_coeff.x, linka); \ SU3_PROJECTOR(hw1##1, hw2##1, linkb); \ SCALAR_MULT_ADD_SU3_MATRIX(linka, linkb, tmp_coeff.y, linka); \ MAKE_ANTI_HERMITIAN(linka, ah); \ WRITE_ANTI_HERMITIAN(mom, mydir, idx, AH, Vh); \ }while(0) #define FF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, i1,i2,i3,i4) do { \ sign =1; \ switch(dir){ \ case XUP: \ if ( (i4 & 1) == 1){ \ sign = -1; \ } \ break; \ case YUP: \ if ( ((i4+i1) & 1) == 1){ \ sign = -1; \ } \ break; \ case ZUP: \ if ( ((i4+i1+i2) & 1) == 1){ \ sign = -1; \ } \ break; \ case TUP: \ if (i4 == X4m1 ){ \ sign = -1; \ } \ break; \ } \ }while (0) #define hwa00_re HWA0.x #define hwa00_im HWA0.y #define hwa01_re HWA1.x #define hwa01_im HWA1.y #define hwa02_re HWA2.x #define hwa02_im HWA2.y #define hwa10_re HWA3.x #define hwa10_im HWA3.y #define hwa11_re HWA4.x #define hwa11_im HWA4.y #define hwa12_re HWA5.x #define hwa12_im HWA5.y #define hwb00_re HWB0.x #define hwb00_im HWB0.y #define hwb01_re HWB1.x #define hwb01_im HWB1.y #define hwb02_re HWB2.x #define hwb02_im HWB2.y #define hwb10_re HWB3.x #define hwb10_im HWB3.y #define hwb11_re HWB4.x #define hwb11_im HWB4.y #define hwb12_re HWB5.x #define hwb12_im HWB5.y #define hwc00_re HWC0.x #define hwc00_im HWC0.y #define hwc01_re HWC1.x #define hwc01_im HWC1.y #define hwc02_re HWC2.x #define hwc02_im HWC2.y #define hwc10_re HWC3.x #define hwc10_im HWC3.y #define hwc11_re HWC4.x #define hwc11_im HWC4.y #define hwc12_re HWC5.x #define hwc12_im HWC5.y #define hwd00_re HWD0.x #define hwd00_im HWD0.y #define hwd01_re HWD1.x #define hwd01_im HWD1.y #define hwd02_re HWD2.x #define hwd02_im HWD2.y #define hwd10_re HWD3.x #define hwd10_im HWD3.y #define hwd11_re HWD4.x #define hwd11_im HWD4.y #define hwd12_re HWD5.x #define hwd12_im HWD5.y #define hwe00_re HWE0.x #define hwe00_im HWE0.y #define hwe01_re HWE1.x #define hwe01_im HWE1.y #define hwe02_re HWE2.x #define hwe02_im HWE2.y #define hwe10_re HWE3.x #define hwe10_im HWE3.y #define hwe11_re HWE4.x #define hwe11_im HWE4.y #define hwe12_re HWE5.x #define hwe12_im HWE5.y void fermion_force_init_cuda(QudaGaugeParam* param) { #ifdef MULTI_GPU #error "multi gpu is not supported for fermion force computation" #endif static int fermion_force_init_cuda_flag = 0; if (fermion_force_init_cuda_flag) return; fermion_force_init_cuda_flag=1; } /* * This function computes the contribution to momentum from the middle link in a staple * * tempx: IN * Pmu: OUT * P3: OUT * */ template<int sig_positive, int mu_positive, int oddBit, typename Float2> __global__ void do_middle_link_kernel(Float2* tempxEven, Float2* tempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* P3Even, Float2* P3Odd, int sig, int mu, Float2 coeff, float4* linkEven, float4* linkOdd, Float2* momEven, Float2* momOdd) { int sid = blockIdx.x * blockDim.x + threadIdx.x; int z1 = sid / X1h; int x1h = sid - z1*X1h; int z2 = z1 / X2; int x2 = z1 - z2*X2; int x4 = z2 / X3; int x3 = z2 - x4*X3; int x1odd = (x2 + x3 + x4 + oddBit) & 1; int x1 = 2*x1h + x1odd; int X = 2*sid + x1odd; int new_x1, new_x2, new_x3, new_x4; int new_mem_idx; int ad_link_sign=1; int ab_link_sign=1; int bc_link_sign=1; Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5; Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5; Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5; Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5; float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4; float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4; Float2 AH0, AH1, AH2, AH3, AH4; /* sig * A________B * mu | | * D | |C
* * A is the current point (sid) */ int point_b, point_c, point_d; int ad_link_nbr_idx, ab_link_nbr_idx, bc_link_nbr_idx; int mymu; new_x1 = x1; new_x2 = x2; new_x3 = x3; new_x4 = x4; if(mu_positive){ mymu =mu; FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mu, X, new_mem_idx); }else{ mymu = OPP_DIR(mu); FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(OPP_DIR(mu), X, new_mem_idx); } point_d = (new_mem_idx >> 1); if (mu_positive){ ad_link_nbr_idx = point_d; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); }else{ ad_link_nbr_idx = sid; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4); } int mysig; if(sig_positive){ mysig = sig; FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, new_mem_idx, new_mem_idx); }else{ mysig = OPP_DIR(sig); FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), new_mem_idx, new_mem_idx); } point_c = (new_mem_idx >> 1); if (mu_positive){ bc_link_nbr_idx = point_c; FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); } new_x1 = x1; new_x2 = x2; new_x3 = x3; new_x4 = x4; if(sig_positive){ FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, X, new_mem_idx); }else{ FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), X, new_mem_idx); } point_b = (new_mem_idx >> 1); if (!mu_positive){ bc_link_nbr_idx = point_b; FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); } if(sig_positive){ ab_link_nbr_idx = sid; FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, x1, x2, x3, x4); }else{ ab_link_nbr_idx = point_b; FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, new_x1,new_x2,new_x3,new_x4); } LOAD_HW(tempxEven, tempxOdd, point_d, HWA, 1-oddBit ); if(mu_positive){ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, 1-oddBit); }else{ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, oddBit); } RECONSTRUCT_LINK_12(ad_link_sign, linka); if (mu_positive){ ADJ_MAT_MUL_HW(linka, hwa, hwd); }else{ MAT_MUL_HW(linka, hwa, hwd); } WRITE_HW(PmuEven,PmuOdd, sid, HWD, oddBit); LOAD_HW(tempxEven,tempxOdd, point_c, HWA, oddBit); if(mu_positive){ FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, oddBit); }else{ FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, 1-oddBit); } RECONSTRUCT_LINK_12(bc_link_sign, linka); if (mu_positive){ ADJ_MAT_MUL_HW(linka, hwa, hwb); }else{ MAT_MUL_HW(linka, hwa, hwb); } if(sig_positive){ FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKB, oddBit); }else{ FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKB, 1-oddBit); } RECONSTRUCT_LINK_12(ab_link_sign, linkb); if (sig_positive){ MAT_MUL_HW(linkb, hwb, hwc); }else{ ADJ_MAT_MUL_HW(linkb, hwb, hwc); } WRITE_HW(P3Even, P3Odd, sid, HWC, oddBit); if (sig_positive){ //add the force to mom ADD_FORCE_TO_MOM(hwc, hwd, sid, sig, coeff, oddBit); } } template<typename Float2> static void middle_link_kernel(Float2* tempxEven, Float2* tempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* P3Even, Float2* P3Odd, int sig, int mu, Float2 coeff, float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink, Float2* momEven, Float2* momOdd, dim3 gridDim, dim3 BlockDim) { dim3 halfGridDim(gridDim.x/2, 1,1); #define CALL_MIDDLE_LINK_KERNEL(sig_sign, mu_sign) \ hipLaunchKernelGGL(( do_middle_link_kernel<sig_sign, mu_sign,0>), dim3(halfGridDim), dim3(BlockDim), 0, 0, tempxEven, tempxOdd, \ PmuEven, PmuOdd, \ P3Even, P3Odd, \ sig, mu, coeff, \ linkEven, linkOdd, \ momEven, momOdd); \ hipLaunchKernelGGL(( do_middle_link_kernel<sig_sign, mu_sign, 1>), dim3(halfGridDim), dim3(BlockDim), 0, 0, tempxEven, tempxOdd, \ PmuEven, PmuOdd, \ P3Even, P3Odd, \ sig, mu, coeff, \ linkEven, linkOdd, \ momEven, momOdd); if (GOES_FORWARDS(sig) && 
GOES_FORWARDS(mu)){ CALL_MIDDLE_LINK_KERNEL(1, 1); }else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){ CALL_MIDDLE_LINK_KERNEL(1, 0); }else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){ CALL_MIDDLE_LINK_KERNEL(0, 1); }else{ CALL_MIDDLE_LINK_KERNEL(0, 0); } #undef CALL_MIDDLE_LINK_KERNEL } /* * Computes contribution to momentum from the side links in a staple * * P3: IN * P3mu: not used * Tempx: IN * Pmu: IN * shortPE: OUT * */ template<int sig_positive, int mu_positive, int oddBit, typename Float2> __global__ void do_side_link_kernel(Float2* P3Even, Float2* P3Odd, Float2* P3muEven, Float2* P3muOdd, Float2* TempxEven, Float2* TempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* shortPEven, Float2* shortPOdd, int sig, int mu, Float2 coeff, Float2 accumu_coeff, float4* linkEven, float4* linkOdd, Float2* momEven, Float2* momOdd) { Float2 mcoeff; mcoeff.x = -coeff.x; mcoeff.y = -coeff.y; int sid = blockIdx.x * blockDim.x + threadIdx.x; int z1 = sid / X1h; int x1h = sid - z1*X1h; int z2 = z1 / X2; int x2 = z1 - z2*X2; int x4 = z2 / X3; int x3 = z2 - x4*X3; int x1odd = (x2 + x3 + x4 + oddBit) & 1; int x1 = 2*x1h + x1odd; int X = 2*sid + x1odd; int ad_link_sign = 1; Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5; Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5; Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5; float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4; float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4; Float2 AH0, AH1, AH2, AH3, AH4; /* * compute the side link contribution to the momentum * * * sig * A________B * | | mu * D | |C * * A is the current point (sid) */ int point_d; int ad_link_nbr_idx; int mymu; int new_mem_idx; int new_x1 = x1; int new_x2 = x2; int new_x3 = x3; int new_x4 = x4; if(mu_positive){ mymu =mu; FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mymu,X, new_mem_idx); }else{ mymu = OPP_DIR(mu); FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mymu, X, new_mem_idx); } point_d = (new_mem_idx >> 1); if (mu_positive){ ad_link_nbr_idx = point_d; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); }else{ ad_link_nbr_idx = sid; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4); } LOAD_HW(P3Even, P3Odd, sid, HWA, oddBit); if(mu_positive){ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, 1 - oddBit); }else{ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, oddBit); } RECONSTRUCT_LINK_12(ad_link_sign, linka); if (mu_positive){ MAT_MUL_HW(linka, hwa, hwb); }else{ ADJ_MAT_MUL_HW(linka, hwa, hwb); } //start to add side link force if (mu_positive){ LOAD_HW(TempxEven, TempxOdd, point_d, HWC, 1-oddBit); if (sig_positive){ ADD_FORCE_TO_MOM(hwb, hwc, point_d, mu, coeff, 1-oddBit); }else{ ADD_FORCE_TO_MOM(hwc, hwb, point_d, OPP_DIR(mu), mcoeff, 1- oddBit); } }else{ LOAD_HW(PmuEven, PmuOdd, sid, HWC, oddBit); if (sig_positive){ ADD_FORCE_TO_MOM(hwa, hwc, sid, mu, mcoeff, oddBit); }else{ ADD_FORCE_TO_MOM(hwc, hwa, sid, OPP_DIR(mu), coeff, oddBit); } } if (shortPOdd){ LOAD_HW(shortPEven, shortPOdd, point_d, HWA, 1-oddBit); SCALAR_MULT_ADD_SU3_VECTOR(hwa0, hwb0, accumu_coeff.x, hwa0); SCALAR_MULT_ADD_SU3_VECTOR(hwa1, hwb1, accumu_coeff.y, hwa1); WRITE_HW(shortPEven, shortPOdd, point_d, HWA, 1-oddBit); } } template<typename Float2> static void side_link_kernel(Float2* P3Even, Float2* P3Odd, Float2* P3muEven, Float2* P3muOdd, Float2* TempxEven, Float2* TempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* shortPEven, Float2* shortPOdd, int sig, int mu, Float2 coeff, Float2 accumu_coeff, float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink, Float2* momEven, Float2* momOdd, dim3 gridDim, dim3 blockDim) { dim3 
halfGridDim(gridDim.x/2,1,1); #define CALL_SIDE_LINK_KERNEL(sig_sign, mu_sign) \ hipLaunchKernelGGL(( do_side_link_kernel<sig_sign,mu_sign,0>), dim3(halfGridDim), dim3(blockDim), 0, 0, P3Even, P3Odd, \ P3muEven, P3muOdd, \ TempxEven, TempxOdd, \ PmuEven, PmuOdd, \ shortPEven, shortPOdd, \ sig, mu, coeff, accumu_coeff, \ linkEven, linkOdd, \ momEven, momOdd); \ hipLaunchKernelGGL(( do_side_link_kernel<sig_sign,mu_sign,1>), dim3(halfGridDim), dim3(blockDim), 0, 0, P3Even, P3Odd, \ P3muEven, P3muOdd, \ TempxEven, TempxOdd, \ PmuEven, PmuOdd, \ shortPEven, shortPOdd, \ sig, mu, coeff, accumu_coeff, \ linkEven, linkOdd, \ momEven, momOdd); if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){ CALL_SIDE_LINK_KERNEL(1,1); }else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){ CALL_SIDE_LINK_KERNEL(1,0); }else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){ CALL_SIDE_LINK_KERNEL(0,1); }else{ CALL_SIDE_LINK_KERNEL(0,0); } #undef CALL_SIDE_LINK_KERNEL } /* * This function computes the contribution to momentum from middle and side links * * tempx: IN * Pmu: not used * P3: not used * P3mu: not used * shortP: OUT * */ template<int sig_positive, int mu_positive, int oddBit, typename Float2> __global__ void do_all_link_kernel(Float2* tempxEven, Float2* tempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* P3Even, Float2* P3Odd, Float2* P3muEven, Float2* P3muOdd, Float2* shortPEven, Float2* shortPOdd, int sig, int mu, Float2 coeff, Float2 mcoeff, Float2 accumu_coeff, float4* linkEven, float4* linkOdd, Float2* momEven, Float2* momOdd) { int sid = blockIdx.x * blockDim.x + threadIdx.x; int z1 = sid / X1h; int x1h = sid - z1*X1h; int z2 = z1 / X2; int x2 = z1 - z2*X2; int x4 = z2 / X3; int x3 = z2 - x4*X3; int x1odd = (x2 + x3 + x4 + oddBit) & 1; int x1 = 2*x1h + x1odd; int X = 2*sid + x1odd; int new_x1, new_x2, new_x3, new_x4; int ad_link_sign=1; int ab_link_sign=1; int bc_link_sign=1; Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5; Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5; Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5; Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5; Float2 HWE0, HWE1, HWE2, HWE3, HWE4, HWE5; float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4; float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4; float4 LINKC0, LINKC1, LINKC2, LINKC3, LINKC4; Float2 AH0, AH1, AH2, AH3, AH4; /* sig * A________B * mu | | * D | |C * * A is the current point (sid) */ int point_b, point_c, point_d; int ad_link_nbr_idx, ab_link_nbr_idx, bc_link_nbr_idx; int mymu; int new_mem_idx; new_x1 = x1; new_x2 = x2; new_x3 = x3; new_x4 = x4; if(mu_positive){ mymu =mu; FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mu, X, new_mem_idx); }else{ mymu = OPP_DIR(mu); FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(OPP_DIR(mu), X, new_mem_idx); } point_d = (new_mem_idx >> 1); if (mu_positive){ ad_link_nbr_idx = point_d; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); }else{ ad_link_nbr_idx = sid; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4); } int mysig; if(sig_positive){ mysig = sig; FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, new_mem_idx, new_mem_idx); }else{ mysig = OPP_DIR(sig); FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), new_mem_idx, new_mem_idx); } point_c = (new_mem_idx >> 1); if (mu_positive){ bc_link_nbr_idx = point_c; FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); } new_x1 = x1; new_x2 = x2; new_x3 = x3; new_x4 = x4; if(sig_positive){ FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, X, new_mem_idx); }else{ FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), X, new_mem_idx); } point_b = (new_mem_idx >> 1); 
if (!mu_positive){ bc_link_nbr_idx = point_b; FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); } if(sig_positive){ ab_link_nbr_idx = sid; FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, x1, x2, x3, x4); }else{ ab_link_nbr_idx = point_b; FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, new_x1,new_x2,new_x3,new_x4); } LOAD_HW(tempxEven, tempxOdd, point_d, HWE, 1-oddBit); if (mu_positive){ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKC, 1-oddBit); }else{ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKC, oddBit); } RECONSTRUCT_LINK_12(ad_link_sign, linkc); if (mu_positive){ ADJ_MAT_MUL_HW(linkc, hwe, hwd); }else{ MAT_MUL_HW(linkc, hwe, hwd); } //we do not need to write Pmu here //WRITE_HW(myPmu, sid, HWD); LOAD_HW(tempxEven, tempxOdd, point_c, HWA, oddBit); if (mu_positive){ FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, oddBit); }else{ FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, 1-oddBit); } RECONSTRUCT_LINK_12(bc_link_sign, linka); if (mu_positive){ ADJ_MAT_MUL_HW(linka, hwa, hwb); }else{ MAT_MUL_HW(linka, hwa, hwb); } if (sig_positive){ FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKA, oddBit); }else{ FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKA, 1-oddBit); } RECONSTRUCT_LINK_12(ab_link_sign, linka); if (sig_positive){ MAT_MUL_HW(linka, hwb, hwc); }else{ ADJ_MAT_MUL_HW(linka, hwb, hwc); } //we do not need to write P3 here //WRITE_HW(myP3, sid, HWC); //The middle link contribution if (sig_positive){ //add the force to mom ADD_FORCE_TO_MOM(hwc, hwd, sid, sig, mcoeff, oddBit); } //P3 is hwc //ad_link is linkc if (mu_positive){ MAT_MUL_HW(linkc, hwc, hwa); }else{ ADJ_MAT_MUL_HW(linkc, hwc, hwa); } //accumulate P7rho to P5 //WRITE_HW(otherP3mu, point_d, HWA); LOAD_HW(shortPEven, shortPOdd, point_d, HWB, 1-oddBit); SCALAR_MULT_ADD_SU3_VECTOR(hwb0, hwa0, accumu_coeff.x, hwb0); SCALAR_MULT_ADD_SU3_VECTOR(hwb1, hwa1, accumu_coeff.y, hwb1); WRITE_HW(shortPEven, shortPOdd, point_d, HWB, 1-oddBit); //hwe holds tempx at point_d //hwd holds Pmu at point A(sid) if (mu_positive){ if (sig_positive){ ADD_FORCE_TO_MOM(hwa, hwe, point_d, mu, coeff, 1-oddBit); }else{ ADD_FORCE_TO_MOM(hwe, hwa, point_d, OPP_DIR(mu), mcoeff, 1- oddBit); } }else{ if (sig_positive){ ADD_FORCE_TO_MOM(hwc, hwd, sid, mu, mcoeff, oddBit); }else{ ADD_FORCE_TO_MOM(hwd, hwc, sid, OPP_DIR(mu), coeff, oddBit); } } } template<typename Float2> static void all_link_kernel(Float2* tempxEven, Float2* tempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* P3Even, Float2* P3Odd, Float2* P3muEven, Float2* P3muOdd, Float2* shortPEven, Float2* shortPOdd, int sig, int mu, Float2 coeff, Float2 mcoeff, Float2 accumu_coeff, float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink, Float2* momEven, Float2* momOdd, dim3 gridDim, dim3 blockDim) { dim3 halfGridDim(gridDim.x/2, 1,1); #define CALL_ALL_LINK_KERNEL(sig_sign, mu_sign) \ hipLaunchKernelGGL(( do_all_link_kernel<sig_sign,mu_sign,0>), dim3(halfGridDim), dim3(blockDim), 0, 0, tempxEven, tempxOdd, \ PmuEven, PmuOdd, \ P3Even, P3Odd, \ P3muEven, P3muOdd, \ shortPEven, shortPOdd, \ sig, mu, coeff, mcoeff, accumu_coeff, \ linkEven, linkOdd, \ momEven, momOdd); \ hipLaunchKernelGGL(( do_all_link_kernel<sig_sign,mu_sign,1>), dim3(halfGridDim), dim3(blockDim), 0, 0, tempxEven, tempxOdd, \ PmuEven, PmuOdd, \ P3Even, P3Odd, \ P3muEven, P3muOdd, \ shortPEven, shortPOdd, \ sig, mu, coeff, mcoeff, accumu_coeff, \ linkEven, linkOdd, \ momEven, momOdd); if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){ CALL_ALL_LINK_KERNEL(1,1); }else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){ 
CALL_ALL_LINK_KERNEL(1,0); }else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){ CALL_ALL_LINK_KERNEL(0,1); }else{ CALL_ALL_LINK_KERNEL(0,0); } #undef CALL_ALL_LINK_KERNEL } /* This function computes the one and naik terms' contribution to momentum * * Tempx: IN * Pmu: IN * Pnumu: IN * */ template <int oddBit, typename Float2> __global__ void do_one_and_naik_terms_kernel(Float2* TempxEven, Float2* TempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* PnumuEven, Float2* PnumuOdd, int mu, Float2 OneLink, Float2 Naik, Float2 mNaik, float4* linkEven, float4* linkOdd, Float2* momEven, Float2* momOdd) { Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5; Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5; Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5; Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5; float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4; float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4; Float2 AH0, AH1, AH2, AH3, AH4; int sid = blockIdx.x * blockDim.x + threadIdx.x; int z1 = sid / X1h; int x1h = sid - z1*X1h; int z2 = z1 / X2; int x2 = z1 - z2*X2; int x4 = z2 / X3; int x3 = z2 - x4*X3; int x1odd = (x2 + x3 + x4 + oddBit) & 1; int x1 = 2*x1h + x1odd; //int X = 2*sid + x1odd; int dx[4]; int new_x1, new_x2, new_x3, new_x4, new_idx; int sign=1; if (GOES_BACKWARDS(mu)){ //The one link LOAD_HW(PmuEven, PmuOdd, sid, HWA, oddBit); LOAD_HW(TempxEven, TempxOdd, sid, HWB, oddBit); ADD_FORCE_TO_MOM(hwa, hwb, sid, OPP_DIR(mu), OneLink, oddBit); //Naik term dx[3]=dx[2]=dx[1]=dx[0]=0; dx[OPP_DIR(mu)] = -1; new_x1 = (x1 + dx[0] + X1)%X1; new_x2 = (x2 + dx[1] + X2)%X2; new_x3 = (x3 + dx[2] + X3)%X3; new_x4 = (x4 + dx[3] + X4)%X4; new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1; LOAD_HW(TempxEven, TempxOdd, new_idx, HWA, 1-oddBit); FF_LOAD_MATRIX(OPP_DIR(mu), new_idx, LINKA, 1-oddBit); FF_COMPUTE_RECONSTRUCT_SIGN(sign, OPP_DIR(mu), new_x1,new_x2,new_x3,new_x4); RECONSTRUCT_LINK_12(sign, linka); ADJ_MAT_MUL_HW(linka, hwa, hwc); //Popmu LOAD_HW(PnumuEven, PnumuOdd, sid, HWD, oddBit); ADD_FORCE_TO_MOM(hwd, hwc, sid, OPP_DIR(mu), mNaik, oddBit); dx[3]=dx[2]=dx[1]=dx[0]=0; dx[OPP_DIR(mu)] = 1; new_x1 = (x1 + dx[0] + X1)%X1; new_x2 = (x2 + dx[1] + X2)%X2; new_x3 = (x3 + dx[2] + X3)%X3; new_x4 = (x4 + dx[3] + X4)%X4; new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1; LOAD_HW(PnumuEven, PnumuOdd, new_idx, HWA, 1-oddBit); FF_LOAD_MATRIX(OPP_DIR(mu), sid, LINKA, oddBit); FF_COMPUTE_RECONSTRUCT_SIGN(sign, OPP_DIR(mu), x1, x2, x3, x4); RECONSTRUCT_LINK_12(sign, linka); MAT_MUL_HW(linka, hwa, hwc); ADD_FORCE_TO_MOM(hwc, hwb, sid, OPP_DIR(mu), Naik, oddBit); }else{ dx[3]=dx[2]=dx[1]=dx[0]=0; dx[mu] = 1; new_x1 = (x1 + dx[0] + X1)%X1; new_x2 = (x2 + dx[1] + X2)%X2; new_x3 = (x3 + dx[2] + X3)%X3; new_x4 = (x4 + dx[3] + X4)%X4; new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1; LOAD_HW(TempxEven, TempxOdd, new_idx, HWA, 1-oddBit); FF_LOAD_MATRIX(mu, sid, LINKA, oddBit); FF_COMPUTE_RECONSTRUCT_SIGN(sign, mu, x1, x2, x3, x4); RECONSTRUCT_LINK_12(sign, linka); MAT_MUL_HW(linka, hwa, hwb); LOAD_HW(PnumuEven, PnumuOdd, sid, HWC, oddBit); ADD_FORCE_TO_MOM(hwb, hwc, sid, mu, Naik, oddBit); } } template<typename Float2> static void one_and_naik_terms_kernel(Float2* TempxEven, Float2* TempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* PnumuEven, Float2* PnumuOdd, int mu, Float2 OneLink, Float2 Naik, Float2 mNaik, float4* linkEven, float4* linkOdd, Float2* momEven, Float2* momOdd, dim3 gridDim, dim3 blockDim) { dim3 halfGridDim(gridDim.x/2, 1,1); hipLaunchKernelGGL(( do_one_and_naik_terms_kernel<0>), dim3(halfGridDim), dim3(blockDim), 
0, 0, TempxEven, TempxOdd, PmuEven, PmuOdd, PnumuEven, PnumuOdd, mu, OneLink, Naik, mNaik, linkEven, linkOdd, momEven, momOdd); hipLaunchKernelGGL(( do_one_and_naik_terms_kernel<1>), dim3(halfGridDim), dim3(blockDim), 0, 0, TempxEven, TempxOdd, PmuEven, PmuOdd, PnumuEven, PnumuOdd, mu, OneLink, Naik, mNaik, linkEven, linkOdd, momEven, momOdd); return; } #define Pmu tempvec[0] #define Pnumu tempvec[1] #define Prhonumu tempvec[2] #define P7 tempvec[3] #define P7rho tempvec[4] #define P7rhonu tempvec[5] #define P5 tempvec[6] #define P3 tempvec[7] #define P5nu tempvec[3] #define P3mu tempvec[3] #define Popmu tempvec[4] #define Pmumumu tempvec[4] template<typename Real> static void do_fermion_force_cuda(Real eps, Real weight1, Real weight2, Real* act_path_coeff, FullHw cudaHw, cudaGaugeField &siteLink, cudaGaugeField &cudaMom, FullHw tempvec[8], QudaGaugeParam* param) { int mu, nu, rho, sig; float2 coeff; float2 OneLink, Lepage, Naik, FiveSt, ThreeSt, SevenSt; float2 mNaik, mLepage, mFiveSt, mThreeSt, mSevenSt; Real ferm_epsilon; ferm_epsilon = 2.0*weight1*eps; OneLink.x = act_path_coeff[0]*ferm_epsilon ; Naik.x = act_path_coeff[1]*ferm_epsilon ; mNaik.x = -Naik.x; ThreeSt.x = act_path_coeff[2]*ferm_epsilon ; mThreeSt.x = -ThreeSt.x; FiveSt.x = act_path_coeff[3]*ferm_epsilon ; mFiveSt.x = -FiveSt.x; SevenSt.x = act_path_coeff[4]*ferm_epsilon ; mSevenSt.x = -SevenSt.x; Lepage.x = act_path_coeff[5]*ferm_epsilon ; mLepage.x = -Lepage.x; ferm_epsilon = 2.0*weight2*eps; OneLink.y = act_path_coeff[0]*ferm_epsilon ; Naik.y = act_path_coeff[1]*ferm_epsilon ; mNaik.y = -Naik.y; ThreeSt.y = act_path_coeff[2]*ferm_epsilon ; mThreeSt.y = -ThreeSt.y; FiveSt.y = act_path_coeff[3]*ferm_epsilon ; mFiveSt.y = -FiveSt.y; SevenSt.y = act_path_coeff[4]*ferm_epsilon ; mSevenSt.y = -SevenSt.y; Lepage.y = act_path_coeff[5]*ferm_epsilon ; mLepage.y = -Lepage.y; int DirectLinks[8] ; for(mu=0;mu<8;mu++){ DirectLinks[mu] = 0 ; } int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3]; dim3 blockDim(BLOCK_DIM,1,1); dim3 gridDim(volume/blockDim.x, 1, 1); hipBindTexture(0, siteLink0TexSingle_recon, siteLink.Even_p(), siteLink.Bytes()/2); hipBindTexture(0, siteLink1TexSingle_recon, siteLink.Odd_p(), siteLink.Bytes()/2); for(sig=0; sig < 8; sig++){ for(mu = 0; mu < 8; mu++){ if ( (mu == sig) || (mu == OPP_DIR(sig))){ continue; } //3-link //Kernel A: middle link middle_link_kernel( (float2*)cudaHw.even.data, (float2*)cudaHw.odd.data, (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)P3.even.data, (float2*)P3.odd.data, sig, mu, mThreeSt, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); for(nu=0; nu < 8; nu++){ if (nu == sig || nu == OPP_DIR(sig) || nu == mu || nu == OPP_DIR(mu)){ continue; } //5-link: middle link //Kernel B middle_link_kernel( (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, (float2*)P5.even.data, (float2*)P5.odd.data, sig, nu, FiveSt, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); for(rho =0; rho < 8; rho++){ if (rho == sig || rho == OPP_DIR(sig) || rho == mu || rho == OPP_DIR(mu) || rho == nu || rho == OPP_DIR(nu)){ continue; } //7-link: middle link and side link //kernel C if(FiveSt.x != 0)coeff.x = SevenSt.x/FiveSt.x ; else coeff.x = 0; if(FiveSt.y != 0)coeff.y = SevenSt.y/FiveSt.y ; else coeff.y = 0; 
all_link_kernel((float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, (float2*)Prhonumu.even.data, (float2*)Prhonumu.odd.data, (float2*)P7.even.data, (float2*)P7.odd.data, (float2*)P7rho.even.data, (float2*)P7rho.odd.data, (float2*)P5.even.data, (float2*)P5.odd.data, sig, rho, SevenSt,mSevenSt,coeff, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); }//rho //5-link: side link //kernel B2 if(ThreeSt.x != 0)coeff.x = FiveSt.x/ThreeSt.x ; else coeff.x = 0; if(ThreeSt.y != 0)coeff.y = FiveSt.y/ThreeSt.y ; else coeff.y = 0; side_link_kernel((float2*)P5.even.data, (float2*)P5.odd.data, (float2*)P5nu.even.data, (float2*)P5nu.odd.data, (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, (float2*)P3.even.data, (float2*)P3.odd.data, sig, nu, mFiveSt, coeff, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); }//nu //lepage //Kernel A2 middle_link_kernel( (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, (float2*)P5.even.data, (float2*)P5.odd.data, sig, mu, Lepage, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); if(ThreeSt.x != 0)coeff.x = Lepage.x/ThreeSt.x ; else coeff.x = 0; if(ThreeSt.y != 0)coeff.y = Lepage.y/ThreeSt.y ; else coeff.y = 0; side_link_kernel((float2*)P5.even.data, (float2*)P5.odd.data, (float2*)P5nu.even.data, (float2*)P5nu.odd.data, (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, (float2*)P3.even.data, (float2*)P3.odd.data, sig, mu, mLepage,coeff, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); //3-link side link coeff.x=coeff.y=0; side_link_kernel((float2*)P3.even.data, (float2*)P3.odd.data, (float2*)P3mu.even.data, (float2*)P3mu.odd.data, (float2*)cudaHw.even.data, (float2*)cudaHw.odd.data, (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)NULL, (float2*)NULL, sig, mu, ThreeSt,coeff, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); //1-link and naik term if (!DirectLinks[mu]){ DirectLinks[mu]=1; //kernel Z one_and_naik_terms_kernel((float2*)cudaHw.even.data, (float2*)cudaHw.odd.data, (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, mu, OneLink, Naik, mNaik, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); } }//mu }//sig hipUnbindTexture(siteLink0TexSingle_recon); hipUnbindTexture(siteLink1TexSingle_recon); } #undef Pmu #undef Pnumu #undef Prhonumu #undef P7 #undef P7rho #undef P7rhonu #undef P5 #undef P3 #undef P5nu #undef P3mu #undef Popmu #undef Pmumumu void fermion_force_cuda(double eps, double weight1, double weight2, void* act_path_coeff, FullHw cudaHw, cudaGaugeField &siteLink, cudaGaugeField &cudaMom, QudaGaugeParam* param) { int i; FullHw tempvec[8]; if (siteLink.Reconstruct() != QUDA_RECONSTRUCT_12) errorQuda("Reconstruct type %d not supported for gauge field", siteLink.Reconstruct()); if (cudaMom.Reconstruct() != QUDA_RECONSTRUCT_10) errorQuda("Reconstruct 
type %d not supported for momentum field", cudaMom.Reconstruct()); for(i=0;i < 8;i++){ tempvec[i] = createHwQuda(param->X, param->cuda_prec); } if (param->cuda_prec == QUDA_DOUBLE_PRECISION){ /* do_fermion_force_cuda( (double)eps, (double)weight1, (double)weight2, (double*)act_path_coeff, cudaHw, siteLink, cudaMom, tempvec, param); */ errorQuda("Double precision not supported?"); }else{ do_fermion_force_cuda( (float)eps, (float)weight1, (float)weight2, (float*)act_path_coeff, cudaHw, siteLink, cudaMom, tempvec, param); } for(i=0;i < 8;i++){ freeHwQuda(tempvec[i]); } } #undef BLOCK_DIM #undef FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE #undef FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE } // namespace quda #endif // defined(GPU_FERMION_FORCE)
5ea73172ee7225d89ca38ea134b689b23d803f08.cu
#include <dslash_quda.h> #include <read_gauge.h> #include <gauge_field.h> #include <clover_field.h> #include <fermion_force_quda.h> #include <force_common.h> #include <hw_quda.h> #if defined(GPU_FERMION_FORCE) namespace quda { namespace fermionforce { #include <dslash_constants.h> #include <dslash_textures.h> } using namespace fermionforce; #define BLOCK_DIM 64 #define LOAD_ANTI_HERMITIAN(src, dir, idx, var) LOAD_ANTI_HERMITIAN_DIRECT(src, dir, idx, var, Vh) #define LOAD_HW_SINGLE(hw_even, hw_odd, idx, var, oddness) do{ \ Float2* hw = (oddness)?hw_odd:hw_even; \ var##0 = hw[idx + 0*Vh]; \ var##1 = hw[idx + 1*Vh]; \ var##2 = hw[idx + 2*Vh]; \ var##3 = hw[idx + 3*Vh]; \ var##4 = hw[idx + 4*Vh]; \ var##5 = hw[idx + 5*Vh]; \ }while(0) #define WRITE_HW_SINGLE(hw_even, hw_odd, idx, var, oddness) do{ \ Float2* hw = (oddness)?hw_odd:hw_even; \ hw[idx + 0*Vh] = var##0; \ hw[idx + 1*Vh] = var##1; \ hw[idx + 2*Vh] = var##2; \ hw[idx + 3*Vh] = var##3; \ hw[idx + 4*Vh] = var##4; \ hw[idx + 5*Vh] = var##5; \ }while(0) #define LOAD_HW(hw_eve, hw_odd, idx, var, oddness) LOAD_HW_SINGLE(hw_eve, hw_odd, idx, var, oddness) #define WRITE_HW(hw_even, hw_odd, idx, var, oddness) WRITE_HW_SINGLE(hw_even, hw_odd, idx, var, oddness) #define LOAD_MATRIX(src, dir, idx, var) LOAD_MATRIX_12_SINGLE(src, dir, idx, var, Vh) #define FF_SITE_MATRIX_LOAD_TEX 1 #define linkEvenTex siteLink0TexSingle_recon #define linkOddTex siteLink1TexSingle_recon #if (FF_SITE_MATRIX_LOAD_TEX == 1) #define FF_LOAD_MATRIX(dir, idx, var, oddness) LOAD_MATRIX_12_SINGLE_TEX(((oddness)?linkOddTex:linkEvenTex), dir, idx, var, Vh) #else #define FF_LOAD_MATRIX(dir, idx, var, oddness) LOAD_MATRIX_12_SINGLE(((oddness)?linkOdd:linkEven), dir, idx, var, Vh) #endif #define linka00_re LINKA0.x #define linka00_im LINKA0.y #define linka01_re LINKA0.z #define linka01_im LINKA0.w #define linka02_re LINKA1.x #define linka02_im LINKA1.y #define linka10_re LINKA1.z #define linka10_im LINKA1.w #define linka11_re LINKA2.x #define linka11_im LINKA2.y #define linka12_re LINKA2.z #define linka12_im LINKA2.w #define linka20_re LINKA3.x #define linka20_im LINKA3.y #define linka21_re LINKA3.z #define linka21_im LINKA3.w #define linka22_re LINKA4.x #define linka22_im LINKA4.y #define linkb00_re LINKB0.x #define linkb00_im LINKB0.y #define linkb01_re LINKB0.z #define linkb01_im LINKB0.w #define linkb02_re LINKB1.x #define linkb02_im LINKB1.y #define linkb10_re LINKB1.z #define linkb10_im LINKB1.w #define linkb11_re LINKB2.x #define linkb11_im LINKB2.y #define linkb12_re LINKB2.z #define linkb12_im LINKB2.w #define linkb20_re LINKB3.x #define linkb20_im LINKB3.y #define linkb21_re LINKB3.z #define linkb21_im LINKB3.w #define linkb22_re LINKB4.x #define linkb22_im LINKB4.y #define MAT_MUL_HW(M, HW, HWOUT) \ HWOUT##00_re = (M##00_re * HW##00_re - M##00_im * HW##00_im) \ + (M##01_re * HW##01_re - M##01_im * HW##01_im) \ + (M##02_re * HW##02_re - M##02_im * HW##02_im); \ HWOUT##00_im = (M##00_re * HW##00_im + M##00_im * HW##00_re) \ + (M##01_re * HW##01_im + M##01_im * HW##01_re) \ + (M##02_re * HW##02_im + M##02_im * HW##02_re); \ HWOUT##01_re = (M##10_re * HW##00_re - M##10_im * HW##00_im) \ + (M##11_re * HW##01_re - M##11_im * HW##01_im) \ + (M##12_re * HW##02_re - M##12_im * HW##02_im); \ HWOUT##01_im = (M##10_re * HW##00_im + M##10_im * HW##00_re) \ + (M##11_re * HW##01_im + M##11_im * HW##01_re) \ + (M##12_re * HW##02_im + M##12_im * HW##02_re); \ HWOUT##02_re = (M##20_re * HW##00_re - M##20_im * HW##00_im) \ + (M##21_re * HW##01_re - M##21_im * HW##01_im) \ + (M##22_re * 
HW##02_re - M##22_im * HW##02_im); \ HWOUT##02_im = (M##20_re * HW##00_im + M##20_im * HW##00_re) \ + (M##21_re * HW##01_im + M##21_im * HW##01_re) \ + (M##22_re * HW##02_im + M##22_im * HW##02_re); \ HWOUT##10_re = (M##00_re * HW##10_re - M##00_im * HW##10_im) \ + (M##01_re * HW##11_re - M##01_im * HW##11_im) \ + (M##02_re * HW##12_re - M##02_im * HW##12_im); \ HWOUT##10_im = (M##00_re * HW##10_im + M##00_im * HW##10_re) \ + (M##01_re * HW##11_im + M##01_im * HW##11_re) \ + (M##02_re * HW##12_im + M##02_im * HW##12_re); \ HWOUT##11_re = (M##10_re * HW##10_re - M##10_im * HW##10_im) \ + (M##11_re * HW##11_re - M##11_im * HW##11_im) \ + (M##12_re * HW##12_re - M##12_im * HW##12_im); \ HWOUT##11_im = (M##10_re * HW##10_im + M##10_im * HW##10_re) \ + (M##11_re * HW##11_im + M##11_im * HW##11_re) \ + (M##12_re * HW##12_im + M##12_im * HW##12_re); \ HWOUT##12_re = (M##20_re * HW##10_re - M##20_im * HW##10_im) \ + (M##21_re * HW##11_re - M##21_im * HW##11_im) \ + (M##22_re * HW##12_re - M##22_im * HW##12_im); \ HWOUT##12_im = (M##20_re * HW##10_im + M##20_im * HW##10_re) \ + (M##21_re * HW##11_im + M##21_im * HW##11_re) \ + (M##22_re * HW##12_im + M##22_im * HW##12_re); #define ADJ_MAT_MUL_HW(M, HW, HWOUT) \ HWOUT##00_re = (M##00_re * HW##00_re + M##00_im * HW##00_im) \ + (M##10_re * HW##01_re + M##10_im * HW##01_im) \ + (M##20_re * HW##02_re + M##20_im * HW##02_im); \ HWOUT##00_im = (M##00_re * HW##00_im - M##00_im * HW##00_re) \ + (M##10_re * HW##01_im - M##10_im * HW##01_re) \ + (M##20_re * HW##02_im - M##20_im * HW##02_re); \ HWOUT##01_re = (M##01_re * HW##00_re + M##01_im * HW##00_im) \ + (M##11_re * HW##01_re + M##11_im * HW##01_im) \ + (M##21_re * HW##02_re + M##21_im * HW##02_im); \ HWOUT##01_im = (M##01_re * HW##00_im - M##01_im * HW##00_re) \ + (M##11_re * HW##01_im - M##11_im * HW##01_re) \ + (M##21_re * HW##02_im - M##21_im * HW##02_re); \ HWOUT##02_re = (M##02_re * HW##00_re + M##02_im * HW##00_im) \ + (M##12_re * HW##01_re + M##12_im * HW##01_im) \ + (M##22_re * HW##02_re + M##22_im * HW##02_im); \ HWOUT##02_im = (M##02_re * HW##00_im - M##02_im * HW##00_re) \ + (M##12_re * HW##01_im - M##12_im * HW##01_re) \ + (M##22_re * HW##02_im - M##22_im * HW##02_re); \ HWOUT##10_re = (M##00_re * HW##10_re + M##00_im * HW##10_im) \ + (M##10_re * HW##11_re + M##10_im * HW##11_im) \ + (M##20_re * HW##12_re + M##20_im * HW##12_im); \ HWOUT##10_im = (M##00_re * HW##10_im - M##00_im * HW##10_re) \ + (M##10_re * HW##11_im - M##10_im * HW##11_re) \ + (M##20_re * HW##12_im - M##20_im * HW##12_re); \ HWOUT##11_re = (M##01_re * HW##10_re + M##01_im * HW##10_im) \ + (M##11_re * HW##11_re + M##11_im * HW##11_im) \ + (M##21_re * HW##12_re + M##21_im * HW##12_im); \ HWOUT##11_im = (M##01_re * HW##10_im - M##01_im * HW##10_re) \ + (M##11_re * HW##11_im - M##11_im * HW##11_re) \ + (M##21_re * HW##12_im - M##21_im * HW##12_re); \ HWOUT##12_re = (M##02_re * HW##10_re + M##02_im * HW##10_im) \ + (M##12_re * HW##11_re + M##12_im * HW##11_im) \ + (M##22_re * HW##12_re + M##22_im * HW##12_im); \ HWOUT##12_im = (M##02_re * HW##10_im - M##02_im * HW##10_re) \ + (M##12_re * HW##11_im - M##12_im * HW##11_re) \ + (M##22_re * HW##12_im - M##22_im * HW##12_re); #define SU3_PROJECTOR(va, vb, m) \ m##00_re = va##0_re * vb##0_re + va##0_im * vb##0_im; \ m##00_im = va##0_im * vb##0_re - va##0_re * vb##0_im; \ m##01_re = va##0_re * vb##1_re + va##0_im * vb##1_im; \ m##01_im = va##0_im * vb##1_re - va##0_re * vb##1_im; \ m##02_re = va##0_re * vb##2_re + va##0_im * vb##2_im; \ m##02_im = va##0_im * vb##2_re - va##0_re * 
vb##2_im; \ m##10_re = va##1_re * vb##0_re + va##1_im * vb##0_im; \ m##10_im = va##1_im * vb##0_re - va##1_re * vb##0_im; \ m##11_re = va##1_re * vb##1_re + va##1_im * vb##1_im; \ m##11_im = va##1_im * vb##1_re - va##1_re * vb##1_im; \ m##12_re = va##1_re * vb##2_re + va##1_im * vb##2_im; \ m##12_im = va##1_im * vb##2_re - va##1_re * vb##2_im; \ m##20_re = va##2_re * vb##0_re + va##2_im * vb##0_im; \ m##20_im = va##2_im * vb##0_re - va##2_re * vb##0_im; \ m##21_re = va##2_re * vb##1_re + va##2_im * vb##1_im; \ m##21_im = va##2_im * vb##1_re - va##2_re * vb##1_im; \ m##22_re = va##2_re * vb##2_re + va##2_im * vb##2_im; \ m##22_im = va##2_im * vb##2_re - va##2_re * vb##2_im; //vc = va + vb*s #define SCALAR_MULT_ADD_SU3_VECTOR(va, vb, s, vc) do { \ vc##0_re = va##0_re + vb##0_re * s; \ vc##0_im = va##0_im + vb##0_im * s; \ vc##1_re = va##1_re + vb##1_re * s; \ vc##1_im = va##1_im + vb##1_im * s; \ vc##2_re = va##2_re + vb##2_re * s; \ vc##2_im = va##2_im + vb##2_im * s; \ }while (0) #define FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mydir, idx, new_idx) do { \ switch(mydir){ \ case 0: \ new_idx = ( (new_x1==X1m1)?idx-X1m1:idx+1); \ new_x1 = (new_x1==X1m1)?0:new_x1+1; \ break; \ case 1: \ new_idx = ( (new_x2==X2m1)?idx-X2X1mX1:idx+X1); \ new_x2 = (new_x2==X2m1)?0:new_x2+1; \ break; \ case 2: \ new_idx = ( (new_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \ new_x3 = (new_x3==X3m1)?0:new_x3+1; \ break; \ case 3: \ new_idx = ( (new_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \ new_x4 = (new_x4==X4m1)?0:new_x4+1; \ break; \ } \ }while(0) #define FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mydir, idx, new_idx) do { \ switch(mydir){ \ case 0: \ new_idx = ( (new_x1==0)?idx+X1m1:idx-1); \ new_x1 = (new_x1==0)?X1m1:new_x1 - 1; \ break; \ case 1: \ new_idx = ( (new_x2==0)?idx+X2X1mX1:idx-X1); \ new_x2 = (new_x2==0)?X2m1:new_x2 - 1; \ break; \ case 2: \ new_idx = ( (new_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \ new_x3 = (new_x3==0)?X3m1:new_x3 - 1; \ break; \ case 3: \ new_idx = ( (new_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \ new_x4 = (new_x4==0)?X4m1:new_x4 - 1; \ break; \ } \ }while(0) #define FF_COMPUTE_NEW_FULL_IDX_PLUS(old_x1, old_x2, old_x3, old_x4, idx, mydir, new_idx) do { \ switch(mydir){ \ case 0: \ new_idx = ( (old_x1==X1m1)?idx-X1m1:idx+1); \ break; \ case 1: \ new_idx = ( (old_x2==X2m1)?idx-X2X1mX1:idx+X1); \ break; \ case 2: \ new_idx = ( (old_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \ break; \ case 3: \ new_idx = ( (old_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \ break; \ } \ }while(0) #define FF_COMPUTE_NEW_FULL_IDX_MINUS(old_x1, old_x2, old_x3, old_x4, idx, mydir, new_idx) do { \ switch(mydir){ \ case 0: \ new_idx = ( (old_x1==0)?idx+X1m1:idx-1); \ break; \ case 1: \ new_idx = ( (old_x2==0)?idx+X2X1mX1:idx-X1); \ break; \ case 2: \ new_idx = ( (old_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \ break; \ case 3: \ new_idx = ( (old_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \ break; \ } \ }while(0) //this macro require linka, linkb, and ah variables defined #define ADD_FORCE_TO_MOM(hw1, hw2, idx, dir, cf,oddness) do{ \ Float2 my_coeff; \ int mydir; \ if (GOES_BACKWARDS(dir)){ \ mydir=OPP_DIR(dir); \ my_coeff.x = -cf.x; \ my_coeff.y = -cf.y; \ }else{ \ mydir=dir; \ my_coeff.x = cf.x; \ my_coeff.y = cf.y; \ } \ Float2 tmp_coeff; \ tmp_coeff.x = my_coeff.x; \ tmp_coeff.y = my_coeff.y; \ if(oddness){ \ tmp_coeff.x = - my_coeff.x; \ tmp_coeff.y = - my_coeff.y; \ } \ Float2* mom = oddness?momOdd:momEven; \ LOAD_ANTI_HERMITIAN(mom, mydir, idx, AH); \ UNCOMPRESS_ANTI_HERMITIAN(ah, linka); \ SU3_PROJECTOR(hw1##0, hw2##0, linkb); \ 
SCALAR_MULT_ADD_SU3_MATRIX(linka, linkb, tmp_coeff.x, linka); \ SU3_PROJECTOR(hw1##1, hw2##1, linkb); \ SCALAR_MULT_ADD_SU3_MATRIX(linka, linkb, tmp_coeff.y, linka); \ MAKE_ANTI_HERMITIAN(linka, ah); \ WRITE_ANTI_HERMITIAN(mom, mydir, idx, AH, Vh); \ }while(0) #define FF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, i1,i2,i3,i4) do { \ sign =1; \ switch(dir){ \ case XUP: \ if ( (i4 & 1) == 1){ \ sign = -1; \ } \ break; \ case YUP: \ if ( ((i4+i1) & 1) == 1){ \ sign = -1; \ } \ break; \ case ZUP: \ if ( ((i4+i1+i2) & 1) == 1){ \ sign = -1; \ } \ break; \ case TUP: \ if (i4 == X4m1 ){ \ sign = -1; \ } \ break; \ } \ }while (0) #define hwa00_re HWA0.x #define hwa00_im HWA0.y #define hwa01_re HWA1.x #define hwa01_im HWA1.y #define hwa02_re HWA2.x #define hwa02_im HWA2.y #define hwa10_re HWA3.x #define hwa10_im HWA3.y #define hwa11_re HWA4.x #define hwa11_im HWA4.y #define hwa12_re HWA5.x #define hwa12_im HWA5.y #define hwb00_re HWB0.x #define hwb00_im HWB0.y #define hwb01_re HWB1.x #define hwb01_im HWB1.y #define hwb02_re HWB2.x #define hwb02_im HWB2.y #define hwb10_re HWB3.x #define hwb10_im HWB3.y #define hwb11_re HWB4.x #define hwb11_im HWB4.y #define hwb12_re HWB5.x #define hwb12_im HWB5.y #define hwc00_re HWC0.x #define hwc00_im HWC0.y #define hwc01_re HWC1.x #define hwc01_im HWC1.y #define hwc02_re HWC2.x #define hwc02_im HWC2.y #define hwc10_re HWC3.x #define hwc10_im HWC3.y #define hwc11_re HWC4.x #define hwc11_im HWC4.y #define hwc12_re HWC5.x #define hwc12_im HWC5.y #define hwd00_re HWD0.x #define hwd00_im HWD0.y #define hwd01_re HWD1.x #define hwd01_im HWD1.y #define hwd02_re HWD2.x #define hwd02_im HWD2.y #define hwd10_re HWD3.x #define hwd10_im HWD3.y #define hwd11_re HWD4.x #define hwd11_im HWD4.y #define hwd12_re HWD5.x #define hwd12_im HWD5.y #define hwe00_re HWE0.x #define hwe00_im HWE0.y #define hwe01_re HWE1.x #define hwe01_im HWE1.y #define hwe02_re HWE2.x #define hwe02_im HWE2.y #define hwe10_re HWE3.x #define hwe10_im HWE3.y #define hwe11_re HWE4.x #define hwe11_im HWE4.y #define hwe12_re HWE5.x #define hwe12_im HWE5.y void fermion_force_init_cuda(QudaGaugeParam* param) { #ifdef MULTI_GPU #error "multi gpu is not supported for fermion force computation" #endif static int fermion_force_init_cuda_flag = 0; if (fermion_force_init_cuda_flag) return; fermion_force_init_cuda_flag=1; } /* * This function computes the contribution to momentum from the middle link in a staple * * tempx: IN * Pmu: OUT * P3: OUT * */ template<int sig_positive, int mu_positive, int oddBit, typename Float2> __global__ void do_middle_link_kernel(Float2* tempxEven, Float2* tempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* P3Even, Float2* P3Odd, int sig, int mu, Float2 coeff, float4* linkEven, float4* linkOdd, Float2* momEven, Float2* momOdd) { int sid = blockIdx.x * blockDim.x + threadIdx.x; int z1 = sid / X1h; int x1h = sid - z1*X1h; int z2 = z1 / X2; int x2 = z1 - z2*X2; int x4 = z2 / X3; int x3 = z2 - x4*X3; int x1odd = (x2 + x3 + x4 + oddBit) & 1; int x1 = 2*x1h + x1odd; int X = 2*sid + x1odd; int new_x1, new_x2, new_x3, new_x4; int new_mem_idx; int ad_link_sign=1; int ab_link_sign=1; int bc_link_sign=1; Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5; Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5; Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5; Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5; float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4; float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4; Float2 AH0, AH1, AH2, AH3, AH4; /* sig * A________B * mu | | * D | |C * * A is the current point (sid) */ int point_b, point_c, point_d; int
ad_link_nbr_idx, ab_link_nbr_idx, bc_link_nbr_idx; int mymu; new_x1 = x1; new_x2 = x2; new_x3 = x3; new_x4 = x4; if(mu_positive){ mymu =mu; FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mu, X, new_mem_idx); }else{ mymu = OPP_DIR(mu); FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(OPP_DIR(mu), X, new_mem_idx); } point_d = (new_mem_idx >> 1); if (mu_positive){ ad_link_nbr_idx = point_d; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); }else{ ad_link_nbr_idx = sid; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4); } int mysig; if(sig_positive){ mysig = sig; FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, new_mem_idx, new_mem_idx); }else{ mysig = OPP_DIR(sig); FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), new_mem_idx, new_mem_idx); } point_c = (new_mem_idx >> 1); if (mu_positive){ bc_link_nbr_idx = point_c; FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); } new_x1 = x1; new_x2 = x2; new_x3 = x3; new_x4 = x4; if(sig_positive){ FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, X, new_mem_idx); }else{ FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), X, new_mem_idx); } point_b = (new_mem_idx >> 1); if (!mu_positive){ bc_link_nbr_idx = point_b; FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); } if(sig_positive){ ab_link_nbr_idx = sid; FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, x1, x2, x3, x4); }else{ ab_link_nbr_idx = point_b; FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, new_x1,new_x2,new_x3,new_x4); } LOAD_HW(tempxEven, tempxOdd, point_d, HWA, 1-oddBit ); if(mu_positive){ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, 1-oddBit); }else{ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, oddBit); } RECONSTRUCT_LINK_12(ad_link_sign, linka); if (mu_positive){ ADJ_MAT_MUL_HW(linka, hwa, hwd); }else{ MAT_MUL_HW(linka, hwa, hwd); } WRITE_HW(PmuEven,PmuOdd, sid, HWD, oddBit); LOAD_HW(tempxEven,tempxOdd, point_c, HWA, oddBit); if(mu_positive){ FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, oddBit); }else{ FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, 1-oddBit); } RECONSTRUCT_LINK_12(bc_link_sign, linka); if (mu_positive){ ADJ_MAT_MUL_HW(linka, hwa, hwb); }else{ MAT_MUL_HW(linka, hwa, hwb); } if(sig_positive){ FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKB, oddBit); }else{ FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKB, 1-oddBit); } RECONSTRUCT_LINK_12(ab_link_sign, linkb); if (sig_positive){ MAT_MUL_HW(linkb, hwb, hwc); }else{ ADJ_MAT_MUL_HW(linkb, hwb, hwc); } WRITE_HW(P3Even, P3Odd, sid, HWC, oddBit); if (sig_positive){ //add the force to mom ADD_FORCE_TO_MOM(hwc, hwd, sid, sig, coeff, oddBit); } } template<typename Float2> static void middle_link_kernel(Float2* tempxEven, Float2* tempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* P3Even, Float2* P3Odd, int sig, int mu, Float2 coeff, float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink, Float2* momEven, Float2* momOdd, dim3 gridDim, dim3 BlockDim) { dim3 halfGridDim(gridDim.x/2, 1,1); #define CALL_MIDDLE_LINK_KERNEL(sig_sign, mu_sign) \ do_middle_link_kernel<sig_sign, mu_sign,0><<<halfGridDim, BlockDim>>>( tempxEven, tempxOdd, \ PmuEven, PmuOdd, \ P3Even, P3Odd, \ sig, mu, coeff, \ linkEven, linkOdd, \ momEven, momOdd); \ do_middle_link_kernel<sig_sign, mu_sign, 1><<<halfGridDim, BlockDim>>>(tempxEven, tempxOdd, \ PmuEven, PmuOdd, \ P3Even, P3Odd, \ sig, mu, coeff, \ linkEven, linkOdd, \ momEven, momOdd); if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){ CALL_MIDDLE_LINK_KERNEL(1, 1); }else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){ CALL_MIDDLE_LINK_KERNEL(1, 0); }else if 
(GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){ CALL_MIDDLE_LINK_KERNEL(0, 1); }else{ CALL_MIDDLE_LINK_KERNEL(0, 0); } #undef CALL_MIDDLE_LINK_KERNEL } /* * Computes contribution to momentum from the side links in a staple * * P3: IN * P3mu: not used * Tempx: IN * Pmu: IN * shortPE: OUT * */ template<int sig_positive, int mu_positive, int oddBit, typename Float2> __global__ void do_side_link_kernel(Float2* P3Even, Float2* P3Odd, Float2* P3muEven, Float2* P3muOdd, Float2* TempxEven, Float2* TempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* shortPEven, Float2* shortPOdd, int sig, int mu, Float2 coeff, Float2 accumu_coeff, float4* linkEven, float4* linkOdd, Float2* momEven, Float2* momOdd) { Float2 mcoeff; mcoeff.x = -coeff.x; mcoeff.y = -coeff.y; int sid = blockIdx.x * blockDim.x + threadIdx.x; int z1 = sid / X1h; int x1h = sid - z1*X1h; int z2 = z1 / X2; int x2 = z1 - z2*X2; int x4 = z2 / X3; int x3 = z2 - x4*X3; int x1odd = (x2 + x3 + x4 + oddBit) & 1; int x1 = 2*x1h + x1odd; int X = 2*sid + x1odd; int ad_link_sign = 1; Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5; Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5; Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5; float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4; float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4; Float2 AH0, AH1, AH2, AH3, AH4; /* * compute the side link contribution to the momentum * * * sig * A________B * | | mu * D | |C * * A is the current point (sid) */ int point_d; int ad_link_nbr_idx; int mymu; int new_mem_idx; int new_x1 = x1; int new_x2 = x2; int new_x3 = x3; int new_x4 = x4; if(mu_positive){ mymu =mu; FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mymu,X, new_mem_idx); }else{ mymu = OPP_DIR(mu); FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mymu, X, new_mem_idx); } point_d = (new_mem_idx >> 1); if (mu_positive){ ad_link_nbr_idx = point_d; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); }else{ ad_link_nbr_idx = sid; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4); } LOAD_HW(P3Even, P3Odd, sid, HWA, oddBit); if(mu_positive){ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, 1 - oddBit); }else{ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKA, oddBit); } RECONSTRUCT_LINK_12(ad_link_sign, linka); if (mu_positive){ MAT_MUL_HW(linka, hwa, hwb); }else{ ADJ_MAT_MUL_HW(linka, hwa, hwb); } //start to add side link force if (mu_positive){ LOAD_HW(TempxEven, TempxOdd, point_d, HWC, 1-oddBit); if (sig_positive){ ADD_FORCE_TO_MOM(hwb, hwc, point_d, mu, coeff, 1-oddBit); }else{ ADD_FORCE_TO_MOM(hwc, hwb, point_d, OPP_DIR(mu), mcoeff, 1- oddBit); } }else{ LOAD_HW(PmuEven, PmuOdd, sid, HWC, oddBit); if (sig_positive){ ADD_FORCE_TO_MOM(hwa, hwc, sid, mu, mcoeff, oddBit); }else{ ADD_FORCE_TO_MOM(hwc, hwa, sid, OPP_DIR(mu), coeff, oddBit); } } if (shortPOdd){ LOAD_HW(shortPEven, shortPOdd, point_d, HWA, 1-oddBit); SCALAR_MULT_ADD_SU3_VECTOR(hwa0, hwb0, accumu_coeff.x, hwa0); SCALAR_MULT_ADD_SU3_VECTOR(hwa1, hwb1, accumu_coeff.y, hwa1); WRITE_HW(shortPEven, shortPOdd, point_d, HWA, 1-oddBit); } } template<typename Float2> static void side_link_kernel(Float2* P3Even, Float2* P3Odd, Float2* P3muEven, Float2* P3muOdd, Float2* TempxEven, Float2* TempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* shortPEven, Float2* shortPOdd, int sig, int mu, Float2 coeff, Float2 accumu_coeff, float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink, Float2* momEven, Float2* momOdd, dim3 gridDim, dim3 blockDim) { dim3 halfGridDim(gridDim.x/2,1,1); #define CALL_SIDE_LINK_KERNEL(sig_sign, mu_sign) \ do_side_link_kernel<sig_sign,mu_sign,0><<<halfGridDim, 
blockDim>>>( P3Even, P3Odd, \ P3muEven, P3muOdd, \ TempxEven, TempxOdd, \ PmuEven, PmuOdd, \ shortPEven, shortPOdd, \ sig, mu, coeff, accumu_coeff, \ linkEven, linkOdd, \ momEven, momOdd); \ do_side_link_kernel<sig_sign,mu_sign,1><<<halfGridDim, blockDim>>>( P3Even, P3Odd, \ P3muEven, P3muOdd, \ TempxEven, TempxOdd, \ PmuEven, PmuOdd, \ shortPEven, shortPOdd, \ sig, mu, coeff, accumu_coeff, \ linkEven, linkOdd, \ momEven, momOdd); if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){ CALL_SIDE_LINK_KERNEL(1,1); }else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){ CALL_SIDE_LINK_KERNEL(1,0); }else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){ CALL_SIDE_LINK_KERNEL(0,1); }else{ CALL_SIDE_LINK_KERNEL(0,0); } #undef CALL_SIDE_LINK_KERNEL } /* * This function computes the contribution to momentum from middle and side links * * tempx: IN * Pmu: not used * P3: not used * P3mu: not used * shortP: OUT * */ template<int sig_positive, int mu_positive, int oddBit, typename Float2> __global__ void do_all_link_kernel(Float2* tempxEven, Float2* tempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* P3Even, Float2* P3Odd, Float2* P3muEven, Float2* P3muOdd, Float2* shortPEven, Float2* shortPOdd, int sig, int mu, Float2 coeff, Float2 mcoeff, Float2 accumu_coeff, float4* linkEven, float4* linkOdd, Float2* momEven, Float2* momOdd) { int sid = blockIdx.x * blockDim.x + threadIdx.x; int z1 = sid / X1h; int x1h = sid - z1*X1h; int z2 = z1 / X2; int x2 = z1 - z2*X2; int x4 = z2 / X3; int x3 = z2 - x4*X3; int x1odd = (x2 + x3 + x4 + oddBit) & 1; int x1 = 2*x1h + x1odd; int X = 2*sid + x1odd; int new_x1, new_x2, new_x3, new_x4; int ad_link_sign=1; int ab_link_sign=1; int bc_link_sign=1; Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5; Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5; Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5; Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5; Float2 HWE0, HWE1, HWE2, HWE3, HWE4, HWE5; float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4; float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4; float4 LINKC0, LINKC1, LINKC2, LINKC3, LINKC4; Float2 AH0, AH1, AH2, AH3, AH4; /* sig * A________B * mu | | * D | |C * * A is the current point (sid) */ int point_b, point_c, point_d; int ad_link_nbr_idx, ab_link_nbr_idx, bc_link_nbr_idx; int mymu; int new_mem_idx; new_x1 = x1; new_x2 = x2; new_x3 = x3; new_x4 = x4; if(mu_positive){ mymu =mu; FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mu, X, new_mem_idx); }else{ mymu = OPP_DIR(mu); FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(OPP_DIR(mu), X, new_mem_idx); } point_d = (new_mem_idx >> 1); if (mu_positive){ ad_link_nbr_idx = point_d; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); }else{ ad_link_nbr_idx = sid; FF_COMPUTE_RECONSTRUCT_SIGN(ad_link_sign, mymu, x1, x2, x3, x4); } int mysig; if(sig_positive){ mysig = sig; FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, new_mem_idx, new_mem_idx); }else{ mysig = OPP_DIR(sig); FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), new_mem_idx, new_mem_idx); } point_c = (new_mem_idx >> 1); if (mu_positive){ bc_link_nbr_idx = point_c; FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); } new_x1 = x1; new_x2 = x2; new_x3 = x3; new_x4 = x4; if(sig_positive){ FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(sig, X, new_mem_idx); }else{ FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(sig), X, new_mem_idx); } point_b = (new_mem_idx >> 1); if (!mu_positive){ bc_link_nbr_idx = point_b; FF_COMPUTE_RECONSTRUCT_SIGN(bc_link_sign, mymu, new_x1,new_x2,new_x3,new_x4); } if(sig_positive){ ab_link_nbr_idx = sid; FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, 
mysig, x1, x2, x3, x4); }else{ ab_link_nbr_idx = point_b; FF_COMPUTE_RECONSTRUCT_SIGN(ab_link_sign, mysig, new_x1,new_x2,new_x3,new_x4); } LOAD_HW(tempxEven, tempxOdd, point_d, HWE, 1-oddBit); if (mu_positive){ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKC, 1-oddBit); }else{ FF_LOAD_MATRIX(mymu, ad_link_nbr_idx, LINKC, oddBit); } RECONSTRUCT_LINK_12(ad_link_sign, linkc); if (mu_positive){ ADJ_MAT_MUL_HW(linkc, hwe, hwd); }else{ MAT_MUL_HW(linkc, hwe, hwd); } //we do not need to write Pmu here //WRITE_HW(myPmu, sid, HWD); LOAD_HW(tempxEven, tempxOdd, point_c, HWA, oddBit); if (mu_positive){ FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, oddBit); }else{ FF_LOAD_MATRIX(mymu, bc_link_nbr_idx, LINKA, 1-oddBit); } RECONSTRUCT_LINK_12(bc_link_sign, linka); if (mu_positive){ ADJ_MAT_MUL_HW(linka, hwa, hwb); }else{ MAT_MUL_HW(linka, hwa, hwb); } if (sig_positive){ FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKA, oddBit); }else{ FF_LOAD_MATRIX(mysig, ab_link_nbr_idx, LINKA, 1-oddBit); } RECONSTRUCT_LINK_12(ab_link_sign, linka); if (sig_positive){ MAT_MUL_HW(linka, hwb, hwc); }else{ ADJ_MAT_MUL_HW(linka, hwb, hwc); } //we do not need to write P3 here //WRITE_HW(myP3, sid, HWC); //The middle link contribution if (sig_positive){ //add the force to mom ADD_FORCE_TO_MOM(hwc, hwd, sid, sig, mcoeff, oddBit); } //P3 is hwc //ad_link is linkc if (mu_positive){ MAT_MUL_HW(linkc, hwc, hwa); }else{ ADJ_MAT_MUL_HW(linkc, hwc, hwa); } //accumulate P7rho to P5 //WRITE_HW(otherP3mu, point_d, HWA); LOAD_HW(shortPEven, shortPOdd, point_d, HWB, 1-oddBit); SCALAR_MULT_ADD_SU3_VECTOR(hwb0, hwa0, accumu_coeff.x, hwb0); SCALAR_MULT_ADD_SU3_VECTOR(hwb1, hwa1, accumu_coeff.y, hwb1); WRITE_HW(shortPEven, shortPOdd, point_d, HWB, 1-oddBit); //hwe holds tempx at point_d //hwd holds Pmu at point A(sid) if (mu_positive){ if (sig_positive){ ADD_FORCE_TO_MOM(hwa, hwe, point_d, mu, coeff, 1-oddBit); }else{ ADD_FORCE_TO_MOM(hwe, hwa, point_d, OPP_DIR(mu), mcoeff, 1- oddBit); } }else{ if (sig_positive){ ADD_FORCE_TO_MOM(hwc, hwd, sid, mu, mcoeff, oddBit); }else{ ADD_FORCE_TO_MOM(hwd, hwc, sid, OPP_DIR(mu), coeff, oddBit); } } } template<typename Float2> static void all_link_kernel(Float2* tempxEven, Float2* tempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* P3Even, Float2* P3Odd, Float2* P3muEven, Float2* P3muOdd, Float2* shortPEven, Float2* shortPOdd, int sig, int mu, Float2 coeff, Float2 mcoeff, Float2 accumu_coeff, float4* linkEven, float4* linkOdd, cudaGaugeField &siteLink, Float2* momEven, Float2* momOdd, dim3 gridDim, dim3 blockDim) { dim3 halfGridDim(gridDim.x/2, 1,1); #define CALL_ALL_LINK_KERNEL(sig_sign, mu_sign) \ do_all_link_kernel<sig_sign,mu_sign,0><<<halfGridDim, blockDim>>>(tempxEven, tempxOdd, \ PmuEven, PmuOdd, \ P3Even, P3Odd, \ P3muEven, P3muOdd, \ shortPEven, shortPOdd, \ sig, mu, coeff, mcoeff, accumu_coeff, \ linkEven, linkOdd, \ momEven, momOdd); \ do_all_link_kernel<sig_sign,mu_sign,1><<<halfGridDim, blockDim>>>(tempxEven, tempxOdd, \ PmuEven, PmuOdd, \ P3Even, P3Odd, \ P3muEven, P3muOdd, \ shortPEven, shortPOdd, \ sig, mu, coeff, mcoeff, accumu_coeff, \ linkEven, linkOdd, \ momEven, momOdd); if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){ CALL_ALL_LINK_KERNEL(1,1); }else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){ CALL_ALL_LINK_KERNEL(1,0); }else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){ CALL_ALL_LINK_KERNEL(0,1); }else{ CALL_ALL_LINK_KERNEL(0,0); } #undef CALL_ALL_LINK_KERNEL } /* This function computes the one and naik terms' contribution to momentum * * Tempx: IN * Pmu: IN * Pnumu: IN * */ template <int 
oddBit, typename Float2> __global__ void do_one_and_naik_terms_kernel(Float2* TempxEven, Float2* TempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* PnumuEven, Float2* PnumuOdd, int mu, Float2 OneLink, Float2 Naik, Float2 mNaik, float4* linkEven, float4* linkOdd, Float2* momEven, Float2* momOdd) { Float2 HWA0, HWA1, HWA2, HWA3, HWA4, HWA5; Float2 HWB0, HWB1, HWB2, HWB3, HWB4, HWB5; Float2 HWC0, HWC1, HWC2, HWC3, HWC4, HWC5; Float2 HWD0, HWD1, HWD2, HWD3, HWD4, HWD5; float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4; float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4; Float2 AH0, AH1, AH2, AH3, AH4; int sid = blockIdx.x * blockDim.x + threadIdx.x; int z1 = sid / X1h; int x1h = sid - z1*X1h; int z2 = z1 / X2; int x2 = z1 - z2*X2; int x4 = z2 / X3; int x3 = z2 - x4*X3; int x1odd = (x2 + x3 + x4 + oddBit) & 1; int x1 = 2*x1h + x1odd; //int X = 2*sid + x1odd; int dx[4]; int new_x1, new_x2, new_x3, new_x4, new_idx; int sign=1; if (GOES_BACKWARDS(mu)){ //The one link LOAD_HW(PmuEven, PmuOdd, sid, HWA, oddBit); LOAD_HW(TempxEven, TempxOdd, sid, HWB, oddBit); ADD_FORCE_TO_MOM(hwa, hwb, sid, OPP_DIR(mu), OneLink, oddBit); //Naik term dx[3]=dx[2]=dx[1]=dx[0]=0; dx[OPP_DIR(mu)] = -1; new_x1 = (x1 + dx[0] + X1)%X1; new_x2 = (x2 + dx[1] + X2)%X2; new_x3 = (x3 + dx[2] + X3)%X3; new_x4 = (x4 + dx[3] + X4)%X4; new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1; LOAD_HW(TempxEven, TempxOdd, new_idx, HWA, 1-oddBit); FF_LOAD_MATRIX(OPP_DIR(mu), new_idx, LINKA, 1-oddBit); FF_COMPUTE_RECONSTRUCT_SIGN(sign, OPP_DIR(mu), new_x1,new_x2,new_x3,new_x4); RECONSTRUCT_LINK_12(sign, linka); ADJ_MAT_MUL_HW(linka, hwa, hwc); //Popmu LOAD_HW(PnumuEven, PnumuOdd, sid, HWD, oddBit); ADD_FORCE_TO_MOM(hwd, hwc, sid, OPP_DIR(mu), mNaik, oddBit); dx[3]=dx[2]=dx[1]=dx[0]=0; dx[OPP_DIR(mu)] = 1; new_x1 = (x1 + dx[0] + X1)%X1; new_x2 = (x2 + dx[1] + X2)%X2; new_x3 = (x3 + dx[2] + X3)%X3; new_x4 = (x4 + dx[3] + X4)%X4; new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1; LOAD_HW(PnumuEven, PnumuOdd, new_idx, HWA, 1-oddBit); FF_LOAD_MATRIX(OPP_DIR(mu), sid, LINKA, oddBit); FF_COMPUTE_RECONSTRUCT_SIGN(sign, OPP_DIR(mu), x1, x2, x3, x4); RECONSTRUCT_LINK_12(sign, linka); MAT_MUL_HW(linka, hwa, hwc); ADD_FORCE_TO_MOM(hwc, hwb, sid, OPP_DIR(mu), Naik, oddBit); }else{ dx[3]=dx[2]=dx[1]=dx[0]=0; dx[mu] = 1; new_x1 = (x1 + dx[0] + X1)%X1; new_x2 = (x2 + dx[1] + X2)%X2; new_x3 = (x3 + dx[2] + X3)%X3; new_x4 = (x4 + dx[3] + X4)%X4; new_idx = (new_x4*X3X2X1+new_x3*X2X1+new_x2*X1+new_x1) >> 1; LOAD_HW(TempxEven, TempxOdd, new_idx, HWA, 1-oddBit); FF_LOAD_MATRIX(mu, sid, LINKA, oddBit); FF_COMPUTE_RECONSTRUCT_SIGN(sign, mu, x1, x2, x3, x4); RECONSTRUCT_LINK_12(sign, linka); MAT_MUL_HW(linka, hwa, hwb); LOAD_HW(PnumuEven, PnumuOdd, sid, HWC, oddBit); ADD_FORCE_TO_MOM(hwb, hwc, sid, mu, Naik, oddBit); } } template<typename Float2> static void one_and_naik_terms_kernel(Float2* TempxEven, Float2* TempxOdd, Float2* PmuEven, Float2* PmuOdd, Float2* PnumuEven, Float2* PnumuOdd, int mu, Float2 OneLink, Float2 Naik, Float2 mNaik, float4* linkEven, float4* linkOdd, Float2* momEven, Float2* momOdd, dim3 gridDim, dim3 blockDim) { dim3 halfGridDim(gridDim.x/2, 1,1); do_one_and_naik_terms_kernel<0><<<halfGridDim, blockDim>>>(TempxEven, TempxOdd, PmuEven, PmuOdd, PnumuEven, PnumuOdd, mu, OneLink, Naik, mNaik, linkEven, linkOdd, momEven, momOdd); do_one_and_naik_terms_kernel<1><<<halfGridDim, blockDim>>>(TempxEven, TempxOdd, PmuEven, PmuOdd, PnumuEven, PnumuOdd, mu, OneLink, Naik, mNaik, linkEven, linkOdd, momEven, momOdd); return; } #define Pmu 
tempvec[0] #define Pnumu tempvec[1] #define Prhonumu tempvec[2] #define P7 tempvec[3] #define P7rho tempvec[4] #define P7rhonu tempvec[5] #define P5 tempvec[6] #define P3 tempvec[7] #define P5nu tempvec[3] #define P3mu tempvec[3] #define Popmu tempvec[4] #define Pmumumu tempvec[4] template<typename Real> static void do_fermion_force_cuda(Real eps, Real weight1, Real weight2, Real* act_path_coeff, FullHw cudaHw, cudaGaugeField &siteLink, cudaGaugeField &cudaMom, FullHw tempvec[8], QudaGaugeParam* param) { int mu, nu, rho, sig; float2 coeff; float2 OneLink, Lepage, Naik, FiveSt, ThreeSt, SevenSt; float2 mNaik, mLepage, mFiveSt, mThreeSt, mSevenSt; Real ferm_epsilon; ferm_epsilon = 2.0*weight1*eps; OneLink.x = act_path_coeff[0]*ferm_epsilon ; Naik.x = act_path_coeff[1]*ferm_epsilon ; mNaik.x = -Naik.x; ThreeSt.x = act_path_coeff[2]*ferm_epsilon ; mThreeSt.x = -ThreeSt.x; FiveSt.x = act_path_coeff[3]*ferm_epsilon ; mFiveSt.x = -FiveSt.x; SevenSt.x = act_path_coeff[4]*ferm_epsilon ; mSevenSt.x = -SevenSt.x; Lepage.x = act_path_coeff[5]*ferm_epsilon ; mLepage.x = -Lepage.x; ferm_epsilon = 2.0*weight2*eps; OneLink.y = act_path_coeff[0]*ferm_epsilon ; Naik.y = act_path_coeff[1]*ferm_epsilon ; mNaik.y = -Naik.y; ThreeSt.y = act_path_coeff[2]*ferm_epsilon ; mThreeSt.y = -ThreeSt.y; FiveSt.y = act_path_coeff[3]*ferm_epsilon ; mFiveSt.y = -FiveSt.y; SevenSt.y = act_path_coeff[4]*ferm_epsilon ; mSevenSt.y = -SevenSt.y; Lepage.y = act_path_coeff[5]*ferm_epsilon ; mLepage.y = -Lepage.y; int DirectLinks[8] ; for(mu=0;mu<8;mu++){ DirectLinks[mu] = 0 ; } int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3]; dim3 blockDim(BLOCK_DIM,1,1); dim3 gridDim(volume/blockDim.x, 1, 1); cudaBindTexture(0, siteLink0TexSingle_recon, siteLink.Even_p(), siteLink.Bytes()/2); cudaBindTexture(0, siteLink1TexSingle_recon, siteLink.Odd_p(), siteLink.Bytes()/2); for(sig=0; sig < 8; sig++){ for(mu = 0; mu < 8; mu++){ if ( (mu == sig) || (mu == OPP_DIR(sig))){ continue; } //3-link //Kernel A: middle link middle_link_kernel( (float2*)cudaHw.even.data, (float2*)cudaHw.odd.data, (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)P3.even.data, (float2*)P3.odd.data, sig, mu, mThreeSt, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); for(nu=0; nu < 8; nu++){ if (nu == sig || nu == OPP_DIR(sig) || nu == mu || nu == OPP_DIR(mu)){ continue; } //5-link: middle link //Kernel B middle_link_kernel( (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, (float2*)P5.even.data, (float2*)P5.odd.data, sig, nu, FiveSt, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); for(rho =0; rho < 8; rho++){ if (rho == sig || rho == OPP_DIR(sig) || rho == mu || rho == OPP_DIR(mu) || rho == nu || rho == OPP_DIR(nu)){ continue; } //7-link: middle link and side link //kernel C if(FiveSt.x != 0)coeff.x = SevenSt.x/FiveSt.x ; else coeff.x = 0; if(FiveSt.y != 0)coeff.y = SevenSt.y/FiveSt.y ; else coeff.y = 0; all_link_kernel((float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, (float2*)Prhonumu.even.data, (float2*)Prhonumu.odd.data, (float2*)P7.even.data, (float2*)P7.odd.data, (float2*)P7rho.even.data, (float2*)P7rho.odd.data, (float2*)P5.even.data, (float2*)P5.odd.data, sig, rho, SevenSt,mSevenSt,coeff, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), 
(float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); }//rho //5-link: side link //kernel B2 if(ThreeSt.x != 0)coeff.x = FiveSt.x/ThreeSt.x ; else coeff.x = 0; if(ThreeSt.y != 0)coeff.y = FiveSt.y/ThreeSt.y ; else coeff.y = 0; side_link_kernel((float2*)P5.even.data, (float2*)P5.odd.data, (float2*)P5nu.even.data, (float2*)P5nu.odd.data, (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, (float2*)P3.even.data, (float2*)P3.odd.data, sig, nu, mFiveSt, coeff, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); }//nu //lepage //Kernel A2 middle_link_kernel( (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, (float2*)P5.even.data, (float2*)P5.odd.data, sig, mu, Lepage, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); if(ThreeSt.x != 0)coeff.x = Lepage.x/ThreeSt.x ; else coeff.x = 0; if(ThreeSt.y != 0)coeff.y = Lepage.y/ThreeSt.y ; else coeff.y = 0; side_link_kernel((float2*)P5.even.data, (float2*)P5.odd.data, (float2*)P5nu.even.data, (float2*)P5nu.odd.data, (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, (float2*)P3.even.data, (float2*)P3.odd.data, sig, mu, mLepage,coeff, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); //3-link side link coeff.x=coeff.y=0; side_link_kernel((float2*)P3.even.data, (float2*)P3.odd.data, (float2*)P3mu.even.data, (float2*)P3mu.odd.data, (float2*)cudaHw.even.data, (float2*)cudaHw.odd.data, (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)NULL, (float2*)NULL, sig, mu, ThreeSt,coeff, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), siteLink, (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); //1-link and naik term if (!DirectLinks[mu]){ DirectLinks[mu]=1; //kernel Z one_and_naik_terms_kernel((float2*)cudaHw.even.data, (float2*)cudaHw.odd.data, (float2*)Pmu.even.data, (float2*)Pmu.odd.data, (float2*)Pnumu.even.data, (float2*)Pnumu.odd.data, mu, OneLink, Naik, mNaik, (float4*)siteLink.Even_p(), (float4*)siteLink.Odd_p(), (float2*)cudaMom.Even_p(), (float2*)cudaMom.Odd_p(), gridDim, blockDim); checkCudaError(); } }//mu }//sig cudaUnbindTexture(siteLink0TexSingle_recon); cudaUnbindTexture(siteLink1TexSingle_recon); } #undef Pmu #undef Pnumu #undef Prhonumu #undef P7 #undef P7rho #undef P7rhonu #undef P5 #undef P3 #undef P5nu #undef P3mu #undef Popmu #undef Pmumumu void fermion_force_cuda(double eps, double weight1, double weight2, void* act_path_coeff, FullHw cudaHw, cudaGaugeField &siteLink, cudaGaugeField &cudaMom, QudaGaugeParam* param) { int i; FullHw tempvec[8]; if (siteLink.Reconstruct() != QUDA_RECONSTRUCT_12) errorQuda("Reconstruct type %d not supported for gauge field", siteLink.Reconstruct()); if (cudaMom.Reconstruct() != QUDA_RECONSTRUCT_10) errorQuda("Reconstruct type %d not supported for momentum field", cudaMom.Reconstruct()); for(i=0;i < 8;i++){ tempvec[i] = createHwQuda(param->X, param->cuda_prec); } if (param->cuda_prec == QUDA_DOUBLE_PRECISION){ /* do_fermion_force_cuda( (double)eps, (double)weight1, (double)weight2, (double*)act_path_coeff, cudaHw, siteLink, cudaMom, tempvec, param); */ errorQuda("Double precision not supported?"); }else{ 
do_fermion_force_cuda( (float)eps, (float)weight1, (float)weight2, (float*)act_path_coeff, cudaHw, siteLink, cudaMom, tempvec, param); } for(i=0;i < 8;i++){ freeHwQuda(tempvec[i]); } } #undef BLOCK_DIM #undef FF_COMPUTE_NEW_FULL_IDX_PLUS_UPDATE #undef FF_COMPUTE_NEW_FULL_IDX_MINUS_UPDATE } // namespace quda #endif // defined(GPU_FERMION_FORCE)
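Every launch in the file above goes through a macro that runs the same templated kernel twice on half the grid, once per checkerboard parity (the third template argument selects even or odd sites). A minimal sketch of that pattern, with placeholder argument lists since the real kernels take the full set of field and momentum pointers:

dim3 halfGridDim(gridDim.x / 2, 1, 1);
// parity 0 handles even sites, parity 1 handles odd sites; the kernel body
// reconstructs the lattice coordinate from (sid, oddBit)
do_side_link_kernel<sig_sign, mu_sign, 0><<<halfGridDim, blockDim>>>(/* field and momentum pointers */);
do_side_link_kernel<sig_sign, mu_sign, 1><<<halfGridDim, blockDim>>>(/* field and momentum pointers */);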
258778d774d20fb2e1663c4f7b7a106f31eccdff.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include<stdio.h>

__global__ void print_thread_ids()
{
    printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d <-> blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d <-> blockDim.x: %d, blockDim.y: %d, blockDim.z: %d <-> gridDim.x: %d, gridDim.y: %d, gridDim.z: %d\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
    // printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d\n", threadIdx.x, threadIdx.y, threadIdx.z);
}

int main()
{
    int nx, ny;
    nx = 16;
    ny = 16;

    dim3 block(8,8);
    dim3 grid(nx/block.x, ny/block.y);

    hipLaunchKernelGGL(( print_thread_ids), dim3(grid), dim3(block), 0, 0, );
    hipDeviceSynchronize();
    hipDeviceReset();

    return 0;
}
258778d774d20fb2e1663c4f7b7a106f31eccdff.cu
# include<stdio.h>

__global__ void print_thread_ids()
{
    printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d <-> blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d <-> blockDim.x: %d, blockDim.y: %d, blockDim.z: %d <-> gridDim.x: %d, gridDim.y: %d, gridDim.z: %d\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
    // printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d\n", threadIdx.x, threadIdx.y, threadIdx.z);
}

int main()
{
    int nx, ny;
    nx = 16;
    ny = 16;

    dim3 block(8,8);
    dim3 grid(nx/block.x, ny/block.y);

    print_thread_ids<<<grid, block>>>();
    cudaDeviceSynchronize();
    cudaDeviceReset();

    return 0;
}
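The pair above is the smallest example in this set of the translation hipify performs: the runtime prefix changes and the triple-chevron launch becomes a macro call. A minimal sketch of the mapping (generic pattern, not the exact text hipify emits, which also wraps the kernel name in extra parentheses as seen above):

// CUDA source:
print_thread_ids<<<grid, block>>>();
cudaDeviceSynchronize();

// HIP output: hipLaunchKernelGGL(kernel, gridDim, blockDim, dynamicSharedBytes, stream, args...)
hipLaunchKernelGGL(print_thread_ids, dim3(grid), dim3(block), 0, 0);
hipDeviceSynchronize();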
7b3446c3a5938ea72130257b59283c61b9f2cad8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <utility> #include <utils.hpp> #include "Assert.cuh" #include "Assert.hpp" #include "Types.hpp" #include "Preferences.hpp" #include "BE/GpuProjector.cuh" #include "BE/GpuTable.cuh" #include "Utils/GpuAllocator.cuh" std::pair<value_t, util_t> Gpu::Projector::project(const GpuTable::ptr& in) { value_t bestVal = 0; util_t bestUtil = 0; std::vector<util_t> hUtilsIn(in->getSize()); Gpu::Allocator::cpyToHost(&hUtilsIn[0], in->getUtils(), in->getSize()); // Choose best Value for (int r=0; r<in->getSize(); r++) { #ifndef MPE if (bestUtil RelOP hUtilsIn[r]) bestVal = r; #endif bestUtil = ProjOP(bestUtil, hUtilsIn[r]); } if (Preferences::verbose) { std::cout << "Best Val = " << bestVal << "\n"; } return std::make_pair<>(bestVal, bestUtil); } GpuTable::ptr Gpu::Projector::project(GpuTable::ptr& in, const Variable::ptr& var) { ASSERT(in->getScope().back()->getAgtID() == var->getAgtID(), "Projection done over bad ordered variables"); auto scope = in->getScope(); std::vector<Variable::ptr> newScope(scope.begin(), scope.end()-1); size_t varDomSize = var->getDomSize(); size_t newTabSize = in->getSize() / varDomSize; size_t nOutRows = newTabSize; size_t nThreads = varDomSize; size_t nBlocksLeft = nOutRows % nThreads == 0 ? (nOutRows / nThreads) : (nOutRows / nThreads) + 1; size_t blockShift = 0; while (nBlocksLeft > 0) { size_t nBlocks = nBlocksLeft > Gpu::Info::maxBlocks ? Gpu::Info::maxBlocks : nBlocksLeft; hipLaunchKernelGGL(( cudaProject), dim3(nBlocks), dim3(nThreads), 0, 0, in->getUtils(), varDomSize, blockShift, newTabSize); cuCheck(hipDeviceSynchronize()); nBlocksLeft -= nBlocks; blockShift += (nBlocks*nThreads); } // Gpu::Allocator::free(in->getUtils()); in->update(newScope, newTabSize); return in; } // After projecting need an opreartion of compress to move all elements // projected on the top of the First Table // Each Thread carries the projection for 1 Segment (D Util rows in the new // util table size) // __global__ void Gpu::Projector::cudaProject(util_t* out, int segmentSize, //int nSegments, size_t blockShift, size_t threadGuard) { size_t Tid = blockShift + (blockIdx.x * blockDim.x) + threadIdx.x; if (Tid >= threadGuard) return; size_t outIdx = (blockIdx.x * blockDim.x) + threadIdx.x; size_t inIdx = outIdx * segmentSize; // [todo] Copy in shared the amount of table to check? util_t bestItem = out[inIdx]; #pragma unroll for (int i=1; i<segmentSize; i++) { //maxItem = /*__nv_*/max(maxItem, out[inIdx+i]); bestItem = cudaProjOP(bestItem, out[inIdx+i]); } out[outIdx] = bestItem; }
7b3446c3a5938ea72130257b59283c61b9f2cad8.cu
#include <iostream> #include <utility> #include <utils.hpp> #include "Assert.cuh" #include "Assert.hpp" #include "Types.hpp" #include "Preferences.hpp" #include "BE/GpuProjector.cuh" #include "BE/GpuTable.cuh" #include "Utils/GpuAllocator.cuh" std::pair<value_t, util_t> Gpu::Projector::project(const GpuTable::ptr& in) { value_t bestVal = 0; util_t bestUtil = 0; std::vector<util_t> hUtilsIn(in->getSize()); Gpu::Allocator::cpyToHost(&hUtilsIn[0], in->getUtils(), in->getSize()); // Choose best Value for (int r=0; r<in->getSize(); r++) { #ifndef MPE if (bestUtil RelOP hUtilsIn[r]) bestVal = r; #endif bestUtil = ProjOP(bestUtil, hUtilsIn[r]); } if (Preferences::verbose) { std::cout << "Best Val = " << bestVal << "\n"; } return std::make_pair<>(bestVal, bestUtil); } GpuTable::ptr Gpu::Projector::project(GpuTable::ptr& in, const Variable::ptr& var) { ASSERT(in->getScope().back()->getAgtID() == var->getAgtID(), "Projection done over bad ordered variables"); auto scope = in->getScope(); std::vector<Variable::ptr> newScope(scope.begin(), scope.end()-1); size_t varDomSize = var->getDomSize(); size_t newTabSize = in->getSize() / varDomSize; size_t nOutRows = newTabSize; size_t nThreads = varDomSize; size_t nBlocksLeft = nOutRows % nThreads == 0 ? (nOutRows / nThreads) : (nOutRows / nThreads) + 1; size_t blockShift = 0; while (nBlocksLeft > 0) { size_t nBlocks = nBlocksLeft > Gpu::Info::maxBlocks ? Gpu::Info::maxBlocks : nBlocksLeft; cudaProject<<<nBlocks, nThreads>>>(in->getUtils(), varDomSize, blockShift, newTabSize); cuCheck(cudaDeviceSynchronize()); nBlocksLeft -= nBlocks; blockShift += (nBlocks*nThreads); } // Gpu::Allocator::free(in->getUtils()); in->update(newScope, newTabSize); return in; } // After projecting need an opreartion of compress to move all elements // projected on the top of the First Table // Each Thread carries the projection for 1 Segment (D Util rows in the new // util table size) // __global__ void Gpu::Projector::cudaProject(util_t* out, int segmentSize, //int nSegments, size_t blockShift, size_t threadGuard) { size_t Tid = blockShift + (blockIdx.x * blockDim.x) + threadIdx.x; if (Tid >= threadGuard) return; size_t outIdx = (blockIdx.x * blockDim.x) + threadIdx.x; size_t inIdx = outIdx * segmentSize; // [todo] Copy in shared the amount of table to check? util_t bestItem = out[inIdx]; #pragma unroll for (int i=1; i<segmentSize; i++) { //maxItem = /*__nv_*/max(maxItem, out[inIdx+i]); bestItem = cudaProjOP(bestItem, out[inIdx+i]); } out[outIdx] = bestItem; }
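For orientation, cudaProject in the files above reduces each contiguous run of segmentSize utilities to a single best value and writes it compacted at the segment index. A host-side sketch of the same reduction follows; `combine` is a stand-in for the project-defined cudaProjOP/ProjOP (not defined in these files), so treat it as an assumption:

// out holds nSegments * segmentSize utilities; afterwards out[s] is the
// reduced value for segment s (the tail of the buffer is left untouched)
void project_reference(util_t* out, size_t nSegments, int segmentSize) {
    for (size_t s = 0; s < nSegments; ++s) {
        util_t best = out[s * segmentSize];
        for (int i = 1; i < segmentSize; ++i)
            best = combine(best, out[s * segmentSize + i]);  // cudaProjOP on the device
        out[s] = best;
    }
}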
289dbc9c9fba67bd5b8fe560412474b25db4f885.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> // CUDA imports #include <hip/hip_runtime.h> #include <hip/hip_complex.h> #include <device_launch_parameters.h> #include "util/hsv_to_rgb.h" #include "util/color_palette.h" #include "mandelbrot_image.h" #include "constants.h" // Create a grid of complex numbers around the center point (center_real, center_imag). __global__ void build_complex_grid_cuda(mandelbrot_image* image) { int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; double step_x = 2 * image->draw_radius_x / image->resolution_x; double step_y = 2 * image->draw_radius_y / image->resolution_y; double point_re; double point_im; int index; // Start drawing in the bottom left, go row by row. for (int pixel_y = block_index_x; pixel_y < image->resolution_y; pixel_y += block_stride_x) { point_im = image->center_imag + pixel_y * step_y - image->draw_radius_y; for (int pixel_x = thread_index_x; pixel_x < image->resolution_x; pixel_x += thread_stride_x) { index = pixel_y * image->resolution_x + pixel_x; point_re = image->center_real + pixel_x * step_x - image->draw_radius_x; (image->points)[index] = make_cuDoubleComplex(point_re, point_im); //(image->iterated_points)[index] = make_cuDoubleComplex(point_re, point_im); } } } extern "C" void launch_build_complex_grid_cuda(int num_blocks, int block_size, mandelbrot_image* image) { hipLaunchKernelGGL(( build_complex_grid_cuda) , dim3(num_blocks), dim3(block_size) , 0, 0, image); } __global__ void reset_render_arrays_cuda(mandelbrot_image* image) { int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; int index; // Start drawing in the bottom left, go row by row. for (int pixel_y = block_index_x; pixel_y < image->resolution_y; pixel_y += block_stride_x) { for (int pixel_x = thread_index_x; pixel_x < image->resolution_x; pixel_x += thread_stride_x) { index = pixel_y * image->resolution_x + pixel_x; (image->iterationsArr)[index] = 0; (image->squared_absolute_values)[index] = 0; } } } extern "C" void launch_reset_render_arrays_cuda(int num_blocks, int block_size, mandelbrot_image* image) { hipLaunchKernelGGL(( reset_render_arrays_cuda) , dim3(num_blocks), dim3(block_size) , 0, 0, image); } __global__ void mandelbrot_iterate_cuda(mandelbrot_image* image) { // For every complex point in the current view, calculate the // iterations required for a given point to exceed the escape radius. 
int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; int index; unsigned int iterations_; for (int pixel_y = block_index_x; pixel_y < image->resolution_y; pixel_y += block_stride_x) { for (int pixel_x = thread_index_x; pixel_x < image->resolution_x; pixel_x += thread_stride_x) { index = pixel_y * image->resolution_x + pixel_x; // `cnum` is the complex number that we'll iterate hipDoubleComplex cnum = (image->points)[index]; double re = cnum.x; double im = cnum.y; // The below variable exists so that we can change `re` // without it affecting the calculation for the new // value of `im` double re_temp; double sq_abs = (image->squared_absolute_values)[index]; iterations_ = 0; while (iterations_ < image->max_iterations && sq_abs < image->escape_radius_squared) { re_temp = re * re - im * im + cnum.x; im = 2 * re * im + cnum.y; re = re_temp; sq_abs = re * re + im * im; iterations_++; } (image->iterationsArr)[index] = iterations_; (image->squared_absolute_values)[index] = sq_abs; } } } extern "C" void launch_mandelbrot_iterate_cuda(int num_blocks, int block_size, mandelbrot_image* image) { hipLaunchKernelGGL(( mandelbrot_iterate_cuda) , dim3(num_blocks), dim3(block_size) , 0, 0, image); } __global__ void mandelbrot_iterate_downscaled_cuda(mandelbrot_image* image, unsigned int downscale_factor) { // For every complex point in the current view, calculate the // iterations required for a given point to exceed the escape radius. int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; int index; unsigned int iterations_; for (int pixel_y = block_index_x * downscale_factor; pixel_y < image->resolution_y; pixel_y += block_stride_x * downscale_factor) { for (int pixel_x = thread_index_x * downscale_factor; pixel_x < image->resolution_x; pixel_x += thread_stride_x * downscale_factor) { index = pixel_y * image->resolution_x + pixel_x; // `cnum` is the complex number that we'll iterate hipDoubleComplex cnum = (image->points)[index]; double re = cnum.x; double im = cnum.y; // The below variable exists so that we can change `re` // without it affecting the calculation for the new // value of `im` double re_temp; double sq_abs = (image->squared_absolute_values)[index]; iterations_ = 0; while (iterations_ < image->max_iterations && sq_abs < image->escape_radius_squared) { re_temp = re * re - im * im + cnum.x; im = 2 * re * im + cnum.y; re = re_temp; sq_abs = re * re + im * im; iterations_++; } for (int block_y = pixel_y; block_y < pixel_y + downscale_factor && block_y < image->resolution_y; block_y++) { for (int block_x = pixel_x; block_x < pixel_x + downscale_factor && block_x < image->resolution_x; block_x++) { index = block_y * image->resolution_x + block_x; (image->iterationsArr)[index] = iterations_; (image->squared_absolute_values)[index] = sq_abs; } } } } } extern "C" void launch_mandelbrot_iterate_downscaled_cuda(int num_blocks, int block_size, mandelbrot_image * image, unsigned int downscale_factor) { mandelbrot_iterate_downscaled_cuda << < num_blocks, block_size >> > (image, downscale_factor); } // TODO: add coloring modes in CUDA __global__ void color_smooth_cuda(mandelbrot_image* image) { // Do some coloring! 
int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; int index; unsigned int iterations; color_rgb pixel_color; for (int pixel_y = block_index_x; pixel_y < image->resolution_y; pixel_y += block_stride_x) { for (int pixel_x = thread_index_x; pixel_x < image->resolution_x; pixel_x += thread_stride_x) { // Calculate the iterations required for a given point to exceed the escape radius. index = pixel_y * image->resolution_x + pixel_x; iterations = (image->iterationsArr)[index]; pixel_color.r = 0; pixel_color.g = 0; pixel_color.b = 0; if (iterations < image->max_iterations) { // Calculate the iterations required for a given point to exceed the escape radius. // Calculate the iterations required for a given point to exceed the escape radius. index = pixel_y * image->resolution_x + pixel_x; iterations = (image->iterationsArr)[index]; if (iterations < image->max_iterations) { float f_iterations = (float)iterations; float f_max_iterations = (float)image->max_iterations; // Smooth colors! float escape_size = __double2float_rn(image->squared_absolute_values[index]); float smoothed_iterations = iterations + 1 - log2f(log(escape_size)) + sqrtf(sqrtf(image->draw_radius_x)); float H = 360 * smoothed_iterations / f_max_iterations; float S = 0.7; float V = 1; // HSV to RGB conversion, yay! // TODO: look into edge cases for H and why they happen. //if (H > 360 || H < 0 || S > 1 || S < 0 || V > 1 || V < 0) //{ //printf("The given HSV values are not in valid range.\n H: %f S: %.2f, V: %.2f\n", H, S, V); //printf("Iterations: %f\n", f_iterations); //} float h = H / 60; float C = S * V; float X = C * (1 - fabsf((fmodf(h, 2) - 1))); float m = V - C; float r, g, b; if (h >= 0 && h <= 1) { r = C; g = X; b = 0; } else if (h > 1 && h < 2) { r = X; g = C; b = 0; } else if (h > 2 && h <= 3) { r = 0; g = C; b = X; } else if (h > 3 && h <= 4) { r = 0; g = X; b = C; } else if (h > 4 && h <= 5) { r = X; g = 0; b = C; } else if (h > 5 && h <= 6) { r = C; g = 0; b = X; } else { // color white to make stand out r = 1 - m; g = 1 - m; b = 1 - m; } pixel_color.r = (r + m) * 255; pixel_color.g = (g + m) * 255; pixel_color.b = (b + m) * 255; } } (image->pixels_rgb)[3 * index + 0] = pixel_color.r; // Red value (image->pixels_rgb)[3 * index + 1] = pixel_color.g; // Green value (image->pixels_rgb)[3 * index + 2] = pixel_color.b; // Blue value } } } extern "C" void launch_color_smooth_cuda(int num_blocks, int block_size, mandelbrot_image* image) { hipLaunchKernelGGL(( color_smooth_cuda) , dim3(num_blocks), dim3(block_size) , 0, 0, image); } __global__ void color_palette_cuda(mandelbrot_image* image, palette plt) { int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; int index; unsigned int iterations; color_rgb pixel_color; for (int pixel_y = block_index_x; pixel_y < image->resolution_y; pixel_y += block_stride_x) { for (int pixel_x = thread_index_x; pixel_x < image->resolution_x; pixel_x += thread_stride_x) { // Calculate the iterations required for a given point to exceed the escape radius. 
index = pixel_y * image->resolution_x + pixel_x; iterations = (image->iterationsArr)[index]; pixel_color.r = 0; pixel_color.g = 0; pixel_color.b = 0; if (iterations < image->max_iterations) { int color_index = iterations % plt.length; pixel_color = plt.colors[color_index]; // smooth color to make it a little easier on the eyes //int next_color_index = (color_index + 1) % p.length; //float escape_size = (float)(image->squared_absolute_values[index]); //float lerp_factor = 1 - log2f(log(escape_size)); //pixel_color = lerp_color(p.colors[color_index], p.colors[next_color_index], lerp_factor); } // Set the RGB values in the array (image->pixels_rgb)[3 * index + 0] = pixel_color.r; // Red value (image->pixels_rgb)[3 * index + 1] = pixel_color.g; // Green value (image->pixels_rgb)[3 * index + 2] = pixel_color.b; // Blue value } } } extern "C" void launch_color_palette_cuda(int num_blocks, int block_size, mandelbrot_image * image, palette plt) { hipLaunchKernelGGL(( color_palette_cuda) , dim3(num_blocks), dim3(block_size) , 0, 0, image, plt); }
289dbc9c9fba67bd5b8fe560412474b25db4f885.cu
#include <math.h> // CUDA imports #include <cuda_runtime.h> #include <cuComplex.h> #include <device_launch_parameters.h> #include "util/hsv_to_rgb.h" #include "util/color_palette.h" #include "mandelbrot_image.h" #include "constants.h" // Create a grid of complex numbers around the center point (center_real, center_imag). __global__ void build_complex_grid_cuda(mandelbrot_image* image) { int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; double step_x = 2 * image->draw_radius_x / image->resolution_x; double step_y = 2 * image->draw_radius_y / image->resolution_y; double point_re; double point_im; int index; // Start drawing in the bottom left, go row by row. for (int pixel_y = block_index_x; pixel_y < image->resolution_y; pixel_y += block_stride_x) { point_im = image->center_imag + pixel_y * step_y - image->draw_radius_y; for (int pixel_x = thread_index_x; pixel_x < image->resolution_x; pixel_x += thread_stride_x) { index = pixel_y * image->resolution_x + pixel_x; point_re = image->center_real + pixel_x * step_x - image->draw_radius_x; (image->points)[index] = make_cuDoubleComplex(point_re, point_im); //(image->iterated_points)[index] = make_cuDoubleComplex(point_re, point_im); } } } extern "C" void launch_build_complex_grid_cuda(int num_blocks, int block_size, mandelbrot_image* image) { build_complex_grid_cuda <<< num_blocks, block_size >>> (image); } __global__ void reset_render_arrays_cuda(mandelbrot_image* image) { int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; int index; // Start drawing in the bottom left, go row by row. for (int pixel_y = block_index_x; pixel_y < image->resolution_y; pixel_y += block_stride_x) { for (int pixel_x = thread_index_x; pixel_x < image->resolution_x; pixel_x += thread_stride_x) { index = pixel_y * image->resolution_x + pixel_x; (image->iterationsArr)[index] = 0; (image->squared_absolute_values)[index] = 0; } } } extern "C" void launch_reset_render_arrays_cuda(int num_blocks, int block_size, mandelbrot_image* image) { reset_render_arrays_cuda <<< num_blocks, block_size >>> (image); } __global__ void mandelbrot_iterate_cuda(mandelbrot_image* image) { // For every complex point in the current view, calculate the // iterations required for a given point to exceed the escape radius. 
int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; int index; unsigned int iterations_; for (int pixel_y = block_index_x; pixel_y < image->resolution_y; pixel_y += block_stride_x) { for (int pixel_x = thread_index_x; pixel_x < image->resolution_x; pixel_x += thread_stride_x) { index = pixel_y * image->resolution_x + pixel_x; // `cnum` is the complex number that we'll iterate cuDoubleComplex cnum = (image->points)[index]; double re = cnum.x; double im = cnum.y; // The below variable exists so that we can change `re` // without it affecting the calculation for the new // value of `im` double re_temp; double sq_abs = (image->squared_absolute_values)[index]; iterations_ = 0; while (iterations_ < image->max_iterations && sq_abs < image->escape_radius_squared) { re_temp = re * re - im * im + cnum.x; im = 2 * re * im + cnum.y; re = re_temp; sq_abs = re * re + im * im; iterations_++; } (image->iterationsArr)[index] = iterations_; (image->squared_absolute_values)[index] = sq_abs; } } } extern "C" void launch_mandelbrot_iterate_cuda(int num_blocks, int block_size, mandelbrot_image* image) { mandelbrot_iterate_cuda <<< num_blocks, block_size >>> (image); } __global__ void mandelbrot_iterate_downscaled_cuda(mandelbrot_image* image, unsigned int downscale_factor) { // For every complex point in the current view, calculate the // iterations required for a given point to exceed the escape radius. int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; int index; unsigned int iterations_; for (int pixel_y = block_index_x * downscale_factor; pixel_y < image->resolution_y; pixel_y += block_stride_x * downscale_factor) { for (int pixel_x = thread_index_x * downscale_factor; pixel_x < image->resolution_x; pixel_x += thread_stride_x * downscale_factor) { index = pixel_y * image->resolution_x + pixel_x; // `cnum` is the complex number that we'll iterate cuDoubleComplex cnum = (image->points)[index]; double re = cnum.x; double im = cnum.y; // The below variable exists so that we can change `re` // without it affecting the calculation for the new // value of `im` double re_temp; double sq_abs = (image->squared_absolute_values)[index]; iterations_ = 0; while (iterations_ < image->max_iterations && sq_abs < image->escape_radius_squared) { re_temp = re * re - im * im + cnum.x; im = 2 * re * im + cnum.y; re = re_temp; sq_abs = re * re + im * im; iterations_++; } for (int block_y = pixel_y; block_y < pixel_y + downscale_factor && block_y < image->resolution_y; block_y++) { for (int block_x = pixel_x; block_x < pixel_x + downscale_factor && block_x < image->resolution_x; block_x++) { index = block_y * image->resolution_x + block_x; (image->iterationsArr)[index] = iterations_; (image->squared_absolute_values)[index] = sq_abs; } } } } } extern "C" void launch_mandelbrot_iterate_downscaled_cuda(int num_blocks, int block_size, mandelbrot_image * image, unsigned int downscale_factor) { mandelbrot_iterate_downscaled_cuda << < num_blocks, block_size >> > (image, downscale_factor); } // TODO: add coloring modes in CUDA __global__ void color_smooth_cuda(mandelbrot_image* image) { // Do some coloring! 
int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; int index; unsigned int iterations; color_rgb pixel_color; for (int pixel_y = block_index_x; pixel_y < image->resolution_y; pixel_y += block_stride_x) { for (int pixel_x = thread_index_x; pixel_x < image->resolution_x; pixel_x += thread_stride_x) { // Calculate the iterations required for a given point to exceed the escape radius. index = pixel_y * image->resolution_x + pixel_x; iterations = (image->iterationsArr)[index]; pixel_color.r = 0; pixel_color.g = 0; pixel_color.b = 0; if (iterations < image->max_iterations) { // Calculate the iterations required for a given point to exceed the escape radius. // Calculate the iterations required for a given point to exceed the escape radius. index = pixel_y * image->resolution_x + pixel_x; iterations = (image->iterationsArr)[index]; if (iterations < image->max_iterations) { float f_iterations = (float)iterations; float f_max_iterations = (float)image->max_iterations; // Smooth colors! float escape_size = __double2float_rn(image->squared_absolute_values[index]); float smoothed_iterations = iterations + 1 - log2f(log(escape_size)) + sqrtf(sqrtf(image->draw_radius_x)); float H = 360 * smoothed_iterations / f_max_iterations; float S = 0.7; float V = 1; // HSV to RGB conversion, yay! // TODO: look into edge cases for H and why they happen. //if (H > 360 || H < 0 || S > 1 || S < 0 || V > 1 || V < 0) //{ //printf("The given HSV values are not in valid range.\n H: %f S: %.2f, V: %.2f\n", H, S, V); //printf("Iterations: %f\n", f_iterations); //} float h = H / 60; float C = S * V; float X = C * (1 - fabsf((fmodf(h, 2) - 1))); float m = V - C; float r, g, b; if (h >= 0 && h <= 1) { r = C; g = X; b = 0; } else if (h > 1 && h < 2) { r = X; g = C; b = 0; } else if (h > 2 && h <= 3) { r = 0; g = C; b = X; } else if (h > 3 && h <= 4) { r = 0; g = X; b = C; } else if (h > 4 && h <= 5) { r = X; g = 0; b = C; } else if (h > 5 && h <= 6) { r = C; g = 0; b = X; } else { // color white to make stand out r = 1 - m; g = 1 - m; b = 1 - m; } pixel_color.r = (r + m) * 255; pixel_color.g = (g + m) * 255; pixel_color.b = (b + m) * 255; } } (image->pixels_rgb)[3 * index + 0] = pixel_color.r; // Red value (image->pixels_rgb)[3 * index + 1] = pixel_color.g; // Green value (image->pixels_rgb)[3 * index + 2] = pixel_color.b; // Blue value } } } extern "C" void launch_color_smooth_cuda(int num_blocks, int block_size, mandelbrot_image* image) { color_smooth_cuda <<< num_blocks, block_size >>> (image); } __global__ void color_palette_cuda(mandelbrot_image* image, palette plt) { int block_index_x = blockIdx.x; int block_stride_x = gridDim.x; int thread_index_x = threadIdx.x; int thread_stride_x = blockDim.x; int index; unsigned int iterations; color_rgb pixel_color; for (int pixel_y = block_index_x; pixel_y < image->resolution_y; pixel_y += block_stride_x) { for (int pixel_x = thread_index_x; pixel_x < image->resolution_x; pixel_x += thread_stride_x) { // Calculate the iterations required for a given point to exceed the escape radius. 
index = pixel_y * image->resolution_x + pixel_x; iterations = (image->iterationsArr)[index]; pixel_color.r = 0; pixel_color.g = 0; pixel_color.b = 0; if (iterations < image->max_iterations) { int color_index = iterations % plt.length; pixel_color = plt.colors[color_index]; // smooth color to make it a little easier on the eyes //int next_color_index = (color_index + 1) % p.length; //float escape_size = (float)(image->squared_absolute_values[index]); //float lerp_factor = 1 - log2f(log(escape_size)); //pixel_color = lerp_color(p.colors[color_index], p.colors[next_color_index], lerp_factor); } // Set the RGB values in the array (image->pixels_rgb)[3 * index + 0] = pixel_color.r; // Red value (image->pixels_rgb)[3 * index + 1] = pixel_color.g; // Green value (image->pixels_rgb)[3 * index + 2] = pixel_color.b; // Blue value } } } extern "C" void launch_color_palette_cuda(int num_blocks, int block_size, mandelbrot_image * image, palette plt) { color_palette_cuda <<< num_blocks, block_size >>> (image, plt); }
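All of the kernels above cover the image with the same pattern: blocks stride over rows and threads stride over columns, so any launch configuration touches every pixel. A stripped-down sketch of that loop (width and height are placeholders for image->resolution_x and image->resolution_y):

__global__ void for_each_pixel(int width, int height /*, ... */) {
    for (int y = blockIdx.x; y < height; y += gridDim.x) {          // block-stride over rows
        for (int x = threadIdx.x; x < width; x += blockDim.x) {     // thread-stride over columns
            int index = y * width + x;
            // per-pixel work goes here
        }
    }
}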
2bb23147ae8f73a26ba647bcf3ecfb601e266f9a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include<stdio.h>
#include <math.h>

#define STRIDE_64K 65536

__global__ void init(int n, float *x, float *y)
{
  int lane_id = threadIdx.x & 31;
  size_t warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
  size_t warps_per_grid = (blockDim.x * gridDim.x) >> 5;
  size_t warp_total = ((sizeof(float)*n) + STRIDE_64K-1) / STRIDE_64K;
  if(blockIdx.x==0 && threadIdx.x==0) {
    //printf("\n TId[%d] ", threadIdx.x);
    //printf(" WId[%u] ", warp_id);
    //printf(" LId[%u] ", lane_id);
    //printf(" WperG[%u] ", warps_per_grid);
    //printf(" wTot[%u] ", warp_total);
    //printf(" rep[%d] ", STRIDE_64K/sizeof(float)/32);
  }
  for(; warp_id < warp_total; warp_id += warps_per_grid) {
    #pragma unroll
    for(int rep = 0; rep < STRIDE_64K/sizeof(float)/32; rep++) {
      size_t ind = warp_id * STRIDE_64K/sizeof(float) + rep * 32 + lane_id;
      if (ind < n) {
        x[ind] = 1.0f;
        //if(blockIdx.x==0 && threadIdx.x==0) {
        // printf(" \nind[%d] ", ind);
        //}
        y[ind] = 2.0f;
      }
    }
  }
}

// CUDA kernel to add elements of two arrays
__global__ void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1<<20;
  float *x, *y;

  // Allocate Unified Memory -- accessible from CPU or GPU
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&y, N*sizeof(float));

  // Launch kernel on 1M elements on the GPU
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  size_t warp_total = ((sizeof(float)*N) + STRIDE_64K-1) / STRIDE_64K;
  int numBlocksInit = (warp_total*32) / blockSize;
  hipLaunchKernelGGL(( init), dim3(numBlocksInit), dim3(blockSize), 0, 0, N, x, y);
  hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);

  // Wait for GPU to finish before accessing on host
  hipDeviceSynchronize();

  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  hipFree(x);
  hipFree(y);

  return 0;
}
2bb23147ae8f73a26ba647bcf3ecfb601e266f9a.cu
#include <iostream>
#include<stdio.h>
#include <math.h>

#define STRIDE_64K 65536

__global__ void init(int n, float *x, float *y)
{
  int lane_id = threadIdx.x & 31;
  size_t warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
  size_t warps_per_grid = (blockDim.x * gridDim.x) >> 5;
  size_t warp_total = ((sizeof(float)*n) + STRIDE_64K-1) / STRIDE_64K;
  if(blockIdx.x==0 && threadIdx.x==0) {
    //printf("\n TId[%d] ", threadIdx.x);
    //printf(" WId[%u] ", warp_id);
    //printf(" LId[%u] ", lane_id);
    //printf(" WperG[%u] ", warps_per_grid);
    //printf(" wTot[%u] ", warp_total);
    //printf(" rep[%d] ", STRIDE_64K/sizeof(float)/32);
  }
  for(; warp_id < warp_total; warp_id += warps_per_grid) {
    #pragma unroll
    for(int rep = 0; rep < STRIDE_64K/sizeof(float)/32; rep++) {
      size_t ind = warp_id * STRIDE_64K/sizeof(float) + rep * 32 + lane_id;
      if (ind < n) {
        x[ind] = 1.0f;
        //if(blockIdx.x==0 && threadIdx.x==0) {
        // printf(" \nind[%d] ", ind);
        //}
        y[ind] = 2.0f;
      }
    }
  }
}

// CUDA kernel to add elements of two arrays
__global__ void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1<<20;
  float *x, *y;

  // Allocate Unified Memory -- accessible from CPU or GPU
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));

  // Launch kernel on 1M elements on the GPU
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  size_t warp_total = ((sizeof(float)*N) + STRIDE_64K-1) / STRIDE_64K;
  int numBlocksInit = (warp_total*32) / blockSize;
  init<<<numBlocksInit, blockSize>>>(N, x, y);
  add<<<numBlocks, blockSize>>>(N, x, y);

  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();

  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  cudaFree(x);
  cudaFree(y);

  return 0;
}
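To make the launch arithmetic in main concrete, the numbers below follow directly from the constants in the file above (N = 1<<20, blockSize = 256, STRIDE_64K = 65536):

// data per array       : (1 << 20) floats * 4 B = 4 MiB
// warp_total            : 4 MiB / 64 KiB = 64 warps, one 64 KiB stripe each
// numBlocksInit          : (64 warps * 32 threads) / 256 threads per block = 8 blocks for init
// inside init, per stripe: 64 KiB / 4 B / 32 lanes = 512 reps, each rep writing 32 consecutive floats
// numBlocks (for add)    : (1<<20 + 255) / 256 = 4096 blocks, a plain grid-stride launch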
682bbd05a9145d62b55ae8671684976c316c218e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/channel_backprop_stats_op.h" namespace caffe2 { namespace { // based on "Optimizing Parallel Reduction in CUDA" by Mark Harris // note - volatile keyword is needed to allow doing a warp reduction without // synchronization on recent architectures template <unsigned int blockSize> __device__ void warpReduce(volatile float* sdata, unsigned int tid) { // note - the if statements are "free" as they are resolved at compile time if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } template <unsigned int blockSize> __global__ void ChannelBackpropStatsBlockKernel( int N, int C, int valsPerChannel, const float* X, const float* dY, const float* mean, const float* invStddev, float* dBiasBlocks, float* dScaleBlocks) { __shared__ float dBiasData[blockSize]; __shared__ float dScaleData[blockSize]; auto tid = threadIdx.x; auto numBlocksPerChannel = (valsPerChannel + blockSize - 1) / blockSize; auto localBlockIndex = blockIdx.x % numBlocksPerChannel; auto inputIndex = (blockIdx.x / numBlocksPerChannel) * valsPerChannel + localBlockIndex * blockSize + tid; auto n = blockIdx.x / numBlocksPerChannel / C; auto c = (blockIdx.x / numBlocksPerChannel) % C; dBiasData[tid] = 0; dScaleData[tid] = 0; if (localBlockIndex * blockSize + tid < valsPerChannel) { dBiasData[tid] += dY[inputIndex]; dScaleData[tid] += (X[inputIndex] - mean[c]) * invStddev[c] * dY[inputIndex]; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { dBiasData[tid] += dBiasData[tid + 256]; dScaleData[tid] += dScaleData[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { dBiasData[tid] += dBiasData[tid + 128]; dScaleData[tid] += dScaleData[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { dBiasData[tid] += dBiasData[tid + 64]; dScaleData[tid] += dScaleData[tid + 64]; } __syncthreads(); } if (tid < 32) { warpReduce<blockSize>(dBiasData, tid); warpReduce<blockSize>(dScaleData, tid); } // output block data sorted by C to simplify second reduction if (tid == 0) { auto outputIndex = (c * N + n) * numBlocksPerChannel + localBlockIndex; dBiasBlocks[outputIndex] = dBiasData[0]; dScaleBlocks[outputIndex] = dScaleData[0]; } } template <unsigned int blockSize> __global__ void ChannelBackpropStatsFinalSumsKernel( int N, int C, int numSumsPerChannel, const float* dBiasScratch, const float* dScaleScratch, float* dBias, float* dScale) { __shared__ float dBiasData[blockSize]; __shared__ float dScaleData[blockSize]; auto tid = threadIdx.x; auto inputIndex = blockIdx.x * N * numSumsPerChannel + tid; dBiasData[tid] = 0; dScaleData[tid] = 0; for (auto i = inputIndex; i < (blockIdx.x + 1) * N * numSumsPerChannel; i += blockSize) { dBiasData[tid] += dBiasScratch[i]; dScaleData[tid] += dScaleScratch[i]; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { dBiasData[tid] += dBiasData[tid + 256]; dScaleData[tid] += dScaleData[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { dBiasData[tid] += dBiasData[tid + 128]; dScaleData[tid] += dScaleData[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { dBiasData[tid] += dBiasData[tid + 64]; dScaleData[tid] += dScaleData[tid + 64]; 
} __syncthreads(); } if (tid < 32) { warpReduce<blockSize>(dBiasData, tid); warpReduce<blockSize>(dScaleData, tid); } if (tid == 0) { dBias[blockIdx.x] = dBiasData[0]; dScale[blockIdx.x] = dScaleData[0]; } } } // namespace template <> bool ChannelBackpropStatsOp<CUDAContext>::RunOnDevice() { const auto& X = Input(INPUT); const auto& dY = Input(OUTPUT_GRAD); const auto& mean = Input(SAVED_MEAN); const auto& invStddev = Input(SAVED_INV_STDDEV); CAFFE_ENFORCE(X.ndim() >= 3 && X.ndim() <= 5); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.ndim() > 3 ? X.dim32(3) : 1; const int D = X.ndim() > 4 ? X.dim32(4) : 1; const auto Xarr = X.data<float>(); const auto dYarr = dY.data<float>(); const auto meanArr = mean.data<float>(); const auto invStddevArr = invStddev.data<float>(); auto dBias = Output(BIAS_GRAD, {C}, at::dtype<float>()); auto dScale = Output(SCALE_GRAD, {C}, at::dtype<float>()); const auto valsPerChannel = H * W * D; const auto numBlocksPerChannel = CAFFE_GET_BLOCKS(valsPerChannel); const auto numBlocksTotal = numBlocksPerChannel * N * C; dBiasScratch_.Resize(numBlocksTotal); dScaleScratch_.Resize(numBlocksTotal); hipLaunchKernelGGL(( ChannelBackpropStatsBlockKernel<CAFFE_CUDA_NUM_THREADS>) , dim3(numBlocksTotal), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, valsPerChannel, Xarr, dYarr, meanArr, invStddevArr, dBiasScratch_.mutable_data<float>(), dScaleScratch_.mutable_data<float>()); hipLaunchKernelGGL(( ChannelBackpropStatsFinalSumsKernel<CAFFE_CUDA_NUM_THREADS>) , dim3(C), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, numBlocksPerChannel, dBiasScratch_.data<float>(), dScaleScratch_.data<float>(), dBias->template mutable_data<float>(), dScale->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR( ChannelBackpropStats, ChannelBackpropStatsOp<CUDAContext>); } // namespace caffe2
682bbd05a9145d62b55ae8671684976c316c218e.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/channel_backprop_stats_op.h" namespace caffe2 { namespace { // based on "Optimizing Parallel Reduction in CUDA" by Mark Harris // note - volatile keyword is needed to allow doing a warp reduction without // synchronization on recent architectures template <unsigned int blockSize> __device__ void warpReduce(volatile float* sdata, unsigned int tid) { // note - the if statements are "free" as they are resolved at compile time if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } template <unsigned int blockSize> __global__ void ChannelBackpropStatsBlockKernel( int N, int C, int valsPerChannel, const float* X, const float* dY, const float* mean, const float* invStddev, float* dBiasBlocks, float* dScaleBlocks) { __shared__ float dBiasData[blockSize]; __shared__ float dScaleData[blockSize]; auto tid = threadIdx.x; auto numBlocksPerChannel = (valsPerChannel + blockSize - 1) / blockSize; auto localBlockIndex = blockIdx.x % numBlocksPerChannel; auto inputIndex = (blockIdx.x / numBlocksPerChannel) * valsPerChannel + localBlockIndex * blockSize + tid; auto n = blockIdx.x / numBlocksPerChannel / C; auto c = (blockIdx.x / numBlocksPerChannel) % C; dBiasData[tid] = 0; dScaleData[tid] = 0; if (localBlockIndex * blockSize + tid < valsPerChannel) { dBiasData[tid] += dY[inputIndex]; dScaleData[tid] += (X[inputIndex] - mean[c]) * invStddev[c] * dY[inputIndex]; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { dBiasData[tid] += dBiasData[tid + 256]; dScaleData[tid] += dScaleData[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { dBiasData[tid] += dBiasData[tid + 128]; dScaleData[tid] += dScaleData[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { dBiasData[tid] += dBiasData[tid + 64]; dScaleData[tid] += dScaleData[tid + 64]; } __syncthreads(); } if (tid < 32) { warpReduce<blockSize>(dBiasData, tid); warpReduce<blockSize>(dScaleData, tid); } // output block data sorted by C to simplify second reduction if (tid == 0) { auto outputIndex = (c * N + n) * numBlocksPerChannel + localBlockIndex; dBiasBlocks[outputIndex] = dBiasData[0]; dScaleBlocks[outputIndex] = dScaleData[0]; } } template <unsigned int blockSize> __global__ void ChannelBackpropStatsFinalSumsKernel( int N, int C, int numSumsPerChannel, const float* dBiasScratch, const float* dScaleScratch, float* dBias, float* dScale) { __shared__ float dBiasData[blockSize]; __shared__ float dScaleData[blockSize]; auto tid = threadIdx.x; auto inputIndex = blockIdx.x * N * numSumsPerChannel + tid; dBiasData[tid] = 0; dScaleData[tid] = 0; for (auto i = inputIndex; i < (blockIdx.x + 1) * N * numSumsPerChannel; i += blockSize) { dBiasData[tid] += dBiasScratch[i]; dScaleData[tid] += dScaleScratch[i]; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { dBiasData[tid] += dBiasData[tid + 256]; dScaleData[tid] += dScaleData[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { dBiasData[tid] += dBiasData[tid + 128]; dScaleData[tid] += dScaleData[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { dBiasData[tid] += dBiasData[tid + 64]; dScaleData[tid] += dScaleData[tid + 64]; } __syncthreads(); } if (tid < 32) { warpReduce<blockSize>(dBiasData, tid); 
warpReduce<blockSize>(dScaleData, tid); } if (tid == 0) { dBias[blockIdx.x] = dBiasData[0]; dScale[blockIdx.x] = dScaleData[0]; } } } // namespace template <> bool ChannelBackpropStatsOp<CUDAContext>::RunOnDevice() { const auto& X = Input(INPUT); const auto& dY = Input(OUTPUT_GRAD); const auto& mean = Input(SAVED_MEAN); const auto& invStddev = Input(SAVED_INV_STDDEV); CAFFE_ENFORCE(X.ndim() >= 3 && X.ndim() <= 5); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.ndim() > 3 ? X.dim32(3) : 1; const int D = X.ndim() > 4 ? X.dim32(4) : 1; const auto Xarr = X.data<float>(); const auto dYarr = dY.data<float>(); const auto meanArr = mean.data<float>(); const auto invStddevArr = invStddev.data<float>(); auto dBias = Output(BIAS_GRAD, {C}, at::dtype<float>()); auto dScale = Output(SCALE_GRAD, {C}, at::dtype<float>()); const auto valsPerChannel = H * W * D; const auto numBlocksPerChannel = CAFFE_GET_BLOCKS(valsPerChannel); const auto numBlocksTotal = numBlocksPerChannel * N * C; dBiasScratch_.Resize(numBlocksTotal); dScaleScratch_.Resize(numBlocksTotal); ChannelBackpropStatsBlockKernel<CAFFE_CUDA_NUM_THREADS> <<<numBlocksTotal, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, valsPerChannel, Xarr, dYarr, meanArr, invStddevArr, dBiasScratch_.mutable_data<float>(), dScaleScratch_.mutable_data<float>()); ChannelBackpropStatsFinalSumsKernel<CAFFE_CUDA_NUM_THREADS> <<<C, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, numBlocksPerChannel, dBiasScratch_.data<float>(), dScaleScratch_.data<float>(), dBias->template mutable_data<float>(), dScale->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR( ChannelBackpropStats, ChannelBackpropStatsOp<CUDAContext>); } // namespace caffe2
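This pair shows the same launch translation as the earlier ones, but with template arguments, an explicit stream, and a shared-memory size carried through. Schematically (kernel and argument names are placeholders, not the real Caffe2 symbols):

// CUDA:
SomeKernel<BLOCK_SIZE><<<numBlocks, BLOCK_SIZE, 0, stream>>>(a, b, c);
// HIP (as emitted above): the templated kernel name is wrapped in parentheses and the
// launch configuration becomes the 2nd-5th macro arguments
hipLaunchKernelGGL((SomeKernel<BLOCK_SIZE>), dim3(numBlocks), dim3(BLOCK_SIZE), 0, stream, a, b, c);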
2f344a81ac4d165b441b853accd04389cd38d180.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_tea_leaf_xpy_kernel; int xdim0_tea_leaf_xpy_kernel_h = -1; int ydim0_tea_leaf_xpy_kernel_h = -1; __constant__ int xdim1_tea_leaf_xpy_kernel; int xdim1_tea_leaf_xpy_kernel_h = -1; int ydim1_tea_leaf_xpy_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y) (x+xdim0_tea_leaf_xpy_kernel*(y)) #define OPS_ACC1(x,y) (x+xdim1_tea_leaf_xpy_kernel*(y)) //user function __device__ void tea_leaf_xpy_kernel_gpu(double * u, const double * p) { u[OPS_ACC0(0,0)] = u[OPS_ACC0(0,0)] + p[OPS_ACC1(0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_tea_leaf_xpy_kernel( double* __restrict arg0, const double* __restrict arg1, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_xpy_kernel; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_xpy_kernel; if (idx_x < size0 && idx_y < size1) { tea_leaf_xpy_kernel_gpu(arg0, arg1); } } // host stub function void ops_par_loop_tea_leaf_xpy_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { //Timing double t1,t2,c1,c2; ops_arg args[2] = { arg0, arg1}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args,2,range,25)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(25,"tea_leaf_xpy_kernel"); OPS_kernels[25].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; if (xdim0 != xdim0_tea_leaf_xpy_kernel_h || xdim1 != xdim1_tea_leaf_xpy_kernel_h) { hipMemcpyToSymbol( xdim0_tea_leaf_xpy_kernel, &xdim0, sizeof(int) ); xdim0_tea_leaf_xpy_kernel_h = xdim0; hipMemcpyToSymbol( xdim1_tea_leaf_xpy_kernel, &xdim1, sizeof(int) ); xdim1_tea_leaf_xpy_kernel_h = xdim1; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[2]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + 
OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[25].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_tea_leaf_xpy_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],x_size, y_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[25].time += t1-t2; } ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0],range); if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[25].mpi_time += t2-t1; OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
2f344a81ac4d165b441b853accd04389cd38d180.cu
// // auto-generated by ops.py // __constant__ int xdim0_tea_leaf_xpy_kernel; int xdim0_tea_leaf_xpy_kernel_h = -1; int ydim0_tea_leaf_xpy_kernel_h = -1; __constant__ int xdim1_tea_leaf_xpy_kernel; int xdim1_tea_leaf_xpy_kernel_h = -1; int ydim1_tea_leaf_xpy_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y) (x+xdim0_tea_leaf_xpy_kernel*(y)) #define OPS_ACC1(x,y) (x+xdim1_tea_leaf_xpy_kernel*(y)) //user function __device__ void tea_leaf_xpy_kernel_gpu(double * u, const double * p) { u[OPS_ACC0(0,0)] = u[OPS_ACC0(0,0)] + p[OPS_ACC1(0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_tea_leaf_xpy_kernel( double* __restrict arg0, const double* __restrict arg1, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_xpy_kernel; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_xpy_kernel; if (idx_x < size0 && idx_y < size1) { tea_leaf_xpy_kernel_gpu(arg0, arg1); } } // host stub function void ops_par_loop_tea_leaf_xpy_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { //Timing double t1,t2,c1,c2; ops_arg args[2] = { arg0, arg1}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args,2,range,25)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(25,"tea_leaf_xpy_kernel"); OPS_kernels[25].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; if (xdim0 != xdim0_tea_leaf_xpy_kernel_h || xdim1 != xdim1_tea_leaf_xpy_kernel_h) { cudaMemcpyToSymbol( xdim0_tea_leaf_xpy_kernel, &xdim0, sizeof(int) ); xdim0_tea_leaf_xpy_kernel_h = xdim0; cudaMemcpyToSymbol( xdim1_tea_leaf_xpy_kernel, &xdim1, sizeof(int) ); xdim1_tea_leaf_xpy_kernel_h = xdim1; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[2]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = 
args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[25].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data ops_tea_leaf_xpy_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],x_size, y_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[25].time += t1-t2; } ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0],range); if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[25].mpi_time += t2-t1; OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
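The tea_leaf_xpy_kernel pair above is OPS-generated: the device function adds p into u through a pitched OPS_ACC index, and the host stub sizes the launch grid as (x_size-1)/block+1 by (y_size-1)/block+1. The stand-alone sketch below reproduces only that core pattern, without the OPS runtime; the buffer sizes, the fill values, and the names xpy_kernel and xdim are assumptions made purely for illustration.

// Minimal sketch (assumptions: plain cudaMalloc'd buffers, no OPS runtime) of the
// pitched 2D "x plus y" update and the grid-sizing rule used by the generated host stub.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void xpy_kernel(double *u, const double *p, int xdim, int size0, int size1) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < size0 && y < size1)
        u[x + y * xdim] += p[x + y * xdim];   // same arithmetic as tea_leaf_xpy_kernel_gpu
}

int main() {
    const int xdim = 128, ydim = 96;          // padded row pitch and number of rows (illustrative)
    const int size0 = 120, size1 = 90;        // interior range actually updated
    const size_t bytes = sizeof(double) * xdim * ydim;

    double *u, *p;
    cudaMalloc(&u, bytes);
    cudaMalloc(&p, bytes);

    // initialise u to 1.0 and p to 2.0 through a host staging buffer
    double *h = new double[xdim * ydim];
    for (int i = 0; i < xdim * ydim; ++i) h[i] = 1.0;
    cudaMemcpy(u, h, bytes, cudaMemcpyHostToDevice);
    for (int i = 0; i < xdim * ydim; ++i) h[i] = 2.0;
    cudaMemcpy(p, h, bytes, cudaMemcpyHostToDevice);

    dim3 block(16, 16);
    dim3 grid((size0 - 1) / block.x + 1, (size1 - 1) / block.y + 1);   // same sizing rule as the stub
    xpy_kernel<<<grid, block>>>(u, p, xdim, size0, size1);
    cudaDeviceSynchronize();

    cudaMemcpy(h, u, bytes, cudaMemcpyDeviceToHost);
    printf("u(0,0) = %f, u(size0-1,size1-1) = %f\n", h[0], h[(size0 - 1) + (size1 - 1) * xdim]);

    delete[] h;
    cudaFree(u);
    cudaFree(p);
    return 0;
}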
3c9f1c29ebc8ad6da3f989321b3d9f8411fc3f73.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPEvent.h> #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h> #include <ATen/native/Copy.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <THH/THH.h> #ifdef __HIP_PLATFORM_HCC__ #include <hip/hip_version.h> #endif namespace at { namespace native { using namespace at::cuda; // device-to-device copy, does type conversion void copy_device_to_device(TensorIterator& iter, bool non_blocking) { int64_t numel = iter.numel(); // We can memcpy the memory if both tensors have the same type AND both // tensors are contiguous after dimension coalescing and reordering. bool same_type = iter.dtype(0) == iter.dtype(1); bool memcpy_eligible = same_type && iter.is_contiguous(); Device dst_device = iter.device(0); Device src_device = iter.device(1); HIPGuardMasqueradingAsCUDA device_guard(src_device); // We always perform the copy on the source device, using the current stream // on the source device, and we fully synchronize on both src and dst's // current streams for completion of the copy. We have to explicitly do this // for non-contig copies. This mimics the behavior of cross-device // hipMemcpyAsync on the default stream. HIPStreamMasqueradingAsCUDA copy_stream = getCurrentHIPStreamMasqueradingAsCUDA(src_device.index()); if (src_device != dst_device) { // This is a cross-device copy on the src current stream and dst current // stream. We perform a two-way barrier between both devices' streams // before the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are handled, so // that no one is operating on the dst memory when we perform the copy. // src waits on dst barrier (src already waits on src) CUDAEvent dst_ready; device_guard.set_device(dst_device); dst_ready.record(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index())); device_guard.set_device(src_device); dst_ready.block(copy_stream); } if (memcpy_eligible) { void *dst = iter.data_ptr(0); void *src = iter.data_ptr(1); size_t size = numel * iter.element_size(0); if (src != dst || src_device != dst_device) { // Perform the copy AT_CUDA_CHECK(hipMemcpyAsync( dst, src, size, hipMemcpyDeviceToDevice, copy_stream)); } } else { auto dtype = iter.dtype(0); if (isQIntType(dtype)) { AT_DISPATCH_QINT_TYPES(dtype, "copy_", [&] { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; }); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kHalf, kBool, kBFloat16, dtype, "copy_", [&] { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; }); }); } } if (src_device != dst_device) { // dst waits on src barrier (dst already waits on dst). We cannot // operate on dst's copy until the copy is complete. // Still on src_device, record stream event CUDAEvent src_ready; src_ready.record(copy_stream); device_guard.set_device(dst_device); src_ready.block(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index())); } AT_CUDA_CHECK(hipGetLastError()); } static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) { Device dst_device = iter.device(0); Device src_device = iter.device(1); if (dst_device == src_device) { // We never require temporaries for copies on the same GPU. 
TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda()); return false; } bool same_dtype = iter.dtype(0) == iter.dtype(1); if (same_dtype && iter.is_contiguous()) { // Contiguous same-dtype copies can always use hipMemcpyAsync return false; } else if (dst_device.is_cuda() && src_device.is_cuda()) { // Copies between GPUs can use the copy kernel if P2P is supported return !p2p_enabled; } else { // The remaining cases require temporaries. For example, this includes // non-contiguous copies between CPU and GPU. return true; } } static bool maybe_enable_p2p_access(Device dst_device, Device src_device) { if (dst_device.is_cpu() || src_device.is_cpu()) { return false; } return THCState_getPeerToPeerAccess( globalContext().getTHCState(), src_device.index(), dst_device.index()); } static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) { AT_ASSERT(iter.ntensors() == 2); Device dst_device = iter.device(0); Device src_device = iter.device(1); // Enable p2p access between devices. (No-op if it involves the CPU) bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device); if (copy_requires_temporaries(iter, p2p_enabled)) { // NB: this involves recursive calls to copy. Be careful that those copies // don't require temporaries or you will cause an infinite recursion! auto& dst = iter.tensor(0); Tensor dst_contig; Tensor src_contig; // Type conversions are performed on the CPU for CPU-GPU copies and on // the src device for GPU-GPU copies. if (iter.device_type(0) == kCUDA) { dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous(); } else { bool same_type = iter.dtype(0) == iter.dtype(1); dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).expand_as(dst).contiguous(); } // perform a same-dtype copy on contiguous tensors TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes())); TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type()); dst_contig.copy_(src_contig, non_blocking); // if necessary, copy back into dst if (!dst_contig.is_same(dst)) { TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device()); dst.copy_(dst_contig, non_blocking); } return; } // Copy on GPU (or between GPUs) if (dst_device.is_cuda() && src_device.is_cuda()) { copy_device_to_device(iter, non_blocking); return; } // Copy between CPU and GPU hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; hipMemcpyKind kind; if (dst_device.is_cuda() && src_device.is_cpu()) { device_guard.set_device(dst_device); kind = hipMemcpyHostToDevice; } else if (dst_device.is_cpu() && src_device.is_cuda()) { device_guard.set_device(src_device); kind = hipMemcpyDeviceToHost; } else { TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()"); } void* dst = iter.data_ptr(0); void* src = iter.data_ptr(1); int64_t nbytes = iter.numel() * iter.element_size(0); HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA(); if (non_blocking) { AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream)); void* ptr = (dst_device == kCPU ? 
dst : src); AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream)); } else { #if HIP_VERSION >= 301 AT_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream)); #else AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream)); AT_CUDA_CHECK(hipStreamSynchronize(stream)); #endif } } REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda); } // namespace native } // namespace at
3c9f1c29ebc8ad6da3f989321b3d9f8411fc3f73.cu
#include <ATen/ATen.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAEvent.h> #include <c10/cuda/CUDAStream.h> #include <ATen/native/Copy.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <THC/THC.h> #ifdef __HIP_PLATFORM_HCC__ #include <hip/hip_version.h> #endif namespace at { namespace native { using namespace at::cuda; // device-to-device copy, does type conversion void copy_device_to_device(TensorIterator& iter, bool non_blocking) { int64_t numel = iter.numel(); // We can memcpy the memory if both tensors have the same type AND both // tensors are contiguous after dimension coalescing and reordering. bool same_type = iter.dtype(0) == iter.dtype(1); bool memcpy_eligible = same_type && iter.is_contiguous(); Device dst_device = iter.device(0); Device src_device = iter.device(1); CUDAGuard device_guard(src_device); // We always perform the copy on the source device, using the current stream // on the source device, and we fully synchronize on both src and dst's // current streams for completion of the copy. We have to explicitly do this // for non-contig copies. This mimics the behavior of cross-device // cudaMemcpyAsync on the default stream. CUDAStream copy_stream = getCurrentCUDAStream(src_device.index()); if (src_device != dst_device) { // This is a cross-device copy on the src current stream and dst current // stream. We perform a two-way barrier between both devices' streams // before the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are handled, so // that no one is operating on the dst memory when we perform the copy. // src waits on dst barrier (src already waits on src) CUDAEvent dst_ready; device_guard.set_device(dst_device); dst_ready.record(getCurrentCUDAStream(dst_device.index())); device_guard.set_device(src_device); dst_ready.block(copy_stream); } if (memcpy_eligible) { void *dst = iter.data_ptr(0); void *src = iter.data_ptr(1); size_t size = numel * iter.element_size(0); if (src != dst || src_device != dst_device) { // Perform the copy AT_CUDA_CHECK(cudaMemcpyAsync( dst, src, size, cudaMemcpyDeviceToDevice, copy_stream)); } } else { auto dtype = iter.dtype(0); if (isQIntType(dtype)) { AT_DISPATCH_QINT_TYPES(dtype, "copy_", [&] { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; }); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kHalf, kBool, kBFloat16, dtype, "copy_", [&] { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; }); }); } } if (src_device != dst_device) { // dst waits on src barrier (dst already waits on dst). We cannot // operate on dst's copy until the copy is complete. // Still on src_device, record stream event CUDAEvent src_ready; src_ready.record(copy_stream); device_guard.set_device(dst_device); src_ready.block(getCurrentCUDAStream(dst_device.index())); } AT_CUDA_CHECK(cudaGetLastError()); } static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) { Device dst_device = iter.device(0); Device src_device = iter.device(1); if (dst_device == src_device) { // We never require temporaries for copies on the same GPU. 
TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda()); return false; } bool same_dtype = iter.dtype(0) == iter.dtype(1); if (same_dtype && iter.is_contiguous()) { // Contiguous same-dtype copies can always use cudaMemcpyAsync return false; } else if (dst_device.is_cuda() && src_device.is_cuda()) { // Copies between GPUs can use the copy kernel if P2P is supported return !p2p_enabled; } else { // The remaining cases require temporaries. For example, this includes // non-contiguous copies between CPU and GPU. return true; } } static bool maybe_enable_p2p_access(Device dst_device, Device src_device) { if (dst_device.is_cpu() || src_device.is_cpu()) { return false; } return THCState_getPeerToPeerAccess( globalContext().getTHCState(), src_device.index(), dst_device.index()); } static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) { AT_ASSERT(iter.ntensors() == 2); Device dst_device = iter.device(0); Device src_device = iter.device(1); // Enable p2p access between devices. (No-op if it involves the CPU) bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device); if (copy_requires_temporaries(iter, p2p_enabled)) { // NB: this involves recursive calls to copy. Be careful that those copies // don't require temporaries or you will cause an infinite recursion! auto& dst = iter.tensor(0); Tensor dst_contig; Tensor src_contig; // Type conversions are performed on the CPU for CPU-GPU copies and on // the src device for GPU-GPU copies. if (iter.device_type(0) == kCUDA) { dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous(); } else { bool same_type = iter.dtype(0) == iter.dtype(1); dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT); src_contig = iter.tensor(1).expand_as(dst).contiguous(); } // perform a same-dtype copy on contiguous tensors TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes())); TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type()); dst_contig.copy_(src_contig, non_blocking); // if necessary, copy back into dst if (!dst_contig.is_same(dst)) { TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device()); dst.copy_(dst_contig, non_blocking); } return; } // Copy on GPU (or between GPUs) if (dst_device.is_cuda() && src_device.is_cuda()) { copy_device_to_device(iter, non_blocking); return; } // Copy between CPU and GPU cuda::OptionalCUDAGuard device_guard; cudaMemcpyKind kind; if (dst_device.is_cuda() && src_device.is_cpu()) { device_guard.set_device(dst_device); kind = cudaMemcpyHostToDevice; } else if (dst_device.is_cpu() && src_device.is_cuda()) { device_guard.set_device(src_device); kind = cudaMemcpyDeviceToHost; } else { TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()"); } void* dst = iter.data_ptr(0); void* src = iter.data_ptr(1); int64_t nbytes = iter.numel() * iter.element_size(0); CUDAStream stream = getCurrentCUDAStream(); if (non_blocking) { AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream)); void* ptr = (dst_device == kCPU ? 
dst : src); AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream)); } else { #if HIP_VERSION >= 301 AT_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream)); #else AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream)); AT_CUDA_CHECK(cudaStreamSynchronize(stream)); #endif } } REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda); } // namespace native } // namespace at
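The copy kernel pair above issues cross-device copies on the source stream and orders them against the destination stream with a two-way event barrier. The sketch below shows that same record/wait idiom with the raw CUDA runtime API on a single device (the stream-ordering calls are identical in the multi-GPU case); the ATen/c10 RAII wrappers, dispatch machinery and dtype-conversion paths are omitted, and all buffer and stream names are illustrative.

// Minimal sketch of the event barrier used by copy_device_to_device, raw runtime API only.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    const size_t n = 1 << 20;
    float *src, *dst;
    cudaMalloc(&src, n * sizeof(float));
    cudaMalloc(&dst, n * sizeof(float));

    cudaStream_t src_stream, dst_stream;
    cudaStreamCreate(&src_stream);
    cudaStreamCreate(&dst_stream);

    // Pretend both streams still own their buffers (e.g. prior fills still in flight).
    cudaMemsetAsync(src, 1, n * sizeof(float), src_stream);
    cudaMemsetAsync(dst, 0, n * sizeof(float), dst_stream);

    // "dst_ready.record(dst current stream); dst_ready.block(copy_stream);"
    cudaEvent_t dst_ready;
    cudaEventCreateWithFlags(&dst_ready, cudaEventDisableTiming);
    cudaEventRecord(dst_ready, dst_stream);
    cudaStreamWaitEvent(src_stream, dst_ready, 0);

    // The copy itself is issued on the source stream, as in the file above.
    cudaMemcpyAsync(dst, src, n * sizeof(float), cudaMemcpyDeviceToDevice, src_stream);

    // Reverse barrier: the destination stream must not touch dst until the copy completes.
    cudaEvent_t src_done;
    cudaEventCreateWithFlags(&src_done, cudaEventDisableTiming);
    cudaEventRecord(src_done, src_stream);
    cudaStreamWaitEvent(dst_stream, src_done, 0);

    cudaStreamSynchronize(dst_stream);
    printf("copy ordered correctly: %s\n", cudaGetLastError() == cudaSuccess ? "yes" : "no");

    cudaEventDestroy(dst_ready);
    cudaEventDestroy(src_done);
    cudaStreamDestroy(src_stream);
    cudaStreamDestroy(dst_stream);
    cudaFree(src);
    cudaFree(dst);
    return 0;
}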
02bf398523dbeb9754e736ff4ebe24c148779344.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

__global__ void add(int *X, int *Y, int *alpha){
    int idx = blockIdx.x;
    Y[idx] = ((*alpha)*(X[idx])) + Y[idx];
}

int main(){
    int alpha, *X, *Y, N;   //program vars
    int *d_x, *d_y, *d_a;   //device vars
    int size = sizeof(int);

    printf("Enter number of elements and alpha: ");
    scanf("%d %d", &N, &alpha);

    X = (int*)malloc(sizeof(int)*N);
    Y = (int*)malloc(sizeof(int)*N);

    printf("Enter elements x <space> y:\n");
    for(int i=0; i<N; i++){
        scanf("%d %d", &X[i], &Y[i]);
    }

    //Allocate space for device copies of a,b,c
    hipMalloc((void**)&d_x, size*N);
    hipMalloc((void**)&d_y, size*N);
    hipMalloc((void**)&d_a, size);

    //setup input values
    hipMemcpy(d_a, &alpha, size, hipMemcpyHostToDevice);
    hipMemcpy(d_x, X, size*N, hipMemcpyHostToDevice);
    hipMemcpy(d_y, Y, size*N, hipMemcpyHostToDevice);

    //launch add kernel on GPU
    hipLaunchKernelGGL(( add), dim3(N), dim3(1), 0, 0, d_x, d_y, d_a);

    //copy result back to host
    hipMemcpy(Y, d_y, size*N, hipMemcpyDeviceToHost);

    printf("Result:\n");
    for(int i=0; i<N; i++){
        printf("Y%d = %d \n", i, Y[i]);
    }

    //Cleanup
    hipFree(d_a);
    hipFree(d_x);
    hipFree(d_y);

    return 0;
}
02bf398523dbeb9754e736ff4ebe24c148779344.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> __global__ void add(int *X, int *Y, int *alpha){ int idx = blockIdx.x; Y[idx] = ((*alpha)*(X[idx])) + Y[idx]; } int main(){ int alpha,*X,*Y, N; //program vars int *d_x, *d_y, *d_a; //device vars int size = sizeof(int); printf("Enter number of elements and alpha: "); scanf("%d %d",&N, &alpha); X = (int*)malloc(sizeof(int)*N); Y = (int*)malloc(sizeof(int)*N); printf("Enter elements x <space> y:\n"); for(int i=0; i<N; i++){ scanf("%d %d",&X[i],&Y[i]); } //Allocate space for device copies of a,b,c cudaMalloc((void**)&d_x,size*N); cudaMalloc((void**)&d_y,size*N); cudaMalloc((void**)&d_a,size); //setup input values cudaMemcpy(d_a,&alpha,size,cudaMemcpyHostToDevice); cudaMemcpy(d_x,X,size*N,cudaMemcpyHostToDevice); cudaMemcpy(d_y,Y,size*N,cudaMemcpyHostToDevice); //launch add kernel on GPU add<<<N,1>>>(d_x,d_y,d_a); //copy result back to host cudaMemcpy(Y,d_y,size*N,cudaMemcpyDeviceToHost); printf("Result:\n"); for(int i=0; i<N; i++){ printf("Y%d = %d \n",i,Y[i]); } //Cleanup cudaFree(d_a); cudaFree(d_x); cudaFree(d_y); return 0; }
6ce1e4df6902b08ba46f0e8391b994fa62f9d272.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This version is WITHOUT splitting. All data are in FP32 type. * Implementing the FFT algorithm for general input * Input should be fp32 vectors with size equals to the power of 4 * Number of vectors is given by BATCH (B) * Recursive algorithm, base case is fft4 * Combine all components in one file * Version after multiple optimizations * This implementation is without matrix and vector * This implementation uses global cublas handle */ #include "util/my_include_combined.h" #define PI 3.14159265 const float UPPER_BOUND = 1.0f; const int BATCH = 16; const int SIZE = 1024; FFT_S gfft(int N, float* X_re, float* X_im, float*& FX_re, float*& FX_im, int B); __global__ void myTranspose(int m, int n, float* input, float* output, int B); __global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im, int B); FFT_S init_F4(); FFT_S fft4(float* X_re, float* X_im, float* FX_re, float* FX_im, int B); __global__ void myAccumulate(int N, float* X1, float* X2, float* X3, float* X4, float* R1, float* R2, int B); FFT_S fft4_transposed(int M, float* X_re, float* X_im, float* FX_re, float* FX_im, int B); __global__ void myAccumulate_transposed(int n, int M, float* X1, float* X2, float* X3, float* X4, float* R1, float* R2, int B); hipblasStatus_t status; hipblasHandle_t handle; float* F4_re; float* F4_im; float* buffer; //float* scales; // = re_s1, re_s2, im_s1, im_s2; //float* X_split; // = X_re_hi, X_re_lo, X_im_hi, X_im_lo; float *result1, *result2, *result3, *result4; // F4_re * X_split, F4_im * X_split int main() { int mem_size; FFT_S fft_status; // Allocate unified memory for input and output matrix float *input_re, *input_im, *output_re, *output_im; mem_size = BATCH * SIZE * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &input_re, mem_size)); checkCudaErrors(hipMallocManaged((void **) &input_im, mem_size)); checkCudaErrors(hipMallocManaged((void **) &output_re, mem_size)); checkCudaErrors(hipMallocManaged((void **) &output_im, mem_size)); // Initialize the input data srand(time(NULL)); printf("The input is: \n"); for (int j = 0; j < BATCH; j++){ printf("Vector %d: \n", j); for (int i = 0; i < SIZE; i++){ input_re[i + j * SIZE] = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; input_im[i + j * SIZE] = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; input_re[i + j * SIZE] = (float)i + 1; input_im[i + j * SIZE] = 0.0f; printf("X[%d] = (%.10f, %.10f) \n", i, input_re[i + j * SIZE], input_im[i + j * SIZE]); } printf("\n"); } // Allocate unified memory for the buffer (global) mem_size = SIZE * BATCH * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &buffer, mem_size)); // Allocate unified memory for temporary result (global) //mem_size = SIZE / 4 * BATCH * 4 * sizeof(float); // Unit length = 4, re_s1, re_s2, im_s1, im_s2 //checkCudaErrors(hipMallocManaged((void **) &scales, mem_size)); //mem_size = SIZE * BATCH * 4 * sizeof(float); // re_hi, re_lo, im_hi, im_lo //checkCudaErrors(hipMallocManaged((void **) &X_split, mem_size)); mem_size = SIZE * BATCH * sizeof(float); // re_hi, re_lo, im_hi, im_lo checkCudaErrors(hipMallocManaged((void **) &result1, mem_size)); checkCudaErrors(hipMallocManaged((void **) &result2, mem_size)); checkCudaErrors(hipMallocManaged((void **) &result3, mem_size)); checkCudaErrors(hipMallocManaged((void **) &result4, mem_size)); // Allocate memory for and initialize Fourier matrix mem_size = 16 * sizeof(float); 
checkCudaErrors(hipMallocManaged((void **) &F4_re, mem_size)); checkCudaErrors(hipMallocManaged((void **) &F4_im, mem_size)); fft_status = init_F4(); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Matrix initialization error (Fourier matrix).\n"); exit(1); } // Initialize cublas with global cublas handle and status status = hipblasCreate(&handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS initialization error.\n"); exit(1); } // Allow cublas to use Tensor Core status = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS setting math mode error.\n"); exit(1); } // Call gfft function fft_status = gfft(SIZE, input_re, input_im, output_re, output_im, BATCH); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! gFFT execution error.\n"); exit(1); } // Shutdown cublas status = hipblasDestroy(handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS shutdown error.\n"); exit(1); } // Deallocate unified memory for buffer and temporary result checkCudaErrors(hipFree(F4_re)); checkCudaErrors(hipFree(F4_im)); checkCudaErrors(hipFree(buffer)); //checkCudaErrors(hipFree(scales)); //checkCudaErrors(hipFree(X_split)); checkCudaErrors(hipFree(result1)); checkCudaErrors(hipFree(result2)); checkCudaErrors(hipFree(result3)); checkCudaErrors(hipFree(result4)); // Print result printf("Result: \n"); for (int j = 0; j < BATCH; j++){ printf("Resulting vector %d: \n", j); for (int i = 0; i < SIZE; i++){ printf("FX[%d] = (%.10f, %.10f) \n", i, output_re[i + j * SIZE], output_im[i + j * SIZE]); } } // Deallocate unified memory checkCudaErrors(hipFree(input_re)); checkCudaErrors(hipFree(input_im)); checkCudaErrors(hipFree(output_re)); checkCudaErrors(hipFree(output_im)); exit(0); } FFT_S gfft(int N, float* X_re, float* X_im, float*& FX_re, float*& FX_im, int B) { // Base case if (N == 4) { return fft4(X_re, X_im, FX_re, FX_im, B); } // Status and error variable declaration FFT_S fft_status; hipError_t cerror; // Declare temp variable for buffer swapping float* temp; // Transpose input matrix: 4 * (N/4*B) --> (N/4) * (4*B) // First store the result in buffer to avoid racing condition //// Set grid and block size dim3 threadsPerBlock1(4, 16); dim3 blockPerGrid1(B, (N / 4 + 15)/16); // Make sure blocks are enough //// Transpose real matrix hipLaunchKernelGGL(( myTranspose), dim3(blockPerGrid1), dim3(threadsPerBlock1), 0, 0, 4, N / 4, X_re, buffer, B); cerror = hipGetLastError(); if (cerror != hipSuccess) { fprintf(stderr, "!!!!! CUDA error: %s during transposition of real matrix.\n", hipGetErrorString(cerror)); return FFT_FAILURE; } //// Swap FX_re and buffer to store the transposition result in FX_re temp = FX_re; FX_re = buffer; buffer = temp; //// Transpose imaginary matrix hipLaunchKernelGGL(( myTranspose), dim3(blockPerGrid1), dim3(threadsPerBlock1), 0, 0, 4, N / 4, X_im, buffer, B); cerror = hipGetLastError(); if (cerror != hipSuccess) { fprintf(stderr, "!!!!! CUDA error: %s during transposition of imaginary matrix.\n", hipGetErrorString(cerror)); return FFT_FAILURE; } ////// Swap FX_im and buffer to store the transposition result in FX_im temp = FX_im; FX_im = buffer; buffer = temp; // Wait for GPU to finish work hipDeviceSynchronize(); // Recursively call gfft function, NOT using buffer matrix fft_status = gfft(N / 4, FX_re, FX_im, FX_re, FX_im, 4 * B); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! 
Function execution error (recursively call gfft).\n"); return FFT_FAILURE; } // Multiplicate each element with twiddle factor //// Set grid and block size dim3 threadsPerBlock2(4, 16); dim3 blockPerGrid2(B, (N / 4 + 15)/16); // Make sure blocks are enough //// Call kernel function hipLaunchKernelGGL(( multiply_twiddle), dim3(blockPerGrid2), dim3(threadsPerBlock2), 0, 0, N, N/4, 4, FX_re, FX_im, B); cerror = hipGetLastError(); if (cerror != hipSuccess) { fprintf(stderr, "!!!!! CUDA error: %s during twiddle factor multiplication.\n", hipGetErrorString(cerror)); return FFT_FAILURE; } // Wait for GPU to finish work hipDeviceSynchronize(); // Call the optimized fft4 function to avoid transposition fft_status = fft4_transposed(N / 4, FX_re, FX_im, FX_re, FX_im, B); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Function execution error (calling fft4_transposed).\n"); return FFT_FAILURE; } // Wait for GPU to finish work hipDeviceSynchronize(); return FFT_SUCCESS; } /* * Initialize Fourier matrix * Allocate unified memory and set value for F4_re and F4_im * */ FFT_S init_F4() { F4_re[0] = 1.0f; F4_re[1] = 1.0f; F4_re[2] = 1.0f; F4_re[3] = 1.0f; F4_re[4] = 1.0f; F4_re[5] = 0.0f; F4_re[6] =-1.0f; F4_re[7] = 0.0f; F4_re[8] = 1.0f; F4_re[9] =-1.0f; F4_re[10] = 1.0f; F4_re[11] =-1.0f; F4_re[12] = 1.0f; F4_re[13] = 0.0f; F4_re[14] =-1.0f; F4_re[15] = 0.0f; F4_im[0] = 0.0f; F4_im[1] = 0.0f; F4_im[2] = 0.0f; F4_im[3] = 0.0f; F4_im[4] = 0.0f; F4_im[5] =-1.0f; F4_im[6] = 0.0f; F4_im[7] = 1.0f; F4_im[8] = 0.0f; F4_im[9] = 0.0f; F4_im[10] = 0.0f; F4_im[11] = 0.0f; F4_im[12] = 0.0f; F4_im[13] = 1.0f; F4_im[14] = 0.0f; F4_im[15] =-1.0f; return FFT_SUCCESS; } /* * Transpose every input matrix of size m * n * Number of matrices is given by B * Every matrix in a batch is transposed independently * Input is expected to be matrix of size m * (n * B) * Output is expected to be matrix of size n * (m * B) * The grid size is expected to be B in horizontal dimension * Usage: transpose a matrix of size 4 * (N/4 * B) to (N/4) * (4 * B) * */ __global__ void myTranspose(int m, int n, float* input, float* output, int B) { // Calculate position in the OUTPUT matrix (0 based) int j = threadIdx.x; // Column number within a matrix, expected to be 0, 1, 2, 3 int i = blockIdx.y * blockDim.y + threadIdx.y; // Row number int matrix_id = blockIdx.x; // The index of matrix in the batch if (i < n && j < m && matrix_id < B){ output[matrix_id * m * n + j * n + i] = input[matrix_id * m * n + i * m + j]; } } /* * Multifly every element of the input matrix with the twiddle factor * Every matrix in a batch is processed independently * Block and thread layout should be 2D, and the total dimension is expected to be (m, n * B) * n is expected to be 4 * result.re(i, j) [0 based] = xre(i, j) * cos(2pi/N * i * j) + xim(i, j) * sin(2pi/N * i * j) * result.im(i, j) [0 based] = -xre(i, j) * sin(2pi/N * i * j) + xim(i, j) * cos(2pi/N * i * j) * ONLY that thread will access the particular matrix_re and matrix_im, so buffer is not needed * */ __global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im, int B) { // Calculate position int j = threadIdx.x; // Column number within a matrix, 0 to 3 in radix 4 int i = blockIdx.y * blockDim.y + threadIdx.y; // Row number within a matrix int matrix_id = blockIdx.x; // Index of matrix in the batch if (i < m && j < n && matrix_id < B){ // Per-thread local variables int index = matrix_id * N + j * m + i; float tw_re = cos(2 * PI / N * i * j); float tw_im = sin(2 * PI / N * i * 
j); float result_re = matrix_re[index] * tw_re + matrix_im[index] * tw_im; float result_im = -1.0f * matrix_re[index] * tw_im + matrix_im[index] * tw_re; matrix_re[index] = result_re; matrix_im[index] = result_im; } } /* * Perform fft on every length-4 vector * Batch size is given by B * Internally split every FP32 input into two FP16 vectors * Combine them together after FFT * */ FFT_S fft4(float* X_re, float* X_im, float* FX_re, float* FX_im, int B) { // Variable declaration hipError_t cerror; float alpha = 1.0f, beta = 0.0f; // Temporary results are global variables // Split input //// Define segmentation pointers for convenience //float* X_re = X_split + 4 * B * 0; //float* X_im = X_split + 4 * B * 2; // float* re_s1 = scales + B * 0; // float* re_s2 = scales + B * 1; // float* im_s1 = scales + B * 2; // float* im_s2 = scales + B * 3; //// Call the splitting kernel //int numThreads = 64; //int numBlocks = (B + 63) / 64; //mySplit<<<numBlocks, numThreads>>>(4, X_re, X_re_hi, X_re_lo, re_s1, re_s2, B, buffer); //mySplit<<<numBlocks, numThreads>>>(4, X_im, X_im_hi, X_im_lo, im_s1, im_s2, B, buffer); //cerror = hipGetLastError(); //if (cerror != hipSuccess) //{ // fprintf(stderr, "!!!!! CUDA error: %s during fft4 splitting\n", hipGetErrorString(cerror)); //return FFT_FAILURE; //} // Matrix multiplication with Fourier matrix //// Call cublas gemm on F4_re * X_re status = hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4, B, 4, &alpha, F4_re, HIP_R_32F, 4, X_re, HIP_R_32F, 4, &beta, result1, HIP_R_32F, 4, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error (F4_re * X_re).\n"); return FFT_FAILURE; } //// Call cublas gemm on F4_re * X_im status = hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4, B, 4, &alpha, F4_re, HIP_R_32F, 4, X_im, HIP_R_32F, 4, &beta, result2, HIP_R_32F, 4, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error (F4_re * X_im).\n"); return FFT_FAILURE; } //// Call cublas gemm on F4_im * X_re status = hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4, B, 4, &alpha, F4_im, HIP_R_32F, 4, X_re, HIP_R_32F, 4, &beta, result3, HIP_R_32F, 4, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error (F4_im * X_re).\n"); return FFT_FAILURE; } //// Call cublas gemm on F4_im * X_im status = hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4, B, 4, &alpha, F4_im, HIP_R_32F, 4, X_im, HIP_R_32F, 4, &beta, result4, HIP_R_32F, 4, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error (F4_im * X_im).\n"); return FFT_FAILURE; } // Rescale the result and combine them together //// Set grid and block size dim3 threadsPerBlock(16, 4); dim3 blocksPerGrid((B+15)/16, 1); //// call kernel function (FX_re and FX_im will be zero-initialized) hipLaunchKernelGGL(( myAccumulate), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, 4, result1, result2, result3, result4, FX_re, FX_im, B); cerror = hipGetLastError(); if (cerror != hipSuccess) { fprintf(stderr, "!!!!! 
CUDA error: %s during fft4 accumulation\n", hipGetErrorString(cerror)); return FFT_FAILURE; } return FFT_SUCCESS; } /* * For (a + bi) * (c + di), re = ac - bd, im = ad + bc * Need to rescale the result before accumulation * N is number of elements in one vector (expected to be 4) * The number of vectors is given by B * X1, X2 are 4 * (B * 4) column-major matrix. Inner order is by batch. Outer order is Re_hi, Re_lo, Im_hi, Im_lo * alpha is B * 4 array. Inner order is by batch. Outer order is re_s1, re_s2, im_s1, im_s2 * R1, R2 are resulting matrix of size 4 * B * */ __global__ void myAccumulate(int N, float* X1, float* X2, float* X3, float* X4, float* R1, float* R2, int B) { int i = blockIdx.y * blockDim.y + threadIdx.y; // row number int j = blockIdx.x * blockDim.x + threadIdx.x; // column number if (i < N && j < B){ R1[i + j * N] = R2[i + j * N] = 0.0f; R1[i + j * N] += X1[i + j * N]; R1[i + j * N] += -1.0f * X4[i + j * N]; R2[i + j * N] += X2[i + j * N]; R2[i + j * N] += X3[i + j * N]; } } /* * Perform fft4 assuming the input is in the transposed layout * The number of vectors is M * B * The number of rows of the input matrix is M * The number of columns of the input matrix is 4 * B (4 for radix 4) * Note that the fourier matrix is symmetric */ FFT_S fft4_transposed(int M, float* X_re, float* X_im, float* FX_re, float* FX_im, int B) { // Variable declaration hipError_t cerror; float alpha = 1.0f, beta = 0.0f; // Temporary results are global variables // Split input //// Define segmentation pointers for convenience //float* X_re = X_split + M * 4 * B * 0; //float* X_im = X_split + M * 4 * B * 2; //float* re_s1 = scales + M * B * 0; //float* re_s2 = scales + M * B * 1; //float* im_s1 = scales + M * B * 2; //float* im_s2 = scales + M * B * 3; //// Call splitting function //dim3 threadsPerBlock1(4, 16); //dim3 blocksPerGrid1((B + 3)/4, (M + 15)/16); //mySplit_transposed<<<blocksPerGrid1, threadsPerBlock1>>>(4, M, X_re, X_re_hi, X_re_lo, re_s1, re_s2, B, buffer); //mySplit_transposed<<<blocksPerGrid1, threadsPerBlock1>>>(4, M, X_im, X_im_hi, X_im_lo, im_s1, im_s2, B, buffer); //cerror = hipGetLastError(); //if (cerror != hipSuccess) //{ // fprintf(stderr, "!!!!! CUDA error: %s during splitting in fft4_transposed\n", hipGetErrorString(cerror)); //return FFT_FAILURE; //} // Matrix multiplication with F4_re and F4_im // Note that the order of multiplicands are reversed //// Call batched gemm on X_re * F4_re status = hipblasGemmStridedBatchedEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, 4, 4, &alpha, X_re, HIP_R_32F, M, M * 4, F4_re, HIP_R_32F, 4, 0, &beta, result1, HIP_R_32F, M, M * 4, B, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error in fft4_transposed X_re*F4_re multiplication.\n"); return FFT_FAILURE; } //// Call batched gemm on X_im * F4_re status = hipblasGemmStridedBatchedEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, 4, 4, &alpha, X_im, HIP_R_32F, M, M * 4, F4_re, HIP_R_32F, 4, 0, &beta, result2, HIP_R_32F, M, M * 4, B, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! 
CUBLAS kernel execution error in fft4_transposed X_im*F4_re multiplication.\n"); return FFT_FAILURE; } //// Call batched gemm on X_re * F4_im status = hipblasGemmStridedBatchedEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, 4, 4, &alpha, X_re, HIP_R_32F, M, M * 4, F4_im, HIP_R_32F, 4, 0, &beta, result3, HIP_R_32F, M, M * 4, B, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error in fft4_transposed X_re*F4_im multiplication.\n"); return FFT_FAILURE; } //// Call batched gemm on X_im * F4_im status = hipblasGemmStridedBatchedEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, 4, 4, &alpha, X_im, HIP_R_32F, M, M * 4, F4_im, HIP_R_32F, 4, 0, &beta, result4, HIP_R_32F, M, M * 4, B, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error in fft4_transposed X_im*F4_im multiplication.\n"); return FFT_FAILURE; } // Rescale the result and combine real and imaginary part //// Set grid and block size dim3 threadsPerBlock2(16, 16); dim3 blocksPerGrid2((4 * B + 15)/16, (M + 15)/16); //// call the accumulation kernel function (FX_re and FX_im will be zero-initialized inside) hipLaunchKernelGGL(( myAccumulate_transposed), dim3(blocksPerGrid2), dim3(threadsPerBlock2), 0, 0, 4, M, result1, result2, result3, result4, FX_re, FX_im, B); cerror = hipGetLastError(); if (cerror != hipSuccess) { fprintf(stderr, "!!!!! CUDA error: %s during accumulation in fft4_transposed\n", hipGetErrorString(cerror)); return FFT_FAILURE; } return FFT_SUCCESS; } /* * The kernel rescales the multiplication result and accumulates them * Each thread works on one element (instead of one vector) in the resulting matrix * The length of one vector (unit) is given by n, expected to be 4 * The total number of vectors is M * B * M is the vertical dimension, B is the horizontal dimension * X1, X2 are M * (4 * B * 4) matrices. The inner-most column order is by element in a unit. Then by batch. Outer order is Re_hi, Re_lo, Im_hi, Im_lo * alpha is a M * B * 4 arrays. Inner most order is by horizontal index. Then by batch. Outer order is re_s1, re_s2, im_s1, im_s2 * R1, R2 are M * (4 * B) matrices * */ __global__ void myAccumulate_transposed(int n, int M, float* X1, float* X2, float* X3, float* X4, float* R1, float* R2, int B) { int i = blockIdx.y * blockDim.y + threadIdx.y; // vertical index of the element, max M int j = blockIdx.x * blockDim.x + threadIdx.x; // horizontal index of the element, max 4 * B if (i < M && j < 4 * B){ int result_idx = i + j * M; R1[result_idx] = R2[result_idx] = 0.0f; R1[result_idx] += X1[result_idx]; R1[result_idx] += -1.0f * X4[result_idx]; R2[result_idx] += X2[result_idx]; R2[result_idx] += X3[result_idx]; } }
6ce1e4df6902b08ba46f0e8391b994fa62f9d272.cu
/* * This version is WITHOUT splitting. All data are in FP32 type. * Implementing the FFT algorithm for general input * Input should be fp32 vectors with size equals to the power of 4 * Number of vectors is given by BATCH (B) * Recursive algorithm, base case is fft4 * Combine all components in one file * Version after multiple optimizations * This implementation is without matrix and vector * This implementation uses global cublas handle */ #include "util/my_include_combined.h" #define PI 3.14159265 const float UPPER_BOUND = 1.0f; const int BATCH = 16; const int SIZE = 1024; FFT_S gfft(int N, float* X_re, float* X_im, float*& FX_re, float*& FX_im, int B); __global__ void myTranspose(int m, int n, float* input, float* output, int B); __global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im, int B); FFT_S init_F4(); FFT_S fft4(float* X_re, float* X_im, float* FX_re, float* FX_im, int B); __global__ void myAccumulate(int N, float* X1, float* X2, float* X3, float* X4, float* R1, float* R2, int B); FFT_S fft4_transposed(int M, float* X_re, float* X_im, float* FX_re, float* FX_im, int B); __global__ void myAccumulate_transposed(int n, int M, float* X1, float* X2, float* X3, float* X4, float* R1, float* R2, int B); cublasStatus_t status; cublasHandle_t handle; float* F4_re; float* F4_im; float* buffer; //float* scales; // = re_s1, re_s2, im_s1, im_s2; //float* X_split; // = X_re_hi, X_re_lo, X_im_hi, X_im_lo; float *result1, *result2, *result3, *result4; // F4_re * X_split, F4_im * X_split int main() { int mem_size; FFT_S fft_status; // Allocate unified memory for input and output matrix float *input_re, *input_im, *output_re, *output_im; mem_size = BATCH * SIZE * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &input_re, mem_size)); checkCudaErrors(cudaMallocManaged((void **) &input_im, mem_size)); checkCudaErrors(cudaMallocManaged((void **) &output_re, mem_size)); checkCudaErrors(cudaMallocManaged((void **) &output_im, mem_size)); // Initialize the input data srand(time(NULL)); printf("The input is: \n"); for (int j = 0; j < BATCH; j++){ printf("Vector %d: \n", j); for (int i = 0; i < SIZE; i++){ input_re[i + j * SIZE] = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; input_im[i + j * SIZE] = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; input_re[i + j * SIZE] = (float)i + 1; input_im[i + j * SIZE] = 0.0f; printf("X[%d] = (%.10f, %.10f) \n", i, input_re[i + j * SIZE], input_im[i + j * SIZE]); } printf("\n"); } // Allocate unified memory for the buffer (global) mem_size = SIZE * BATCH * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &buffer, mem_size)); // Allocate unified memory for temporary result (global) //mem_size = SIZE / 4 * BATCH * 4 * sizeof(float); // Unit length = 4, re_s1, re_s2, im_s1, im_s2 //checkCudaErrors(cudaMallocManaged((void **) &scales, mem_size)); //mem_size = SIZE * BATCH * 4 * sizeof(float); // re_hi, re_lo, im_hi, im_lo //checkCudaErrors(cudaMallocManaged((void **) &X_split, mem_size)); mem_size = SIZE * BATCH * sizeof(float); // re_hi, re_lo, im_hi, im_lo checkCudaErrors(cudaMallocManaged((void **) &result1, mem_size)); checkCudaErrors(cudaMallocManaged((void **) &result2, mem_size)); checkCudaErrors(cudaMallocManaged((void **) &result3, mem_size)); checkCudaErrors(cudaMallocManaged((void **) &result4, mem_size)); // Allocate memory for and initialize Fourier matrix mem_size = 16 * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &F4_re, mem_size)); 
checkCudaErrors(cudaMallocManaged((void **) &F4_im, mem_size)); fft_status = init_F4(); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Matrix initialization error (Fourier matrix).\n"); exit(1); } // Initialize cublas with global cublas handle and status status = cublasCreate(&handle); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS initialization error.\n"); exit(1); } // Allow cublas to use Tensor Core status = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS setting math mode error.\n"); exit(1); } // Call gfft function fft_status = gfft(SIZE, input_re, input_im, output_re, output_im, BATCH); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! gFFT execution error.\n"); exit(1); } // Shutdown cublas status = cublasDestroy(handle); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS shutdown error.\n"); exit(1); } // Deallocate unified memory for buffer and temporary result checkCudaErrors(cudaFree(F4_re)); checkCudaErrors(cudaFree(F4_im)); checkCudaErrors(cudaFree(buffer)); //checkCudaErrors(cudaFree(scales)); //checkCudaErrors(cudaFree(X_split)); checkCudaErrors(cudaFree(result1)); checkCudaErrors(cudaFree(result2)); checkCudaErrors(cudaFree(result3)); checkCudaErrors(cudaFree(result4)); // Print result printf("Result: \n"); for (int j = 0; j < BATCH; j++){ printf("Resulting vector %d: \n", j); for (int i = 0; i < SIZE; i++){ printf("FX[%d] = (%.10f, %.10f) \n", i, output_re[i + j * SIZE], output_im[i + j * SIZE]); } } // Deallocate unified memory checkCudaErrors(cudaFree(input_re)); checkCudaErrors(cudaFree(input_im)); checkCudaErrors(cudaFree(output_re)); checkCudaErrors(cudaFree(output_im)); exit(0); } FFT_S gfft(int N, float* X_re, float* X_im, float*& FX_re, float*& FX_im, int B) { // Base case if (N == 4) { return fft4(X_re, X_im, FX_re, FX_im, B); } // Status and error variable declaration FFT_S fft_status; cudaError_t cerror; // Declare temp variable for buffer swapping float* temp; // Transpose input matrix: 4 * (N/4*B) --> (N/4) * (4*B) // First store the result in buffer to avoid racing condition //// Set grid and block size dim3 threadsPerBlock1(4, 16); dim3 blockPerGrid1(B, (N / 4 + 15)/16); // Make sure blocks are enough //// Transpose real matrix myTranspose<<<blockPerGrid1, threadsPerBlock1>>>(4, N / 4, X_re, buffer, B); cerror = cudaGetLastError(); if (cerror != cudaSuccess) { fprintf(stderr, "!!!!! CUDA error: %s during transposition of real matrix.\n", cudaGetErrorString(cerror)); return FFT_FAILURE; } //// Swap FX_re and buffer to store the transposition result in FX_re temp = FX_re; FX_re = buffer; buffer = temp; //// Transpose imaginary matrix myTranspose<<<blockPerGrid1, threadsPerBlock1>>>(4, N / 4, X_im, buffer, B); cerror = cudaGetLastError(); if (cerror != cudaSuccess) { fprintf(stderr, "!!!!! CUDA error: %s during transposition of imaginary matrix.\n", cudaGetErrorString(cerror)); return FFT_FAILURE; } ////// Swap FX_im and buffer to store the transposition result in FX_im temp = FX_im; FX_im = buffer; buffer = temp; // Wait for GPU to finish work cudaDeviceSynchronize(); // Recursively call gfft function, NOT using buffer matrix fft_status = gfft(N / 4, FX_re, FX_im, FX_re, FX_im, 4 * B); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! 
Function execution error (recursively call gfft).\n"); return FFT_FAILURE; } // Multiplicate each element with twiddle factor //// Set grid and block size dim3 threadsPerBlock2(4, 16); dim3 blockPerGrid2(B, (N / 4 + 15)/16); // Make sure blocks are enough //// Call kernel function multiply_twiddle<<<blockPerGrid2, threadsPerBlock2>>>(N, N/4, 4, FX_re, FX_im, B); cerror = cudaGetLastError(); if (cerror != cudaSuccess) { fprintf(stderr, "!!!!! CUDA error: %s during twiddle factor multiplication.\n", cudaGetErrorString(cerror)); return FFT_FAILURE; } // Wait for GPU to finish work cudaDeviceSynchronize(); // Call the optimized fft4 function to avoid transposition fft_status = fft4_transposed(N / 4, FX_re, FX_im, FX_re, FX_im, B); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Function execution error (calling fft4_transposed).\n"); return FFT_FAILURE; } // Wait for GPU to finish work cudaDeviceSynchronize(); return FFT_SUCCESS; } /* * Initialize Fourier matrix * Allocate unified memory and set value for F4_re and F4_im * */ FFT_S init_F4() { F4_re[0] = 1.0f; F4_re[1] = 1.0f; F4_re[2] = 1.0f; F4_re[3] = 1.0f; F4_re[4] = 1.0f; F4_re[5] = 0.0f; F4_re[6] =-1.0f; F4_re[7] = 0.0f; F4_re[8] = 1.0f; F4_re[9] =-1.0f; F4_re[10] = 1.0f; F4_re[11] =-1.0f; F4_re[12] = 1.0f; F4_re[13] = 0.0f; F4_re[14] =-1.0f; F4_re[15] = 0.0f; F4_im[0] = 0.0f; F4_im[1] = 0.0f; F4_im[2] = 0.0f; F4_im[3] = 0.0f; F4_im[4] = 0.0f; F4_im[5] =-1.0f; F4_im[6] = 0.0f; F4_im[7] = 1.0f; F4_im[8] = 0.0f; F4_im[9] = 0.0f; F4_im[10] = 0.0f; F4_im[11] = 0.0f; F4_im[12] = 0.0f; F4_im[13] = 1.0f; F4_im[14] = 0.0f; F4_im[15] =-1.0f; return FFT_SUCCESS; } /* * Transpose every input matrix of size m * n * Number of matrices is given by B * Every matrix in a batch is transposed independently * Input is expected to be matrix of size m * (n * B) * Output is expected to be matrix of size n * (m * B) * The grid size is expected to be B in horizontal dimension * Usage: transpose a matrix of size 4 * (N/4 * B) to (N/4) * (4 * B) * */ __global__ void myTranspose(int m, int n, float* input, float* output, int B) { // Calculate position in the OUTPUT matrix (0 based) int j = threadIdx.x; // Column number within a matrix, expected to be 0, 1, 2, 3 int i = blockIdx.y * blockDim.y + threadIdx.y; // Row number int matrix_id = blockIdx.x; // The index of matrix in the batch if (i < n && j < m && matrix_id < B){ output[matrix_id * m * n + j * n + i] = input[matrix_id * m * n + i * m + j]; } } /* * Multifly every element of the input matrix with the twiddle factor * Every matrix in a batch is processed independently * Block and thread layout should be 2D, and the total dimension is expected to be (m, n * B) * n is expected to be 4 * result.re(i, j) [0 based] = xre(i, j) * cos(2pi/N * i * j) + xim(i, j) * sin(2pi/N * i * j) * result.im(i, j) [0 based] = -xre(i, j) * sin(2pi/N * i * j) + xim(i, j) * cos(2pi/N * i * j) * ONLY that thread will access the particular matrix_re and matrix_im, so buffer is not needed * */ __global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im, int B) { // Calculate position int j = threadIdx.x; // Column number within a matrix, 0 to 3 in radix 4 int i = blockIdx.y * blockDim.y + threadIdx.y; // Row number within a matrix int matrix_id = blockIdx.x; // Index of matrix in the batch if (i < m && j < n && matrix_id < B){ // Per-thread local variables int index = matrix_id * N + j * m + i; float tw_re = cos(2 * PI / N * i * j); float tw_im = sin(2 * PI / N * i * j); float result_re = 
matrix_re[index] * tw_re + matrix_im[index] * tw_im; float result_im = -1.0f * matrix_re[index] * tw_im + matrix_im[index] * tw_re; matrix_re[index] = result_re; matrix_im[index] = result_im; } } /* * Perform fft on every length-4 vector * Batch size is given by B * Internally split every FP32 input into two FP16 vectors * Combine them together after FFT * */ FFT_S fft4(float* X_re, float* X_im, float* FX_re, float* FX_im, int B) { // Variable declaration cudaError_t cerror; float alpha = 1.0f, beta = 0.0f; // Temporary results are global variables // Split input //// Define segmentation pointers for convenience //float* X_re = X_split + 4 * B * 0; //float* X_im = X_split + 4 * B * 2; // float* re_s1 = scales + B * 0; // float* re_s2 = scales + B * 1; // float* im_s1 = scales + B * 2; // float* im_s2 = scales + B * 3; //// Call the splitting kernel //int numThreads = 64; //int numBlocks = (B + 63) / 64; //mySplit<<<numBlocks, numThreads>>>(4, X_re, X_re_hi, X_re_lo, re_s1, re_s2, B, buffer); //mySplit<<<numBlocks, numThreads>>>(4, X_im, X_im_hi, X_im_lo, im_s1, im_s2, B, buffer); //cerror = cudaGetLastError(); //if (cerror != cudaSuccess) //{ // fprintf(stderr, "!!!!! CUDA error: %s during fft4 splitting\n", cudaGetErrorString(cerror)); //return FFT_FAILURE; //} // Matrix multiplication with Fourier matrix //// Call cublas gemm on F4_re * X_re status = cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, 4, B, 4, &alpha, F4_re, CUDA_R_32F, 4, X_re, CUDA_R_32F, 4, &beta, result1, CUDA_R_32F, 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error (F4_re * X_re).\n"); return FFT_FAILURE; } //// Call cublas gemm on F4_re * X_im status = cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, 4, B, 4, &alpha, F4_re, CUDA_R_32F, 4, X_im, CUDA_R_32F, 4, &beta, result2, CUDA_R_32F, 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error (F4_re * X_im).\n"); return FFT_FAILURE; } //// Call cublas gemm on F4_im * X_re status = cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, 4, B, 4, &alpha, F4_im, CUDA_R_32F, 4, X_re, CUDA_R_32F, 4, &beta, result3, CUDA_R_32F, 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error (F4_im * X_re).\n"); return FFT_FAILURE; } //// Call cublas gemm on F4_im * X_im status = cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, 4, B, 4, &alpha, F4_im, CUDA_R_32F, 4, X_im, CUDA_R_32F, 4, &beta, result4, CUDA_R_32F, 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error (F4_im * X_im).\n"); return FFT_FAILURE; } // Rescale the result and combine them together //// Set grid and block size dim3 threadsPerBlock(16, 4); dim3 blocksPerGrid((B+15)/16, 1); //// call kernel function (FX_re and FX_im will be zero-initialized) myAccumulate<<<blocksPerGrid, threadsPerBlock>>>(4, result1, result2, result3, result4, FX_re, FX_im, B); cerror = cudaGetLastError(); if (cerror != cudaSuccess) { fprintf(stderr, "!!!!! CUDA error: %s during fft4 accumulation\n", cudaGetErrorString(cerror)); return FFT_FAILURE; } return FFT_SUCCESS; } /* * For (a + bi) * (c + di), re = ac - bd, im = ad + bc * Need to rescale the result before accumulation * N is number of elements in one vector (expected to be 4) * The number of vectors is given by B * X1, X2 are 4 * (B * 4) column-major matrix. Inner order is by batch. 
Outer order is Re_hi, Re_lo, Im_hi, Im_lo * alpha is B * 4 array. Inner order is by batch. Outer order is re_s1, re_s2, im_s1, im_s2 * R1, R2 are resulting matrix of size 4 * B * */ __global__ void myAccumulate(int N, float* X1, float* X2, float* X3, float* X4, float* R1, float* R2, int B) { int i = blockIdx.y * blockDim.y + threadIdx.y; // row number int j = blockIdx.x * blockDim.x + threadIdx.x; // column number if (i < N && j < B){ R1[i + j * N] = R2[i + j * N] = 0.0f; R1[i + j * N] += X1[i + j * N]; R1[i + j * N] += -1.0f * X4[i + j * N]; R2[i + j * N] += X2[i + j * N]; R2[i + j * N] += X3[i + j * N]; } } /* * Perform fft4 assuming the input is in the transposed layout * The number of vectors is M * B * The number of rows of the input matrix is M * The number of columns of the input matrix is 4 * B (4 for radix 4) * Note that the fourier matrix is symmetric */ FFT_S fft4_transposed(int M, float* X_re, float* X_im, float* FX_re, float* FX_im, int B) { // Variable declaration cudaError_t cerror; float alpha = 1.0f, beta = 0.0f; // Temporary results are global variables // Split input //// Define segmentation pointers for convenience //float* X_re = X_split + M * 4 * B * 0; //float* X_im = X_split + M * 4 * B * 2; //float* re_s1 = scales + M * B * 0; //float* re_s2 = scales + M * B * 1; //float* im_s1 = scales + M * B * 2; //float* im_s2 = scales + M * B * 3; //// Call splitting function //dim3 threadsPerBlock1(4, 16); //dim3 blocksPerGrid1((B + 3)/4, (M + 15)/16); //mySplit_transposed<<<blocksPerGrid1, threadsPerBlock1>>>(4, M, X_re, X_re_hi, X_re_lo, re_s1, re_s2, B, buffer); //mySplit_transposed<<<blocksPerGrid1, threadsPerBlock1>>>(4, M, X_im, X_im_hi, X_im_lo, im_s1, im_s2, B, buffer); //cerror = cudaGetLastError(); //if (cerror != cudaSuccess) //{ // fprintf(stderr, "!!!!! CUDA error: %s during splitting in fft4_transposed\n", cudaGetErrorString(cerror)); //return FFT_FAILURE; //} // Matrix multiplication with F4_re and F4_im // Note that the order of multiplicands are reversed //// Call batched gemm on X_re * F4_re status = cublasGemmStridedBatchedEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, 4, 4, &alpha, X_re, CUDA_R_32F, M, M * 4, F4_re, CUDA_R_32F, 4, 0, &beta, result1, CUDA_R_32F, M, M * 4, B, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error in fft4_transposed X_re*F4_re multiplication.\n"); return FFT_FAILURE; } //// Call batched gemm on X_im * F4_re status = cublasGemmStridedBatchedEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, 4, 4, &alpha, X_im, CUDA_R_32F, M, M * 4, F4_re, CUDA_R_32F, 4, 0, &beta, result2, CUDA_R_32F, M, M * 4, B, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error in fft4_transposed X_im*F4_re multiplication.\n"); return FFT_FAILURE; } //// Call batched gemm on X_re * F4_im status = cublasGemmStridedBatchedEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, 4, 4, &alpha, X_re, CUDA_R_32F, M, M * 4, F4_im, CUDA_R_32F, 4, 0, &beta, result3, CUDA_R_32F, M, M * 4, B, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! 
CUBLAS kernel execution error in fft4_transposed X_re*F4_im multiplication.\n"); return FFT_FAILURE; } //// Call batched gemm on X_im * F4_im status = cublasGemmStridedBatchedEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, 4, 4, &alpha, X_im, CUDA_R_32F, M, M * 4, F4_im, CUDA_R_32F, 4, 0, &beta, result4, CUDA_R_32F, M, M * 4, B, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!!! CUBLAS kernel execution error in fft4_transposed X_im*F4_im multiplication.\n"); return FFT_FAILURE; } // Rescale the result and combine real and imaginary part //// Set grid and block size dim3 threadsPerBlock2(16, 16); dim3 blocksPerGrid2((4 * B + 15)/16, (M + 15)/16); //// call the accumulation kernel function (FX_re and FX_im will be zero-initialized inside) myAccumulate_transposed<<<blocksPerGrid2, threadsPerBlock2>>>(4, M, result1, result2, result3, result4, FX_re, FX_im, B); cerror = cudaGetLastError(); if (cerror != cudaSuccess) { fprintf(stderr, "!!!!! CUDA error: %s during accumulation in fft4_transposed\n", cudaGetErrorString(cerror)); return FFT_FAILURE; } return FFT_SUCCESS; } /* * The kernel rescales the multiplication result and accumulates them * Each thread works on one element (instead of one vector) in the resulting matrix * The length of one vector (unit) is given by n, expected to be 4 * The total number of vectors is M * B * M is the vertical dimension, B is the horizontal dimension * X1, X2 are M * (4 * B * 4) matrices. The inner-most column order is by element in a unit. Then by batch. Outer order is Re_hi, Re_lo, Im_hi, Im_lo * alpha is a M * B * 4 arrays. Inner most order is by horizontal index. Then by batch. Outer order is re_s1, re_s2, im_s1, im_s2 * R1, R2 are M * (4 * B) matrices * */ __global__ void myAccumulate_transposed(int n, int M, float* X1, float* X2, float* X3, float* X4, float* R1, float* R2, int B) { int i = blockIdx.y * blockDim.y + threadIdx.y; // vertical index of the element, max M int j = blockIdx.x * blockDim.x + threadIdx.x; // horizontal index of the element, max 4 * B if (i < M && j < 4 * B){ int result_idx = i + j * M; R1[result_idx] = R2[result_idx] = 0.0f; R1[result_idx] += X1[result_idx]; R1[result_idx] += -1.0f * X4[result_idx]; R2[result_idx] += X2[result_idx]; R2[result_idx] += X3[result_idx]; } }
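The radix-4 FFT pair above spends most of its work in cublasGemmEx / cublasGemmStridedBatchedEx calls that multiply the 4x4 Fourier matrix into batches of column vectors. The sketch below isolates one such call, F4_re times B length-4 vectors in column-major layout with leading dimension 4, and follows the same calling convention used in the file (CUDA_R_32F compute type, CUBLAS_GEMM_DEFAULT); the batch size, the input values, and the reduced error handling are illustrative assumptions. Link with -lcublas.

// Minimal sketch of the fft4-style GEMM: real Fourier matrix applied to a batch of vectors.
#include <cstdio>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main() {
    const int B = 8;                        // number of length-4 vectors in the batch (illustrative)
    const float F4[16] = { 1, 1, 1, 1,      // column-major F4_re, same values as init_F4()
                           1, 0,-1, 0,
                           1,-1, 1,-1,
                           1, 0,-1, 0 };
    float hX[4 * B];
    for (int i = 0; i < 4 * B; ++i) hX[i] = (float)(i % 4);

    float *dF, *dX, *dY;
    cudaMalloc(&dF, sizeof(F4));
    cudaMalloc(&dX, sizeof(hX));
    cudaMalloc(&dY, sizeof(hX));
    cudaMemcpy(dF, F4, sizeof(F4), cudaMemcpyHostToDevice);
    cudaMemcpy(dX, hX, sizeof(hX), cudaMemcpyHostToDevice);

    cublasHandle_t h;
    cublasCreate(&h);
    const float alpha = 1.0f, beta = 0.0f;
    // C(4 x B) = F4(4 x 4) * X(4 x B), all FP32, leading dimension 4 for every operand.
    cublasStatus_t st = cublasGemmEx(h, CUBLAS_OP_N, CUBLAS_OP_N,
                                     4, B, 4, &alpha,
                                     dF, CUDA_R_32F, 4,
                                     dX, CUDA_R_32F, 4, &beta,
                                     dY, CUDA_R_32F, 4,
                                     CUDA_R_32F, CUBLAS_GEMM_DEFAULT);
    if (st != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "cublasGemmEx failed\n"); return 1; }

    float hY[4 * B];
    cudaMemcpy(hY, dY, sizeof(hY), cudaMemcpyDeviceToHost);
    printf("first output vector: %g %g %g %g\n", hY[0], hY[1], hY[2], hY[3]);

    cublasDestroy(h);
    cudaFree(dF);
    cudaFree(dX);
    cudaFree(dY);
    return 0;
}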